# -*- encoding: utf-8 -*-
"""
@File    : UIDBurnManageController.py
@Time    : 2025/7/30 08:57
@Author  : stephen
@Email   : zhangdongming@asj6.wecom.work
@Software: PyCharm
"""
import json
import os
import random
import string
import threading
import time
from datetime import datetime
from typing import Dict, Any
from uuid import uuid4

from django.core import serializers
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import transaction
from django.db.models import Q
from django.http import QueryDict
from django.views import View
from openpyxl import load_workbook

from AgentModel.models import BurnRecord, BurnEncryptedICUID
from Ansjer.config import LOGGER
from Object.RedisObject import RedisObject
from Object.ResponseObject import ResponseObject
from Object.TokenObject import TokenObject
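
# A minimal URL wiring sketch (assumed; the project's real route, prefix, and
# module path are hypothetical here). The view dispatches on the `operation`
# URL kwarg, so the route must capture it, e.g. in urls.py:
#
#     from django.urls import path
#     from AgentController.UIDBurnManageController import UIDBurnManageView
#
#     urlpatterns = [
#         path('uidBurn/<str:operation>', UIDBurnManageView.as_view()),
#     ]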

class UIDBurnManageView(View):

    def get(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        return self.validation(request.GET, request, operation)

    def post(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        return self.validation(request.POST, request, operation)

    def delete(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        delete = QueryDict(request.body)
        if not delete:
            delete = request.GET
        return self.validation(delete, request, operation)

    def put(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        put = QueryDict(request.body)
        return self.validation(put, request, operation)

    def validation(self, request_dict, request, operation):
        """Validate the request token, then route to the matching operation handler."""
        # Initialise the response object
        language = request_dict.get('language', 'cn')
        response = ResponseObject(language, 'pc')
        # Token validation
        try:
            tko = TokenObject(
                request.META.get('HTTP_AUTHORIZATION'),
                returntpye='pc')  # keyword spelling follows TokenObject's signature
            if tko.code != 0:
                return response.json(tko.code)
            response.lang = tko.lang
            user_id = tko.userID
        except Exception as e:
            LOGGER.error(f"Token验证失败: {str(e)}")
            return response.json(444)

        if operation == 'getBurnRecordsPage':
            return self.get_burn_records_page(request_dict, response)
        elif operation == 'importBatchUids':
            return self.import_batch_uids(request, response)
        elif operation == 'addBurnRecord':
            return self.add_burn_record(request, response)
        elif operation == 'getBurnUidsPage':
            return self.get_burn_uids_page(request_dict, response)
        elif operation == 'getImportProgress':
            return self.get_import_progress(request_dict, response)
        elif operation == 'getImportTaskList':
            return self.get_import_task_list(request_dict, response)
        else:
            return response.json(414)

    @classmethod
    def get_burn_records_page(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Query burn records with pagination.
        :param request_dict: request parameters (pagination plus filter conditions)
        :param response: response object used to build the JSON reply
        :return: JSON response with one page of results
        """
        # 1. Parse and validate pagination parameters (strict int conversion, bounded ranges)
        try:
            page = int(request_dict.get('page', 1))
            page_size = int(request_dict.get('pageSize', 10))
            # Clamp the range: page >= 1, page size between 1 and 100
            page = max(page, 1)
            page_size = max(1, min(page_size, 100))
        except (ValueError, TypeError):
            return response.json(444, "分页参数错误(必须为整数)")

        # 2. Build the filter (avoids the previously undefined `query` variable)
        query = Q()  # empty filter matches everything
        order_number = request_dict.get('orderNumber', '').strip()  # strip whitespace so '' is ignored
        if order_number:
            query &= Q(order_number__icontains=order_number)

        # 3. Build the queryset (lazy; no database hit yet)
        burn_qs = BurnRecord.objects.filter(query).order_by('-created_time')

        # 4. Paginate with full exception handling
        paginator = Paginator(burn_qs, page_size)
        try:
            page_obj = paginator.page(page)
        except PageNotAnInteger:
            # Non-integer page number: fall back to the first page
            page_obj = paginator.page(1)
        except EmptyPage:
            # Page out of range: return the last page (or an empty list, per business needs)
            page_obj = paginator.page(paginator.num_pages)

        burn_list = serializers.serialize(
            'python',  # emit plain Python dicts
            page_obj,
            fields=['id', 'order_number', 'burn_count', 'purpose', 'created_time']
        )
        return response.json(
            0,
            {
                'list': burn_list,
                'total': paginator.count,           # total record count
                'currentPage': page_obj.number,     # current page number
                'totalPages': paginator.num_pages,  # total page count
                'pageSize': page_size               # effective page size, echoed for the frontend
            }
        )
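
    # Note on the payload shape: serializers.serialize('python', ...) wraps each
    # record as {'model': '<app>.burnrecord', 'pk': <id>, 'fields': {...}}, and
    # the primary key is reported as 'pk' rather than inside 'fields' (so the
    # 'id' entry in `fields=` above has no effect). If a flat list is preferred,
    # a sketch using values() instead:
    #
    #     burn_list = list(page_obj.object_list.values(
    #         'id', 'order_number', 'burn_count', 'purpose', 'created_time'))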

    @classmethod
    def import_batch_uids(cls, request, response) -> Any:
        """
        Import a batch of UIDs (asynchronous, optimised version).
        :param request: HttpRequest carrying the uploaded file
        :param response: response object
        :return: JSON response
        """
        # 1. Validate the uploaded file
        if 'file' not in request.FILES:
            return response.json(444, "请上传Excel文件")
        excel_file = request.FILES['file']
        if not excel_file.name.endswith(('.xlsx', '.xls')):
            return response.json(444, "只支持Excel文件(.xlsx/.xls)")
        try:
            # 2. Generate the task ID and batch number
            task_id = str(uuid4())
            # Batch number = timestamp plus random suffix
            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')  # second precision
            random_chars = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3))  # 3 random chars
            batch_number = f"ENG{timestamp}{random_chars}"  # format: ENG + timestamp + random chars

            # 3. Initialise task state in Redis (expires after 2 hours)
            redis_key = f"import_task:{task_id}"
            redis_obj = RedisObject()
            task_data = {
                'status': 'pending',
                'batch_number': batch_number,
                'progress': 0,
                'processed': 0,
                'total': 0,
                'start_time': int(time.time()),
                'success_count': 0
            }
            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)

            # 4. Save the file under the project's static/uploaded_files directory
            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
            os.makedirs(upload_dir, exist_ok=True)
            file_path = os.path.join(upload_dir, f"{task_id}.xlsx")
            with open(file_path, 'wb+') as destination:
                for chunk in excel_file.chunks():
                    destination.write(chunk)

            # 5. Hand off to a background thread
            thread = threading.Thread(
                target=cls._process_import_batch_async,
                args=(task_id, file_path, redis_key, batch_number),
                daemon=True
            )
            thread.start()
            return response.json(0, {
                "task_id": task_id,
                "batch_number": batch_number,
                "message": "导入任务已提交,正在后台处理",
                "redis_key": redis_key
            })
        except Exception as e:
            LOGGER.error(f"创建导入任务失败: {str(e)}")
            return response.json(500, "创建导入任务失败")
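
    # Example client call, a sketch with a hypothetical host and route; the
    # multipart field name 'file' and the returned task_id/batch_number follow
    # the code above, while the response envelope depends on ResponseObject:
    #
    #     import requests
    #     with open('uids.xlsx', 'rb') as f:
    #         r = requests.post('https://example.com/uidBurn/importBatchUids',
    #                           headers={'Authorization': token},
    #                           files={'file': f})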

    @classmethod
    def _process_import_batch_async(cls, task_id, file_path, redis_key, batch_number):
        """Background thread worker for a batch-import task."""
        redis_obj = RedisObject()
        try:
            # Mark the task as processing
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['status'] = 'processing'
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 1. Open the workbook and record the total row count
            wb = load_workbook(file_path)
            ws = wb.active
            total_rows = ws.max_row
            task_data['total'] = total_rows
            task_data['start_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 2. Process UID rows in batches of 500
            batch_size = 500
            current_time = int(time.time())
            processed = 0
            success_count = 0
            uids_batch = []
            for row in ws.iter_rows(min_row=1, values_only=True):
                if row[0]:
                    uid = str(row[0]).strip()
                    if uid:
                        uids_batch.append(uid)
                        processed += 1
                        # Refresh the progress every 1000 rows
                        if processed % 1000 == 0:
                            progress = min(99, int((processed / total_rows) * 100))
                            task_data['progress'] = progress
                            task_data['processed'] = processed
                            task_data['last_update'] = int(time.time())
                            redis_obj.set_data(redis_key, json.dumps(task_data))
                        # Flush a full batch
                        if len(uids_batch) >= batch_size:
                            success = cls._import_uids_batch(
                                uids_batch,
                                batch_number,
                                current_time,
                                redis_key
                            )
                            success_count += success
                            uids_batch = []
            # Flush the final partial batch
            if uids_batch:
                success = cls._import_uids_batch(
                    uids_batch,
                    batch_number,
                    current_time,
                    redis_key
                )
                success_count += success

            # Record the final state
            task_data['status'] = 'completed'
            task_data['progress'] = 100
            task_data['processed'] = processed
            task_data['success_count'] = success_count
            task_data['end_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))
            LOGGER.info(f"处理批量导入完成,任务ID: {task_id}, 处理UID数量: {processed}, 成功导入: {success_count}")

            # Remove the temporary file
            try:
                os.remove(file_path)
            except Exception as e:
                LOGGER.warning(f"删除临时文件失败: {str(e)}")
        except Exception as e:
            LOGGER.error(f"处理批量导入失败: {str(e)}")
            task_data = {
                'status': 'failed',
                'error': str(e),
                'end_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data))

    @classmethod
    def _import_uids_batch(cls, uids_batch, batch_number, current_time, redis_key):
        """Bulk-insert one batch of UID records."""
        redis_obj = RedisObject()
        try:
            with transaction.atomic():
                # De-duplicate within this batch
                unique_uids = list(set(uids_batch))
                # Build the records
                records = [
                    BurnEncryptedICUID(
                        batch_number=batch_number,
                        uid=uid,
                        purpose='批次导入',
                        created_time=current_time,
                        updated_time=current_time,
                        status=0  # not yet burned
                    )
                    for uid in unique_uids
                ]
                BurnEncryptedICUID.objects.bulk_create(records)
            # Push processed/success counters to Redis
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            task_data['success_count'] = task_data.get('success_count', 0) + len(records)
            redis_obj.set_data(redis_key, json.dumps(task_data))
            return len(records)
        except Exception as e:
            LOGGER.error(f"批量导入UID失败: {str(e)}")
            # Record the failure but let the caller continue with later batches
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            redis_obj.set_data(redis_key, json.dumps(task_data))
            return 0
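
    # Design note: set() de-duplicates only within a single batch; UIDs repeated
    # across batches, or already present in the table, are inserted again. If
    # `uid` carries a unique constraint, one option (assuming Django >= 2.2) is
    # to skip conflicting rows instead of failing the whole batch:
    #
    #     BurnEncryptedICUID.objects.bulk_create(records, ignore_conflicts=True)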

    @classmethod
    def add_burn_record(cls, request, response) -> Any:
        """
        Create a burn record from an uploaded UID file (Redis-backed async version).
        :param request: HttpRequest carrying the uploaded file and form data
        :param response: response object
        :return: JSON response
        """
        # 1. Validate required form fields
        request_dict = request.POST
        required_fields = ['order_number', 'burn_count', 'purpose']
        for field in required_fields:
            if not request_dict.get(field):
                return response.json(444, f"缺少必填参数: {field}")

        # 2. Validate the uploaded file
        if 'uid_file' not in request.FILES:
            return response.json(444, "请上传包含已烧录UID的Excel文件")
        excel_file = request.FILES['uid_file']
        if not excel_file.name.endswith(('.xlsx', '.xls')):
            return response.json(444, "只支持Excel文件(.xlsx/.xls)")
        try:
            burn_count = int(request_dict['burn_count'])
            if burn_count <= 0:
                return response.json(444, "烧录数量必须大于0")

            # 3. Create the task ID and initialise state in Redis (expires after 2 hours)
            task_id = str(uuid4())
            redis_key = f"burn_task:{task_id}"
            redis_obj = RedisObject()
            task_data = {
                'status': 'pending',
                'order_number': request_dict['order_number'].strip(),
                'burn_count': burn_count,
                'purpose': request_dict['purpose'].strip(),
                'progress': 0,
                'processed': 0,
                'total': 0,
                'start_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)

            # 4. Save the file under the project's static/uploaded_files directory
            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
            os.makedirs(upload_dir, exist_ok=True)
            file_path = os.path.join(upload_dir, f"{task_id}.xlsx")
            with open(file_path, 'wb+') as destination:
                for chunk in excel_file.chunks():
                    destination.write(chunk)

            # 5. Hand off to a background thread
            thread = threading.Thread(
                target=cls._process_burn_record_async,
                args=(task_id, file_path, redis_key),
                daemon=True
            )
            thread.start()
            return response.json(0, {
                "task_id": task_id,
                "message": "任务已提交,正在后台处理",
                "redis_key": redis_key
            })
        except ValueError:
            return response.json(444, "烧录数量必须是整数")
        except Exception as e:
            LOGGER.error(f"创建烧录任务失败: {str(e)}")
            return response.json(500, "创建烧录任务失败")
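
    # Design note: the BurnRecord row commits in its own transaction, and each
    # UID batch later commits separately in _update_uids_batch. A failure partway
    # through therefore leaves a committed record with only some UIDs linked; the
    # task is marked 'failed' in Redis, but earlier batches are not rolled back.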

    @classmethod
    def _process_burn_record_async(cls, task_id, file_path, redis_key):
        """Background thread worker for a burn-record task."""
        redis_obj = RedisObject()
        try:
            # Mark the task as processing
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['status'] = 'processing'
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 1. Open the workbook and record the total row count
            wb = load_workbook(file_path)
            ws = wb.active
            total_rows = ws.max_row
            task_data['total'] = total_rows
            task_data['start_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 2. Create the burn record itself
            with transaction.atomic():
                burn_record = BurnRecord(
                    order_number=task_data['order_number'],
                    burn_count=task_data['burn_count'],
                    purpose=task_data['purpose'],
                    updated_time=int(time.time()),
                    created_time=int(time.time())
                )
                burn_record.save()
            task_data['burn_record_id'] = burn_record.id
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 3. Link UID rows to the record in batches of 300
            batch_size = 300
            current_time = int(time.time())
            processed = 0
            uids_batch = []
            for row in ws.iter_rows(min_row=1, values_only=True):
                if row[0]:
                    uid = str(row[0]).strip()
                    if uid:
                        uids_batch.append(uid)
                        processed += 1
                        # Refresh the progress every 100 rows
                        if processed % 100 == 0:
                            progress = min(99, int((processed / total_rows) * 100))
                            task_data['progress'] = progress
                            task_data['processed'] = processed
                            task_data['last_update'] = int(time.time())
                            redis_obj.set_data(redis_key, json.dumps(task_data))
                        # Flush a full batch
                        if len(uids_batch) >= batch_size:
                            cls._update_uids_batch(
                                uids_batch,
                                burn_record.id,
                                current_time,
                                redis_key
                            )
                            uids_batch = []
            # Flush the final partial batch
            if uids_batch:
                cls._update_uids_batch(
                    uids_batch,
                    burn_record.id,
                    current_time,
                    redis_key
                )

            # Record the final state
            task_data['status'] = 'completed'
            task_data['progress'] = 100
            task_data['processed'] = processed
            task_data['end_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))
            LOGGER.info(f"处理烧录记录完成,任务ID: {task_id}, 处理UID数量: {processed}")

            # Remove the temporary file
            try:
                os.remove(file_path)
            except Exception as e:
                LOGGER.warning(f"删除临时文件失败: {str(e)}")
        except Exception as e:
            LOGGER.error(f"处理烧录记录失败: {str(e)}")
            task_data = {
                'status': 'failed',
                'error': str(e),
                'end_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data))

    @classmethod
    def _update_uids_batch(cls, uids_batch, burn_id, current_time, redis_key):
        """Bulk-update one batch of UID records, linking them to a burn record."""
        redis_obj = RedisObject()
        try:
            with transaction.atomic():
                updated = BurnEncryptedICUID.objects.filter(
                    uid__in=uids_batch
                ).update(
                    burn_id=burn_id,
                    status=1,  # burned successfully
                    updated_time=current_time
                )
            # Push the processed counter to Redis
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            redis_obj.set_data(redis_key, json.dumps(task_data))
        except Exception as e:
            LOGGER.error(f"批量更新UID失败: {str(e)}")
            raise
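
    # Note: `updated` is the number of rows actually matched by uid__in; UIDs
    # that were never imported are silently skipped. The 'processed' counter
    # above tracks inputs, not matches, so accumulating `updated` into the task
    # data (e.g. a hypothetical 'matched' field) would expose the gap to callers.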

    @classmethod
    def get_import_progress(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Query the progress of an import task.
        :param request_dict: request parameters (must contain task_id)
        :param response: response object
        :return: JSON response
        """
        # 1. Validate parameters
        task_id = request_dict.get('task_id')
        if not task_id:
            return response.json(444, "缺少task_id参数")

        # 2. Build the Redis key
        redis_key = f"import_task:{task_id}"
        try:
            # 3. Fetch the task data from Redis
            redis_obj = RedisObject()
            task_data_str = redis_obj.get_data(redis_key)
            if not task_data_str:
                return response.json(404, "任务不存在或已过期")

            # 4. Parse the task data
            task_data = json.loads(task_data_str)

            # 5. Compute the elapsed time in seconds
            current_time = int(time.time())
            start_time = task_data.get('start_time', current_time)
            elapsed = current_time - start_time
            if task_data.get('end_time'):
                elapsed = task_data['end_time'] - start_time

            # 6. Return the normalised progress payload
            return response.json(0, {
                'status': task_data.get('status', 'unknown'),
                'progress': task_data.get('progress', 0),
                'processed': task_data.get('processed', 0),
                'total': task_data.get('total', 0),
                'batch_number': task_data.get('batch_number', ''),
                'success_count': task_data.get('success_count', 0),
                'elapsed_seconds': elapsed,
                'start_time': start_time,
                'end_time': task_data.get('end_time'),
                'error': task_data.get('error')
            })
        except json.JSONDecodeError:
            LOGGER.error(f"任务数据解析失败, redis_key: {redis_key}")
            return response.json(500, "任务数据格式错误")
        except Exception as e:
            LOGGER.error(f"查询导入进度失败: {str(e)}")
            return response.json(500)
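
    # Example polling loop, a sketch with a hypothetical host and route; the
    # task_id comes from importBatchUids, and the exact envelope around the
    # progress payload depends on ResponseObject:
    #
    #     import time
    #     import requests
    #     while True:
    #         r = requests.get('https://example.com/uidBurn/getImportProgress',
    #                          params={'task_id': task_id},
    #                          headers={'Authorization': token})
    #         info = r.json()  # unwrap per ResponseObject's envelope
    #         if info.get('status') in ('completed', 'failed'):
    #             break
    #         time.sleep(2)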

    @classmethod
    def get_import_task_list(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        List all import tasks currently tracked in Redis.
        :param request_dict: request parameters
        :param response: response object
        :return: JSON response
        """
        try:
            redis_obj = RedisObject()
            # Fetch every key prefixed with import_task:
            keys = redis_obj.get_keys("import_task:*")
            if not keys:
                return response.json(0, {"tasks": []})
            tasks = []
            # Collect a summary for each task
            for key in keys:
                try:
                    task_data_str = redis_obj.get_data(key)
                    if task_data_str:
                        # Redis may return bytes; normalise to str
                        if isinstance(task_data_str, bytes):
                            task_data_str = task_data_str.decode('utf-8')
                        task_data = json.loads(task_data_str)
                        # The key itself may also be bytes
                        key_str = key.decode('utf-8') if isinstance(key, bytes) else key
                        tasks.append({
                            'task_id': key_str.split(':')[1],  # extract task_id from the key
                            'status': task_data.get('status', 'unknown'),
                            'progress': task_data.get('progress', 0),
                            'batch_number': task_data.get('batch_number', ''),
                            'start_time': task_data.get('start_time'),
                            'end_time': task_data.get('end_time'),
                            'processed': task_data.get('processed', 0),
                            'total': task_data.get('total', 0),
                            'redis_key': key_str
                        })
                except Exception as e:
                    LOGGER.error(f"解析任务数据失败, key: {key}, error: {str(e)}")
                    continue
            # Sort by start time, newest first ('start_time' may be stored as
            # None, which the plain default would not catch, so coalesce it)
            tasks.sort(key=lambda x: x.get('start_time') or 0, reverse=True)
            # Cap the response at the most recent 100 tasks
            tasks = tasks[:100]
            return response.json(0, {"tasks": tasks})
        except Exception as e:
            LOGGER.error(f"获取导入任务列表失败: {str(e)}")
            return response.json(500, "获取任务列表失败")
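
    # Performance note: pattern lookups like get_keys("import_task:*") usually
    # map to the Redis KEYS command, which blocks the server while it walks the
    # whole keyspace. If RedisObject exposes the underlying redis-py client,
    # SCAN iterates incrementally instead (sketch):
    #
    #     keys = list(client.scan_iter(match="import_task:*"))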

    @classmethod
    def get_burn_uids_page(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Query burned UID records for a given burn_id, with pagination.
        :param request_dict: request parameters
        :param response: response object
        :return: JSON response
        """
        # 1. Validate parameters
        burn_id = request_dict.get('burn_id')
        if not burn_id:
            return response.json(444, "缺少burn_id参数")
        try:
            burn_id = int(burn_id)
        except ValueError:
            return response.json(444, "burn_id必须是整数")

        # 2. Parse pagination parameters
        try:
            page = int(request_dict.get('page', 1))
            page_size = int(request_dict.get('pageSize', 10))
            page = max(page, 1)
            page_size = max(1, min(page_size, 100))
        except (ValueError, TypeError):
            return response.json(444, "分页参数错误(必须为整数)")

        # 3. Filter and paginate
        query = Q(burn_id=burn_id)
        uid_qs = BurnEncryptedICUID.objects.filter(query).order_by('-created_time')
        paginator = Paginator(uid_qs, page_size)
        try:
            page_obj = paginator.page(page)
        except PageNotAnInteger:
            page_obj = paginator.page(1)
        except EmptyPage:
            page_obj = paginator.page(paginator.num_pages)

        uid_list = serializers.serialize(
            'python',
            page_obj,
            fields=['id', 'uid', 'batch_number', 'status', 'created_time', 'updated_time']
        )
        return response.json(
            0,
            {
                'list': uid_list,
                'total': paginator.count,
                'currentPage': page_obj.number,
                'totalPages': paginator.num_pages,
                'pageSize': page_size
            }
        )