UIDBurnManageController.py

# -*- encoding: utf-8 -*-
"""
@File    : UIDBurnManageController.py
@Time    : 2025/7/30 08:57
@Author  : stephen
@Email   : zhangdongming@asj6.wecom.work
@Software: PyCharm
"""
import json
import os
import random
import string
import threading
import time
from datetime import datetime
from typing import Dict, Any
from uuid import uuid4

from django.core import serializers
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import transaction
from django.db.models import Q
from django.http import QueryDict
from django.views import View
from openpyxl import load_workbook

from AgentModel.models import BurnRecord, BurnEncryptedICUID
from Ansjer.config import LOGGER
from Object.RedisObject import RedisObject
from Object.ResponseObject import ResponseObject
from Object.TokenObject import TokenObject

class UIDBurnManageView(View):
    def get(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        return self.validation(request.GET, request, operation)

    def post(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        return self.validation(request.POST, request, operation)

    def delete(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        # Django does not parse DELETE bodies; read the body manually and
        # fall back to the query string when it is empty
        delete = QueryDict(request.body)
        if not delete:
            delete = request.GET
        return self.validation(delete, request, operation)

    def put(self, request, *args, **kwargs):
        request.encoding = 'utf-8'
        operation = kwargs.get('operation')
        put = QueryDict(request.body)
        return self.validation(put, request, operation)

    def validation(self, request_dict, request, operation):
        """Validate the request and route it to the handler for `operation`."""
        # Initialise the response object
        language = request_dict.get('language', 'cn')
        response = ResponseObject(language, 'pc')
        # Token validation
        try:
            tko = TokenObject(
                request.META.get('HTTP_AUTHORIZATION'),
                returntpye='pc')  # (sic) keyword spelled as defined by TokenObject
            if tko.code != 0:
                return response.json(tko.code)
            response.lang = tko.lang
            user_id = tko.userID  # currently unused
        except Exception as e:
            LOGGER.error(f"Token validation failed: {str(e)}")
            return response.json(444)

        if operation == 'getBurnRecordsPage':
            return self.get_burn_records_page(request_dict, response)
        elif operation == 'importBatchUids':
            return self.import_batch_uids(request, response)
        elif operation == 'addBurnRecord':
            return self.add_burn_record(request, request_dict, response)
        elif operation == 'getBurnUidsPage':
            return self.get_burn_uids_page(request_dict, response)
        elif operation == 'getImportProgress':
            return self.get_import_progress(request_dict, response)
        elif operation == 'getImportTaskList':
            return self.get_import_task_list(request_dict, response)
        else:
            return response.json(414)

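    # A minimal routing sketch (an assumption -- the project's actual urls.py is
    # not part of this file): the dispatch above relies on the URLconf capturing
    # an `operation` kwarg, e.g.
    #
    #     from django.urls import re_path
    #     from .UIDBurnManageController import UIDBurnManageView
    #
    #     urlpatterns = [
    #         re_path(r'^uidBurn/(?P<operation>\w+)$', UIDBurnManageView.as_view()),
    #     ]
    #
    # With such a mapping, GET /uidBurn/getBurnRecordsPage would reach
    # get_burn_records_page() after token validation.
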
    @classmethod
    def get_burn_records_page(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Paginated query of burn records.
        :param request_dict: request parameters (pagination and filter fields)
        :param response: response object used to build the JSON reply
        :return: JSON response with one page of results
        """
        # 1. Parse and validate pagination parameters
        try:
            page = int(request_dict.get('page', 1))
            page_size = int(request_dict.get('pageSize', 10))
            # Clamp the pagination range
            page = max(page, 1)
            page_size = max(1, min(page_size, 100))
        except (ValueError, TypeError):
            return response.json(444, "Invalid pagination parameters (must be integers)")

        # 2. Build the query
        query = Q()
        order_number = request_dict.get('orderNumber', '').strip()
        if order_number:
            query &= Q(order_number__icontains=order_number)

        # 3. Fetch the queryset, restricted to the fields we need
        burn_qs = BurnRecord.objects.filter(query).order_by('-created_time').values(
            'id', 'order_number', 'burn_count', 'purpose', 'created_time'
        )

        # 4. Paginate
        paginator = Paginator(burn_qs, page_size)
        try:
            page_obj = paginator.page(page)
        except PageNotAnInteger:
            page_obj = paginator.page(1)
        except EmptyPage:
            page_obj = paginator.page(paginator.num_pages)

        # Materialise the current page as a list
        burn_list = list(page_obj)

        return response.json(
            0,
            {
                'total': paginator.count,       # total number of records
                'list': burn_list,              # rows on the current page
                'currentPage': page_obj.number,
                'totalPages': paginator.num_pages
            }
        )

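    # Illustrative only (field values below are hypothetical): a successful
    # getBurnRecordsPage call returns a payload shaped like
    #
    #     {
    #         "total": 42,
    #         "list": [{"id": 1, "order_number": "PO-001", "burn_count": 500,
    #                   "purpose": "...", "created_time": 1753837020}],
    #         "currentPage": 1,
    #         "totalPages": 5
    #     }
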
    @classmethod
    def import_batch_uids(cls, request, response) -> Any:
        """
        Import a batch of UIDs -- asynchronous version.
        :param request: HttpRequest carrying the uploaded file
        :param response: response object
        :return: JSON response
        """
        # 1. Validate the upload
        if 'file' not in request.FILES:
            return response.json(444, "Please upload an Excel file")
        excel_file = request.FILES['file']
        # openpyxl cannot read the legacy .xls format, so only .xlsx is accepted
        if not excel_file.name.endswith('.xlsx'):
            return response.json(444, "Only Excel files (.xlsx) are supported")
        try:
            # 2. Generate a task ID and batch number
            task_id = str(uuid4())
            # Batch number carries a timestamp plus random characters
            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')  # second precision
            random_chars = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3))
            batch_number = f"ENG{timestamp}{random_chars}"  # format: ENG + timestamp + random suffix

            # 3. Initialise the task state in Redis (expires after 2 hours)
            redis_key = f"import_task:{task_id}"
            redis_obj = RedisObject()
            task_data = {
                'status': 'pending',
                'batch_number': batch_number,
                'progress': 0,
                'processed': 0,
                'total': 0,
                'start_time': int(time.time()),
                'success_count': 0
            }
            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)

            # 4. Save the file under the project's static/uploaded_files directory
            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
            os.makedirs(upload_dir, exist_ok=True)
            file_path = os.path.join(upload_dir, f"{task_id}.xlsx")
            with open(file_path, 'wb+') as destination:
                for chunk in excel_file.chunks():
                    destination.write(chunk)

            # 5. Hand the heavy lifting to a background thread
            thread = threading.Thread(
                target=cls._process_import_batch_async,
                args=(task_id, file_path, redis_key, batch_number),
                daemon=True
            )
            thread.start()

            return response.json(0, {
                "task_id": task_id,
                "batch_number": batch_number,
                "message": "Import task submitted; processing in the background",
                "redis_key": redis_key
            })
        except Exception as e:
            LOGGER.error(f"Failed to create import task: {str(e)}")
            return response.json(500, "Failed to create import task")

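    # A hedged client-side sketch (the endpoint path and header name are
    # assumptions following the dispatch table in validation()):
    #
    #     curl -X POST 'https://<host>/uidBurn/importBatchUids' \
    #          -H 'Authorization: <token>' \
    #          -F 'file=@uids.xlsx'
    #
    # The call returns immediately with a task_id; the Excel parsing happens in
    # _process_import_batch_async on a daemon thread.
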
    @classmethod
    def _process_import_batch_async(cls, task_id, file_path, redis_key, batch_number):
        """Background thread that performs the batch import."""
        redis_obj = RedisObject()
        try:
            # Mark the task as processing
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['status'] = 'processing'
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 1. Open the workbook and record the total row count
            wb = load_workbook(file_path)
            ws = wb.active
            total_rows = ws.max_row

            # Record the total and the actual start time
            task_data['total'] = total_rows
            task_data['start_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 2. Process the UIDs in batches of 500
            batch_size = 500
            current_time = int(time.time())
            processed = 0
            success_count = 0
            uids_batch = []
            for row in ws.iter_rows(min_row=1, values_only=True):
                if row[0]:
                    uid = str(row[0]).strip()
                    if uid:
                        uids_batch.append(uid)
                        processed += 1
                        # Refresh the progress every 1000 rows
                        if processed % 1000 == 0:
                            progress = min(99, int((processed / total_rows) * 100))
                            task_data['progress'] = progress
                            task_data['processed'] = processed
                            task_data['last_update'] = int(time.time())
                            redis_obj.set_data(redis_key, json.dumps(task_data))
                        # Flush a full batch
                        if len(uids_batch) >= batch_size:
                            success = cls._import_uids_batch(
                                uids_batch,
                                batch_number,
                                current_time,
                                redis_key
                            )
                            success_count += success
                            uids_batch = []

            # Flush the final partial batch
            if uids_batch:
                success = cls._import_uids_batch(
                    uids_batch,
                    batch_number,
                    current_time,
                    redis_key
                )
                success_count += success

            # Record the final state
            task_data['status'] = 'completed'
            task_data['progress'] = 100
            task_data['processed'] = processed
            task_data['success_count'] = success_count
            task_data['end_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))
            LOGGER.info(f"Batch import finished, task ID: {task_id}, UIDs processed: {processed}, imported: {success_count}")

            # Remove the temporary file
            try:
                os.remove(file_path)
            except Exception as e:
                LOGGER.warning(f"Failed to delete temporary file: {str(e)}")
        except Exception as e:
            LOGGER.error(f"Batch import failed: {str(e)}")
            task_data = {
                'status': 'failed',
                'error': str(e),
                'end_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data))

    @classmethod
    def _import_uids_batch(cls, uids_batch, batch_number, current_time, redis_key):
        """Bulk-insert one batch of UID records."""
        redis_obj = RedisObject()
        try:
            with transaction.atomic():
                # Deduplicate within this batch only
                unique_uids = list(set(uids_batch))
                records = [
                    BurnEncryptedICUID(
                        batch_number=batch_number,
                        uid=uid,
                        purpose='Batch import',
                        created_time=current_time,
                        updated_time=current_time,
                        status=0  # not yet burned
                    )
                    for uid in unique_uids
                ]
                BurnEncryptedICUID.objects.bulk_create(records)
            # Push the processed and success counters to Redis
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            task_data['success_count'] = task_data.get('success_count', 0) + len(records)
            redis_obj.set_data(redis_key, json.dumps(task_data))
            return len(records)
        except Exception as e:
            LOGGER.error(f"Bulk UID import failed: {str(e)}")
            # Record the progress anyway and keep going
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            redis_obj.set_data(redis_key, json.dumps(task_data))
            return 0

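    # Note: set() above only removes duplicates inside a single 500-row batch;
    # duplicates spanning batches (or a re-imported file) will still hit the
    # database. If the uid column carries a unique constraint (an assumption,
    # not confirmed by this file), one possible hardening is:
    #
    #     BurnEncryptedICUID.objects.bulk_create(records, ignore_conflicts=True)
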
    @classmethod
    def add_burn_record(cls, request, request_dict, response) -> Any:
        """
        Create a burn record from an uploaded UID file -- Redis-backed version.
        :param request: HttpRequest carrying the upload and form data
        :param request_dict: request parameters
        :param response: response object
        :return: JSON response
        """
        # 1. Validate the required form fields
        required_fields = ['order_number', 'burn_count', 'purpose']
        for field in required_fields:
            if not request_dict.get(field):
                return response.json(444, f"Missing required parameter: {field}")

        # 2. Validate the upload
        if 'uid_file' not in request.FILES:
            return response.json(444, "Please upload an Excel file containing the burned UIDs")
        excel_file = request.FILES['uid_file']
        # openpyxl cannot read the legacy .xls format, so only .xlsx is accepted
        if not excel_file.name.endswith('.xlsx'):
            return response.json(444, "Only Excel files (.xlsx) are supported")
        try:
            burn_count = int(request_dict['burn_count'])
            if burn_count <= 0:
                return response.json(444, "Burn count must be greater than 0")

            # 3. Create a task ID and initialise the task state in Redis
            #    (expires after 2 hours)
            task_id = str(uuid4())
            redis_key = f"burn_task:{task_id}"
            redis_obj = RedisObject()
            task_data = {
                'status': 'pending',
                'order_number': request_dict['order_number'].strip(),
                'burn_count': burn_count,
                'purpose': request_dict['purpose'].strip(),
                'progress': 0,
                'processed': 0,
                'total': 0,
                'start_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)

            # 4. Save the file under the project's static/uploaded_files directory
            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
            os.makedirs(upload_dir, exist_ok=True)
            file_path = os.path.join(upload_dir, f"{task_id}.xlsx")
            with open(file_path, 'wb+') as destination:
                for chunk in excel_file.chunks():
                    destination.write(chunk)

            # 5. Hand off to a background thread
            thread = threading.Thread(
                target=cls._process_burn_record_async,
                args=(task_id, file_path, redis_key),
                daemon=True
            )
            thread.start()

            return response.json(0, {
                "task_id": task_id,
                "message": "Task submitted; processing in the background",
                "redis_key": redis_key
            })
        except ValueError:
            return response.json(444, "Burn count must be an integer")
        except Exception as e:
            LOGGER.error(f"Failed to create burn task: {str(e)}")
            return response.json(500, "Failed to create burn task")

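    # A hedged client-side sketch (endpoint path assumed from the dispatch
    # table in validation(); form values are hypothetical):
    #
    #     curl -X POST 'https://<host>/uidBurn/addBurnRecord' \
    #          -H 'Authorization: <token>' \
    #          -F 'order_number=PO-001' -F 'burn_count=500' -F 'purpose=factory' \
    #          -F 'uid_file=@burned_uids.xlsx'
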
    @classmethod
    def _process_burn_record_async(cls, task_id, file_path, redis_key):
        """Background thread that processes a burn-record task."""
        redis_obj = RedisObject()
        try:
            # Mark the task as processing
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['status'] = 'processing'
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 1. Open the workbook and record the total row count
            wb = load_workbook(file_path)
            ws = wb.active
            total_rows = ws.max_row

            # Record the total and the actual start time
            task_data['total'] = total_rows
            task_data['start_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 2. Create the burn record
            with transaction.atomic():
                burn_record = BurnRecord(
                    order_number=task_data['order_number'],
                    burn_count=task_data['burn_count'],
                    purpose=task_data['purpose'],
                    updated_time=int(time.time()),
                    created_time=int(time.time())
                )
                burn_record.save()
            task_data['burn_record_id'] = burn_record.id
            redis_obj.set_data(redis_key, json.dumps(task_data))

            # 3. Process the UID file in batches of 300
            batch_size = 300
            current_time = int(time.time())
            processed = 0
            uids_batch = []
            for row in ws.iter_rows(min_row=1, values_only=True):
                if row[0]:
                    uid = str(row[0]).strip()
                    if uid:
                        uids_batch.append(uid)
                        processed += 1
                        # Refresh the progress every 100 rows
                        if processed % 100 == 0:
                            progress = min(99, int((processed / total_rows) * 100))
                            task_data['progress'] = progress
                            task_data['processed'] = processed
                            task_data['last_update'] = int(time.time())
                            redis_obj.set_data(redis_key, json.dumps(task_data))
                        # Flush a full batch
                        if len(uids_batch) >= batch_size:
                            cls._update_uids_batch(
                                uids_batch,
                                burn_record.id,
                                current_time,
                                redis_key
                            )
                            uids_batch = []

            # Flush the final partial batch
            if uids_batch:
                cls._update_uids_batch(
                    uids_batch,
                    burn_record.id,
                    current_time,
                    redis_key
                )

            # Record the final state
            task_data['status'] = 'completed'
            task_data['progress'] = 100
            task_data['processed'] = processed
            task_data['end_time'] = int(time.time())
            redis_obj.set_data(redis_key, json.dumps(task_data))
            LOGGER.info(f"Burn record processed, task ID: {task_id}, UIDs processed: {processed}")

            # Remove the temporary file
            try:
                os.remove(file_path)
            except Exception as e:
                LOGGER.warning(f"Failed to delete temporary file: {str(e)}")
        except Exception as e:
            LOGGER.error(f"Failed to process burn record: {str(e)}")
            task_data = {
                'status': 'failed',
                'error': str(e),
                'end_time': int(time.time())
            }
            redis_obj.set_data(redis_key, json.dumps(task_data))

    @classmethod
    def _update_uids_batch(cls, uids_batch, burn_id, current_time, redis_key):
        """Bulk-update one batch of UID records."""
        redis_obj = RedisObject()
        try:
            with transaction.atomic():
                updated = BurnEncryptedICUID.objects.filter(
                    uid__in=uids_batch
                ).update(
                    burn_id=burn_id,
                    status=1,  # burned successfully
                    updated_time=current_time
                )
            # Push the processed counter to Redis; `updated` may be smaller
            # than the batch size if some UIDs were never imported
            task_data = json.loads(redis_obj.get_data(redis_key))
            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
            redis_obj.set_data(redis_key, json.dumps(task_data))
        except Exception as e:
            LOGGER.error(f"Bulk UID update failed: {str(e)}")
            raise

    @classmethod
    def get_import_progress(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Query task progress (supports both import and burn tasks).
        :param request_dict: request parameters (task_id required, type optional)
        :param response: response object
        :return: JSON response
        The `type` parameter:
        - import: import task (default)
        - burn: burn task
        """
        # 1. Validate parameters
        task_id = request_dict.get('task_id')
        if not task_id:
            return response.json(444, "Missing task_id parameter")
        task_type = request_dict.get('type', 'import').lower()
        if task_type not in ['import', 'burn']:
            return response.json(444, "type must be 'import' or 'burn'")

        # 2. Build the Redis key
        redis_key = f"{task_type}_task:{task_id}"
        try:
            # 3. Fetch the task data from Redis
            redis_obj = RedisObject()
            task_data_str = redis_obj.get_data(redis_key)
            if not task_data_str:
                return response.json(173, "Task does not exist or has expired")

            # 4. Parse the task data
            if isinstance(task_data_str, bytes):
                task_data_str = task_data_str.decode('utf-8')
            task_data = json.loads(task_data_str)

            # 5. Compute the elapsed time in seconds
            current_time = int(time.time())
            start_time = task_data.get('start_time', current_time)
            elapsed = current_time - start_time
            if task_data.get('end_time'):
                elapsed = task_data['end_time'] - start_time

            # 6. Build the common response payload
            result = {
                'status': task_data.get('status', 'unknown'),
                'progress': task_data.get('progress', 0),
                'processed': task_data.get('processed', 0),
                'total': task_data.get('total', 0),
                'elapsed_seconds': elapsed,
                'start_time': start_time,
                'end_time': task_data.get('end_time'),
                'error': task_data.get('error'),
                'task_type': task_type
            }

            # 7. Add task-type specific fields
            if task_type == 'import':
                result.update({
                    'batch_number': task_data.get('batch_number', ''),
                    'success_count': task_data.get('success_count', 0)
                })
            else:  # burn task
                result.update({
                    'order_number': task_data.get('order_number', ''),
                    'purpose': task_data.get('purpose', ''),
                    'burn_count': task_data.get('burn_count', 0),
                    'burn_record_id': task_data.get('burn_record_id')
                })
            return response.json(0, result)
        except json.JSONDecodeError:
            LOGGER.error(f"Failed to parse task data, redis_key: {redis_key}")
            return response.json(500, "Malformed task data")
        except Exception as e:
            LOGGER.error(f"Failed to query task progress: {str(e)}")
            return response.json(500, "Failed to query progress")

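    # A minimal polling loop a client might run (sketch only -- the requests
    # usage, endpoint path, and the 'data' envelope key are assumptions about
    # ResponseObject's output, not confirmed by this file):
    #
    #     import requests, time
    #     while True:
    #         r = requests.get('https://<host>/uidBurn/getImportProgress',
    #                          params={'task_id': task_id, 'type': 'import'},
    #                          headers={'Authorization': token}).json()
    #         if r['data']['status'] in ('completed', 'failed'):
    #             break
    #         time.sleep(2)
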
    @classmethod
    def get_import_task_list(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        List all import tasks.
        :param request_dict: request parameters
        :param response: response object
        :return: JSON response
        """
        try:
            redis_obj = RedisObject()
            # Fetch every key under the import_task prefix
            keys = redis_obj.get_keys("import_task:*")
            if not keys:
                return response.json(0, {"tasks": []})

            tasks = []
            # Collect the basic information of each task
            for key in keys:
                try:
                    task_data_str = redis_obj.get_data(key)
                    if task_data_str:
                        # Redis may hand back bytes; normalise to str
                        if isinstance(task_data_str, bytes):
                            task_data_str = task_data_str.decode('utf-8')
                        task_data = json.loads(task_data_str)
                        key_str = key.decode('utf-8') if isinstance(key, bytes) else key
                        tasks.append({
                            'task_id': key_str.split(':')[1],  # extract task_id from the key
                            'status': task_data.get('status', 'unknown'),
                            'progress': task_data.get('progress', 0),
                            'batch_number': task_data.get('batch_number', ''),
                            'start_time': task_data.get('start_time'),
                            'end_time': task_data.get('end_time'),
                            'processed': task_data.get('processed', 0),
                            'total': task_data.get('total', 0),
                            'redis_key': key_str
                        })
                except Exception as e:
                    LOGGER.error(f"Failed to parse task data, key: {key}, error: {str(e)}")
                    continue

            # Newest first; `or 0` guards against a missing/None start_time,
            # which would make the comparison raise a TypeError
            tasks.sort(key=lambda x: x.get('start_time') or 0, reverse=True)
            # Cap the result at the most recent 100 tasks
            tasks = tasks[:100]
            return response.json(0, {"tasks": tasks})
        except Exception as e:
            LOGGER.error(f"Failed to fetch import task list: {str(e)}")
            return response.json(500, "Failed to fetch task list")

    @classmethod
    def get_burn_uids_page(cls, request_dict: Dict[str, Any], response) -> Any:
        """
        Paginated query of burned UID records for a given burn_id.
        :param request_dict: request parameters
        :param response: response object
        :return: JSON response
        """
        # 1. Validate parameters
        burn_id = request_dict.get('burn_id')
        if not burn_id:
            return response.json(444, "Missing burn_id parameter")
        try:
            burn_id = int(burn_id)
        except ValueError:
            return response.json(444, "burn_id must be an integer")

        # 2. Parse pagination parameters
        try:
            page = int(request_dict.get('page', 1))
            page_size = int(request_dict.get('pageSize', 10))
            page = max(page, 1)
            page_size = max(1, min(page_size, 100))
        except (ValueError, TypeError):
            return response.json(444, "Invalid pagination parameters (must be integers)")

        # 3. Query and paginate
        query = Q(burn_id=burn_id)
        uid_qs = BurnEncryptedICUID.objects.filter(query).order_by('-created_time')
        paginator = Paginator(uid_qs, page_size)
        try:
            page_obj = paginator.page(page)
        except PageNotAnInteger:
            page_obj = paginator.page(1)
        except EmptyPage:
            page_obj = paginator.page(paginator.num_pages)

        uid_list = serializers.serialize(
            'python',
            page_obj,
            fields=['id', 'uid', 'batch_number', 'status', 'created_time', 'updated_time']
        )
        return response.json(
            0,
            {
                'list': uid_list,
                'total': paginator.count,
                'currentPage': page_obj.number,
                'totalPages': paginator.num_pages,
                'pageSize': page_size
            }
        )
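
# End-to-end flow (informal summary of the handlers above):
#   1. importBatchUids uploads an .xlsx of UIDs; rows are bulk-created as
#      BurnEncryptedICUID with status=0 under a generated batch_number.
#   2. addBurnRecord uploads the list of UIDs actually burned; a BurnRecord is
#      created and the matching BurnEncryptedICUID rows are flipped to status=1.
#   3. getImportProgress / getImportTaskList poll the Redis-backed task state,
#      and getBurnUidsPage pages through the UIDs tied to one burn record.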