
Update burn-UID management and the help link feature; return status code 20001 when a serial number is already occupied

zhangdongming 2 weeks ago
parent
commit
3bcda56312

+ 423 - 0
AdminController/HelpLinkManageController.py

@@ -0,0 +1,423 @@
+"""
+@File    : HelpLinkManageController.py
+@Time    : 2025/7/30 8:52
+@Author  : Zhuo
+@Email   : zhuojiaxuan@zosi.life
+@Software: PyCharm
+"""
+import csv
+import datetime
+import json
+
+import time
+from io import StringIO
+
+from django.core.paginator import Paginator, EmptyPage
+from django.db import transaction, IntegrityError
+from django.db.models import Q, Case, When
+from django.http import QueryDict, HttpResponse
+from django.views import View
+
+from Ansjer.config import LOGGER
+from Model.models import HelpLink
+from Object.Enums.RedisKeyConstant import RedisKeyConstant
+from Object.RedisObject import RedisObject
+from Object.ResponseObject import ResponseObject
+from Object.TokenObject import TokenObject
+
+class HelpLinkManageView(View):
+    def get(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        return self.validation(request.GET, request, operation)
+
+    def post(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        return self.validation(request.POST, request, operation)
+
+    def delete(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        delete = QueryDict(request.body)
+        if not delete:
+            delete = request.GET
+        return self.validation(delete, request, operation)
+
+    def put(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        put = QueryDict(request.body)
+        return self.validation(put, request, operation)
+
+    def validation(self, request_dict, request, operation):
+        """请求验证路由"""
+        # 初始化响应对象
+        language = request_dict.get('language', 'en')
+        response = ResponseObject(language, 'pc')
+
+        # Token validation
+        try:
+            tko = TokenObject(
+                request.META.get('HTTP_AUTHORIZATION'),
+                returntpye='pc')
+            if tko.code != 0:
+                return response.json(tko.code)
+            response.lang = tko.lang
+            user_id = tko.userID
+
+        except Exception as e:
+            LOGGER.error(f"Token验证失败: {str(e)}")
+            return response.json(444)
+
+        # Operation routing table
+        operation_handlers = {
+            'queryList': self.query_list,  # list query
+            'add': self.add_help_link,  # create
+            'edit': self.edit_help_link,  # edit
+            'delete': self.delete_help_link,  # delete
+            'getByDeviceType': self.get_by_device_type,  # fetch by device type
+            'batchDelete': self.batch_delete,  # batch delete
+            'export': self.export_help_links,  # export help links
+        }
+
+        handler = operation_handlers.get(operation)
+        if not handler:
+            return response.json(444, 'operation')
+
+        try:
+            return handler(user_id, request_dict, response)
+        except Exception as e:
+            LOGGER.error(f"Operation {operation} raised an exception: {repr(e)}")
+            return response.json(500, "Internal server error")
+
+    def query_list(self, user_id, request_dict, response):
+        """Query the help link list"""
+        # Parameter parsing and validation
+        try:
+            page = int(request_dict.get('page', 1))
+            page_size = min(int(request_dict.get('pageSize', 10)), 100)  # cap the page size
+        except ValueError:
+            return response.json(444, "Invalid pagination parameters")
+
+        # Build the query conditions
+        query = Q()
+
+        # Field-specific filters
+        if device_type := request_dict.get('deviceType'):
+            query &= Q(device_type=int(device_type))
+        if lang := request_dict.get('lang'):
+            query &= Q(lang=lang)
+        if title := request_dict.get('title'):
+            query &= Q(title__icontains=title)
+        if url := request_dict.get('url'):
+            query &= Q(url__icontains=url)
+
+        # Paginate
+        queryset = HelpLink.objects.filter(query).order_by('-id')
+        paginator = Paginator(queryset, page_size)
+
+        try:
+            page_obj = paginator.page(page)
+        except EmptyPage:
+            return response.json(444, "Page number out of range")
+
+        # Serialise the rows
+        data = [self._help_link_to_dict(help_link) for help_link in page_obj]
+
+        return response.json(0, {
+            'list': data,
+            'total': paginator.count,
+            'currentPage': page_obj.number,
+            'totalPages': paginator.num_pages
+        })
+
+
+    def add_help_link(self, user_id, request_dict, response):
+        """Create a help link"""
+        try:
+            # Assemble the help link payload
+            help_link_data = {
+                'device_type': int(request_dict.get('deviceType', 0)),
+                'lang': request_dict.get('lang'),
+                'url': request_dict.get('url', ''),
+                'title': request_dict.get('title', ''),
+                'description': request_dict.get('description', ''),
+                'is_active': request_dict.get('isActive', 1),
+                'created_time': int(time.time()),
+                'updated_time': int(time.time()),
+            }
+            help_link = HelpLink.objects.create(**help_link_data)
+
+            return response.json(0, {
+                'id': help_link.id
+            })
+
+        except IntegrityError as e:
+            LOGGER.error(f"User {user_id} hit a uniqueness conflict creating a help link: {str(e)}")
+            return response.json(173, "Record already exists")
+        except ValueError:
+            return response.json(444, "Invalid parameter type")
+        except Exception as e:
+            LOGGER.exception(f"User {user_id} failed to add a help link: {repr(e)}")
+            return response.json(500, "Failed to add")
+
+    def edit_help_link(self, user_id, request_dict, response):
+        """Edit a help link"""
+        help_link_id = request_dict.get('helpLinkId', None)
+        device_type = request_dict.get('deviceType', None)
+        lang = request_dict.get('lang', None)
+        url = request_dict.get('url', None)
+        title = request_dict.get('title', None)
+        description = request_dict.get('description', None)
+        is_active = request_dict.get('isActive', None)
+
+        if not all([help_link_id, device_type, lang, url]):
+            return response.json(444)
+
+        try:
+            now_time = int(time.time())
+            help_link_qs = HelpLink.objects.filter(id=help_link_id).values('device_type', 'lang')
+            if not help_link_qs.exists():
+                return response.json(173)
+
+            # Remember the old values so the stale cache entry can be cleared
+            old_device_type = help_link_qs[0]['device_type']
+            old_lang = help_link_qs[0]['lang']
+
+            # Update the record
+            update_data = {
+                'device_type': int(device_type),
+                'lang': lang,
+                'url': url,
+                'title': title,
+                'description': description,
+                'updated_time': now_time
+            }
+
+            # Only update is_active when the parameter is present
+            if is_active is not None:
+                update_data['is_active'] = bool(int(is_active))  # request values arrive as strings; bool('0') would be truthy
+
+            HelpLink.objects.filter(id=help_link_id).update(**update_data)
+
+            # Clear the stale cache entry
+            self._clear_help_link_cache(old_device_type, old_lang)
+            # If the device type or language changed, clear the new key as well
+            if int(device_type) != old_device_type or lang != old_lang:
+                self._clear_help_link_cache(int(device_type), lang)
+
+            return response.json(0)
+        except Exception as e:
+            LOGGER.exception(f"User {user_id} failed to edit help link: {repr(e)}")
+            return response.json(500, f'error_line:{e.__traceback__.tb_lineno}, error_msg:{repr(e)}')
+
+    def delete_help_link(self, user_id, request_dict, response):
+        """Delete a help link"""
+        # Parameter validation
+        if 'id' not in request_dict:
+            return response.json(444, "Missing link ID")
+
+        try:
+            # Fetch the link first so its cache entry can be cleared
+            help_link = HelpLink.objects.get(id=request_dict['id'])
+            device_type = help_link.device_type
+            lang = help_link.lang
+
+            # Delete the record
+            deleted_count, _ = HelpLink.objects.filter(id=request_dict['id']).delete()
+
+            if deleted_count == 0:
+                return response.json(173, "Help link does not exist")
+
+            # Invalidate the cached entry
+            self._clear_help_link_cache(device_type, lang)
+
+            # Audit log
+            LOGGER.info(f"User {user_id} deleted help link ID: {request_dict['id']}")
+
+            return response.json(0)
+        except HelpLink.DoesNotExist:
+            return response.json(173, "Help link does not exist")
+        except ValueError:
+            return response.json(444, "Malformed link ID")
+
+    def get_by_device_type(self, user_id, request_dict, response):
+        """Fetch a help link by device type and language"""
+        try:
+            device_type = int(request_dict.get('deviceType', 0))
+            lang = request_dict.get('lang')
+        except (ValueError, TypeError):
+            return response.json(444, 'Invalid parameters')
+
+        try:
+            # Build the cache key
+            cache_key = RedisKeyConstant.HELP_LINK_TYPE.value + f'{device_type}:{lang}'
+            redis = RedisObject("help_link")
+
+            # Try the cache first
+            cached_data = redis.get_data(cache_key)
+            if cached_data:
+                cached_data = json.loads(cached_data)
+                return response.json(0, cached_data)
+
+            # Optimised lookup - fetch the best match in a single query
+            help_link = HelpLink.objects.filter(
+                Q(device_type=device_type) | Q(device_type=-1),
+                lang=lang,
+                is_active=True
+            ).order_by(
+                # Prefer an exact device-type match, then the generic type (-1)
+                Case(
+                    When(device_type=device_type, then=0),
+                    default=1,
+                )
+            ).first()
+
+            if not help_link:
+                return response.json(173)
+
+            # Build the response payload
+            data = {
+                'url': help_link.url,
+                'title': help_link.title,
+                'description': help_link.description
+            }
+
+            # Cache for 30 days
+            try:
+                redis.set_data(cache_key, json.dumps(data), RedisKeyConstant.EXPIRE_TIME_30_DAYS.value)
+            except Exception as e:
+                LOGGER.error(f"Failed to set Redis cache: {repr(e)}")
+                # A cache failure must not break the main flow
+
+            return response.json(0, data)
+
+        except Exception as e:
+            LOGGER.error(f"查询帮助链接出错: {repr(e)}")
+            return response.json(500)
+
+
+    def batch_delete(self, user_id, request_dict, response):
+        """Batch-delete help links"""
+        try:
+            ids = request_dict.get('ids')
+            if not ids:
+                return response.json(444, "Invalid parameters")
+
+            # Parse the ID list
+            if isinstance(ids, str):
+                ids = json.loads(ids)
+
+            if not isinstance(ids, list) or not ids:
+                return response.json(444, "Malformed ID list")
+
+            # Fetch the links to delete and collect cache-invalidation info
+            help_links = HelpLink.objects.filter(id__in=ids)
+            if not help_links.exists():
+                return response.json(173, "No deletable help links found")
+
+            deleted_links_info = list(help_links.values('device_type', 'lang'))
+
+            # Bulk delete
+            deleted_count, _ = help_links.delete()
+
+            # Invalidate the cached entries
+            for link_info in deleted_links_info:
+                self._clear_help_link_cache(link_info['device_type'], link_info['lang'])
+
+            # Audit log
+            LOGGER.info(f"User {user_id} batch-deleted {deleted_count} help links, IDs: {ids}")
+
+            return response.json(0, f"Deleted {deleted_count} records")
+        except json.JSONDecodeError:
+            return response.json(444, "Malformed ID list")
+        except Exception as e:
+            LOGGER.error(f"User {user_id} failed to batch-delete help links: {str(e)}")
+            return response.json(500, "Batch delete failed")
+
+    def export_help_links(self, user_id, request_dict, response):
+        """Export help links as CSV"""
+        try:
+            # Fetch all active help links
+            queryset = HelpLink.objects.filter(is_active=True).order_by('-id')
+
+            # Build the CSV in memory
+            output = StringIO()
+            writer = csv.writer(output)
+
+            # Header row
+            writer.writerow(['ID', 'Device type', 'Language', 'Title', 'URL', 'Description', 'Active', 'Created time', 'Updated time'])
+
+            # Data rows
+            for link in queryset:
+                # Normalise timestamps to integers
+                created_ts = int(link.created_time) if link.created_time else None
+                updated_ts = int(link.updated_time) if link.updated_time else None
+                created_time = datetime.datetime.fromtimestamp(created_ts).strftime(
+                    '%Y-%m-%d %H:%M:%S') if created_ts is not None else ''
+                updated_time = datetime.datetime.fromtimestamp(updated_ts).strftime(
+                    '%Y-%m-%d %H:%M:%S') if updated_ts is not None else ''
+
+                writer.writerow([
+                    link.id,
+                    link.device_type,
+                    link.lang,
+                    link.title,
+                    link.url,
+                    link.description,
+                    'Yes' if link.is_active else 'No',
+                    created_time,
+                    updated_time
+                ])
+
+            LOGGER.info(f"User {user_id} exported {queryset.count()} help links")
+
+            # Prepare the response body
+            csv_content = output.getvalue()
+            output.close()
+
+            # Build the HTTP response
+            response_http = HttpResponse(
+                csv_content,
+                content_type='text/csv',
+                headers={
+                    'Content-Disposition': 'attachment; filename="help_links_export.csv"'
+                }
+            )
+
+            return response_http
+        except Exception as e:
+            LOGGER.error(f"User {user_id} failed to export help links: {str(e)}")
+            return response.json(500, "Export failed")
+
+
+    def _help_link_to_dict(self, help_link):
+        """帮助链接对象序列化"""
+        return {
+            'id': help_link.id,
+            'deviceType': help_link.device_type,
+            'lang': help_link.lang,
+            'url': help_link.url,
+            'title': help_link.title,
+            'description': help_link.description,
+            'isActive': help_link.is_active,
+            'createdTime': help_link.created_time,
+            'updatedTime': help_link.updated_time,
+        }
+
+    def _clear_help_link_cache(self, device_type, lang):
+        """
+        Clear the cached help link entries
+        """
+        try:
+            cache_key = RedisKeyConstant.HELP_LINK_TYPE.value + f'{device_type}:{lang}'
+            redis = RedisObject("help_link")
+            redis.del_data(cache_key)
+
+            # Also clear the generic-type (-1) entry, built from the same key prefix
+            cache_key_general = RedisKeyConstant.HELP_LINK_TYPE.value + f'-1:{lang}'
+            redis.del_data(cache_key_general)
+        except Exception as e:
+            LOGGER.error(f"Failed to clear help link cache: {repr(e)}")
+

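Note: the controller imports HelpLink from Model.models, which is not part of this commit. A minimal sketch of the model the queries above appear to assume — every field name and type here is inferred from the reads and writes in the controller, not confirmed by the diff:

# Hypothetical reconstruction of the HelpLink model this controller uses;
# the authoritative definition lives in Model/models.py.
from django.db import models

class HelpLink(models.Model):
    id = models.AutoField(primary_key=True)
    device_type = models.IntegerField(default=0)   # -1 appears to act as a generic fallback type
    lang = models.CharField(max_length=16)
    url = models.CharField(max_length=255)
    title = models.CharField(max_length=255, default='')
    description = models.TextField(default='')
    is_active = models.BooleanField(default=True)
    created_time = models.IntegerField(default=0)  # Unix timestamps, like the AgentModel tables below
    updated_time = models.IntegerField(default=0)

The Case/When ordering in get_by_device_type relies on exactly this shape: an exact device_type match sorts before the generic -1 row, so first() returns the most specific active link.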
+ 884 - 0
AdminController/UIDBurnManageController.py

@@ -0,0 +1,884 @@
+# -*- encoding: utf-8 -*-
+"""
+@File    : UIDBurnManageController.py
+@Time    : 2025/7/30 08:57
+@Author  : stephen
+@Email   : zhangdongming@asj6.wecom.work
+@Software: PyCharm
+"""
+import json
+import os
+import random
+import string
+import threading
+import time
+from datetime import datetime
+from typing import Dict, Any
+from uuid import uuid4
+
+import xlrd
+from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
+from django.db import transaction
+from django.db.models import Q
+from django.http import QueryDict
+from django.views import View
+from openpyxl import load_workbook
+
+from AgentModel.models import BurnRecord, BurnEncryptedICUID, BurnBatch
+from Ansjer.config import LOGGER
+from Object.RedisObject import RedisObject
+from Object.ResponseObject import ResponseObject
+from Object.TokenObject import TokenObject
+
+
+class UIDBurnManageView(View):
+    def get(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        return self.validation(request.GET, request, operation)
+
+    def post(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        return self.validation(request.POST, request, operation)
+
+    def delete(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        delete = QueryDict(request.body)
+        if not delete:
+            delete = request.GET
+        return self.validation(delete, request, operation)
+
+    def put(self, request, *args, **kwargs):
+        request.encoding = 'utf-8'
+        operation = kwargs.get('operation')
+        put = QueryDict(request.body)
+        return self.validation(put, request, operation)
+
+    def validation(self, request_dict, request, operation):
+        """请求验证路由"""
+        # 初始化响应对象
+        language = request_dict.get('language', 'cn')
+        response = ResponseObject(language, 'pc')
+
+        # Token validation
+        try:
+            tko = TokenObject(
+                request.META.get('HTTP_AUTHORIZATION'),
+                returntpye='pc')
+            if tko.code != 0:
+                return response.json(tko.code)
+            response.lang = tko.lang
+            user_id = tko.userID
+        except Exception as e:
+            LOGGER.error(f"Token验证失败: {str(e)}")
+            return response.json(444)
+
+        if operation == 'getBurnRecordsPage':
+            return self.get_burn_records_page(request_dict, response)
+        elif operation == 'importBatchUids':
+            return self.import_batch_uids(request, response)
+        elif operation == 'addBurnRecord':
+            return self.add_burn_record(request, request_dict, response)
+        elif operation == 'batchPageUids':
+            return self.batch_page_uids(request_dict, response)
+        elif operation == 'getImportProgress':
+            return self.get_import_progress(request_dict, response)
+        elif operation == 'getImportTaskList':
+            return self.get_import_task_list(request_dict, response)
+        elif operation == 'getBatchRecordsPage':
+            return self.get_batch_records_page(request_dict, response)
+        else:
+            return response.json(414)
+
+    @classmethod
+    def get_batch_records_page(cls, request_dict: Dict[str, Any], response) -> Any:
+        """
+        Paged query of batch records (with statistics)
+        :param request_dict: request parameter dict
+        :param response: response object
+        :return: JSON response
+        """
+        # 1. Pagination parameters
+        try:
+            page = int(request_dict.get('page', 1))
+            page_size = int(request_dict.get('pageSize', 10))
+            page = max(page, 1)
+            page_size = max(1, min(page_size, 100))
+        except (ValueError, TypeError):
+            return response.json(444, "Invalid pagination parameters (must be integers)")
+
+        # 2. Build the query conditions
+        query = Q()
+        batch_number = request_dict.get('batch_number', '').strip()
+        if batch_number:
+            query &= Q(batch_number__icontains=batch_number)
+
+        # 3. Query and paginate
+        batch_qs = BurnBatch.objects.filter(query).order_by('-created_time').values(
+            'id', 'batch_number', 'purpose', 'manager', 'total_uid', 'created_time'
+        )
+
+        paginator = Paginator(batch_qs, page_size)
+        try:
+            page_obj = paginator.page(page)
+        except PageNotAnInteger:
+            page_obj = paginator.page(1)
+        except EmptyPage:
+            page_obj = paginator.page(paginator.num_pages)
+
+        # 4. Attach statistics and build the result
+        redis_obj = RedisObject()
+        batch_list = []
+
+        for batch in page_obj:
+            batch_id = batch['id']
+            cache_key = f"batch_stats:{batch_id}"
+
+            # Try the cached statistics first
+            cached_stats = redis_obj.get_data(cache_key)
+            if cached_stats:
+                stats = json.loads(cached_stats)
+            else:
+                # Fall back to counting in the database
+                burned_count = BurnEncryptedICUID.objects.filter(
+                    batch_id=batch_id,
+                    status=1
+                ).count()
+                unburned_count = BurnEncryptedICUID.objects.filter(
+                    batch_id=batch_id,
+                    status=0
+                ).count()
+
+                stats = {
+                    'burned_count': burned_count,
+                    'unburned_count': unburned_count
+                }
+                # Cache for one hour
+                redis_obj.set_data(cache_key, json.dumps(stats), 3600)
+
+            # Merge batch info with the statistics
+            batch_info = dict(batch)
+            batch_info.update(stats)
+            batch_list.append(batch_info)
+
+        return response.json(
+            0,
+            {
+                'list': batch_list,
+                'total': paginator.count,
+                'currentPage': page_obj.number,
+                'totalPages': paginator.num_pages,
+                'pageSize': page_size
+            }
+        )
+
+    @classmethod
+    def get_burn_records_page(cls, request_dict: Dict[str, Any], response) -> Any:
+        """
+        Paged query of burn records
+        :param request_dict: request parameter dict (pagination parameters and filters)
+        :param response: response object (used to render JSON)
+        :return: JSON response with the paged results
+        """
+        # 1. Pagination parameter parsing and validation
+        try:
+            page = int(request_dict.get('page', 1))
+            page_size = int(request_dict.get('pageSize', 10))
+            # Clamp the pagination range
+            page = max(page, 1)
+            page_size = max(1, min(page_size, 100))
+        except (ValueError, TypeError):
+            return response.json(444, "Invalid pagination parameters (must be integers)")
+
+        # 2. Build the query conditions
+        query = Q()
+        order_number = request_dict.get('orderNumber', '').strip()
+        if order_number:
+            query &= Q(order_number__icontains=order_number)
+
+        # 3. Build the queryset, selecting only the needed fields
+        burn_qs = BurnRecord.objects.filter(query).order_by('-created_time').values(
+            'id', 'order_number', 'burn_count', 'purpose', 'created_time'
+        )
+
+        # 4. Paginate
+        paginator = Paginator(burn_qs, page_size)
+        try:
+            page_obj = paginator.page(page)
+        except PageNotAnInteger:
+            page_obj = paginator.page(1)
+        except EmptyPage:
+            page_obj = paginator.page(paginator.num_pages)
+
+        # Convert the page to a list
+        burn_list = list(page_obj)
+        # Return the result
+        return response.json(
+            0,
+            {
+                'total': paginator.count,  # total record count
+                'list': burn_list,  # rows of the current page
+                'currentPage': page_obj.number,
+                'totalPages': paginator.num_pages
+            }
+        )
+
+    @classmethod
+    def import_batch_uids(cls, request, response) -> Any:
+        """
+        Import a batch of UIDs - async version (adapted to the new table structure)
+        :param request: HttpRequest object (carries the uploaded file)
+        :param response: response object
+        :return: JSON response
+        """
+        # 1. Validate the upload
+        if 'file' not in request.FILES:
+            return response.json(444, "Please upload an Excel file")
+
+        excel_file = request.FILES['file']
+        if not excel_file.name.endswith(('.xlsx', '.xls')):
+            return response.json(444, "Only Excel files (.xlsx/.xls) are supported")
+
+        try:
+            # 2. Generate the task ID and batch number
+            task_id = str(uuid4())
+            # Batch number combines a timestamp with random characters
+            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')  # second precision
+            random_chars = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3))  # 3 random characters
+            batch_number = f"ENG{timestamp}{random_chars}"  # format: ENG + timestamp + random characters
+
+            # 3. Initialise the task state in Redis
+            redis_key = f"import_task:{task_id}"
+            redis_obj = RedisObject()
+
+            # Persist the basic task info to Redis (2 hour TTL)
+            task_data = {
+                'status': 'pending',
+                'batch_number': batch_number,
+                'progress': 0,
+                'processed': 0,
+                'total': 0,
+                'start_time': int(time.time()),
+                'success_count': 0
+            }
+            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)
+
+            # 4. Save the upload under static/uploaded_files, keeping its original extension
+            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
+            os.makedirs(upload_dir, exist_ok=True)
+            file_path = os.path.join(upload_dir, f"{task_id}{os.path.splitext(excel_file.name)[-1].lower()}")
+
+            with open(file_path, 'wb+') as destination:
+                for chunk in excel_file.chunks():
+                    destination.write(chunk)
+
+            # 5. Kick off a background worker thread
+            thread = threading.Thread(
+                target=cls._process_import_batch_async,
+                args=(task_id, file_path, redis_key, batch_number),
+                daemon=True
+            )
+            thread.start()
+
+            return response.json(0, {
+                "task_id": task_id,
+                "batch_number": batch_number,
+                "message": "Import task submitted, processing in the background",
+                "redis_key": redis_key
+            })
+
+        except Exception as e:
+            LOGGER.error(f"Failed to create import task: {str(e)}")
+            return response.json(500, "Failed to create import task")
+
+    @classmethod
+    def _process_import_batch_async(cls, task_id, file_path, redis_key, batch_number):
+        """Background worker for a batch import (handles .xls and .xlsx; reads UIDs from the 3rd column)"""
+
+        redis_obj = RedisObject()
+        try:
+            task_data = json.loads(redis_obj.get_data(redis_key))
+            task_data['status'] = 'processing'
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # Detect the file type and read the rows
+            file_ext = os.path.splitext(file_path)[-1].lower()
+            uid_rows = []
+
+            if file_ext == '.xls':
+                # Read .xls with xlrd
+                workbook = xlrd.open_workbook(file_path)
+                sheet = workbook.sheet_by_index(0)
+                total_rows = sheet.nrows
+                for row_idx in range(0, total_rows):  # start from row 0 (no header row expected)
+                    row = sheet.row_values(row_idx)
+                    if len(row) > 2 and row[2]:
+                        uid = str(row[2]).strip()
+                        if uid:
+                            uid_rows.append(uid)
+
+            elif file_ext == '.xlsx':
+                # Read .xlsx with openpyxl
+                workbook = load_workbook(file_path, read_only=True)
+                sheet = workbook.active
+                for row in sheet.iter_rows(min_row=1, values_only=True):  # start from row 1 (no header row expected)
+                    if len(row) > 2 and row[2]:
+                        uid = str(row[2]).strip()
+                        if uid:
+                            uid_rows.append(uid)
+            task_data['total'] = len(uid_rows)
+            task_data['start_time'] = int(time.time())
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # Create the batch record
+            current_time = int(time.time())
+            with transaction.atomic():
+                batch = BurnBatch(
+                    batch_number=batch_number,
+                    purpose='batch import',
+                    created_time=current_time,
+                    manager='system',
+                    total_uid=0
+                )
+                batch.save()
+                batch_id = batch.id
+
+            # Process UIDs in batches
+            batch_size = 500
+            processed = 0
+            success_count = 0
+            uids_batch = []
+
+            for uid in uid_rows:
+                uids_batch.append(uid)
+                processed += 1
+
+                # Refresh progress every 1000 rows
+                if processed % 1000 == 0:
+                    progress = min(99, int((processed / len(uid_rows)) * 100))
+                    task_data['progress'] = progress
+                    task_data['processed'] = processed
+                    task_data['last_update'] = int(time.time())
+                    redis_obj.set_data(redis_key, json.dumps(task_data))
+
+                if len(uids_batch) >= batch_size:
+                    success = cls._import_uids_batch(
+                        uids_batch,
+                        batch_id,
+                        current_time,
+                        redis_key
+                    )
+                    success_count += success
+                    uids_batch = []
+
+            # Flush the final partial batch
+            if uids_batch:
+                success = cls._import_uids_batch(
+                    uids_batch,
+                    batch_id,
+                    current_time,
+                    redis_key
+                )
+                success_count += success
+
+            # Persist the final count to the database
+            with transaction.atomic():
+                BurnBatch.objects.filter(id=batch_id).update(total_uid=success_count)
+
+            task_data['status'] = 'completed'
+            task_data['progress'] = 100
+            task_data['processed'] = processed
+            task_data['success_count'] = success_count
+            task_data['end_time'] = int(time.time())
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            LOGGER.info(f"批次导入完成,任务ID: {task_id},处理UID: {processed},成功导入: {success_count}")
+
+            try:
+                os.remove(file_path)
+            except Exception as e:
+                LOGGER.warning(f"Failed to delete temporary file: {str(e)}")
+
+        except Exception as e:
+            LOGGER.error(f"处理导入任务失败: {str(e)}")
+            task_data = {
+                'status': 'failed',
+                'error': str(e),
+                'end_time': int(time.time())
+            }
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+    @classmethod
+    def _import_uids_batch(cls, uids_batch, batch_id, current_time, redis_key):
+        """Bulk-insert UID records (adapted to the new table structure)"""
+
+        redis_obj = RedisObject()
+
+        try:
+            with transaction.atomic():
+                # Deduplicate
+                unique_uids = list(set(uids_batch))
+
+                # Build the records
+                records = [
+                    BurnEncryptedICUID(
+                        batch_id=batch_id,
+                        uid=uid,
+                        created_time=current_time,
+                        updated_time=current_time,
+                        status=0  # not yet burned
+                    )
+                    for uid in unique_uids
+                ]
+                BurnEncryptedICUID.objects.bulk_create(records)
+
+                # Push the processed and success counts to Redis
+                task_data = json.loads(redis_obj.get_data(redis_key))
+                task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
+                task_data['success_count'] = task_data.get('success_count', 0) + len(records)
+                redis_obj.set_data(redis_key, json.dumps(task_data))
+
+                # Invalidate the batch statistics cache
+                cache_key = f"batch_stats:{batch_id}"
+                redis_obj.del_data(cache_key)
+
+                return len(records)
+
+        except Exception as e:
+            LOGGER.error(f"Bulk UID import failed: {str(e)}")
+            # Record the failure but keep processing
+            task_data = json.loads(redis_obj.get_data(redis_key))
+            task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+            return 0
+
+    @classmethod
+    def add_burn_record(cls, request, request_dict, response) -> Any:
+        """
+        Create a burn record (with a UID file) - Redis-string optimised version
+        :param request: HttpRequest object (carries the uploaded file and form data)
+        :param request_dict: request parameter dict
+        :param response: response object
+        :return: JSON response
+        """
+        # 1. Validate required fields
+        required_fields = ['order_number', 'burn_count', 'purpose']
+        for field in required_fields:
+            if not request_dict.get(field):
+                return response.json(444, f"Missing required parameter: {field}")
+
+        # 2. Validate the upload
+        if 'uid_file' not in request.FILES:
+            return response.json(444, "Please upload an Excel file of burned UIDs")
+
+        excel_file = request.FILES['uid_file']
+        if not excel_file.name.endswith('.xlsx'):
+            # The async worker reads the file with openpyxl, which only handles .xlsx
+            return response.json(444, "Only .xlsx Excel files are supported")
+
+        try:
+            burn_count = int(request_dict['burn_count'])
+            if burn_count <= 0:
+                return response.json(444, "Burn count must be greater than 0")
+
+            # 3. Create the task ID and initialise the Redis state
+            task_id = str(uuid4())
+            redis_key = f"burn_task:{task_id}"
+            redis_obj = RedisObject()
+
+            # Persist the basic task info to Redis (2 hour TTL)
+            task_data = {
+                'status': 'pending',
+                'order_number': request_dict['order_number'].strip(),
+                'burn_count': burn_count,
+                'purpose': request_dict['purpose'].strip(),
+                'progress': 0,
+                'processed': 0,
+                'total': 0,
+                'start_time': int(time.time())
+            }
+            redis_obj.set_data(redis_key, json.dumps(task_data), 7200)
+
+            # 4. Save the upload under static/uploaded_files
+            base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+            upload_dir = os.path.join(base_dir, 'static', 'uploaded_files')
+            os.makedirs(upload_dir, exist_ok=True)
+            file_path = os.path.join(upload_dir, f"{task_id}.xlsx")
+
+            with open(file_path, 'wb+') as destination:
+                for chunk in excel_file.chunks():
+                    destination.write(chunk)
+
+            # 5. Kick off a background worker thread
+            thread = threading.Thread(
+                target=cls._process_burn_record_async,
+                args=(task_id, file_path, redis_key),
+                daemon=True
+            )
+            thread.start()
+
+            return response.json(0, {
+                "task_id": task_id,
+                "message": "Task submitted, processing in the background",
+                "redis_key": redis_key
+            })
+
+        except ValueError:
+            return response.json(444, "Burn count must be an integer")
+        except Exception as e:
+            LOGGER.error(f"Failed to create burn task: {str(e)}")
+            return response.json(500, "Failed to create burn task")
+
+    @classmethod
+    def _process_burn_record_async(cls, task_id, file_path, redis_key):
+        """Background worker for a burn record task"""
+        redis_obj = RedisObject()
+
+        try:
+            # Fetch the task and mark it as processing
+            task_data = json.loads(redis_obj.get_data(redis_key))
+            task_data['status'] = 'processing'
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # 1. Read the Excel file to get the total row count
+            wb = load_workbook(file_path)
+            ws = wb.active
+            total_rows = ws.max_row
+
+            # Record the total and the start time
+            task_data['total'] = total_rows
+            task_data['start_time'] = int(time.time())
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # 2. Create the burn record
+            with transaction.atomic():
+                burn_record = BurnRecord(
+                    order_number=task_data['order_number'],
+                    burn_count=task_data['burn_count'],
+                    purpose=task_data['purpose'],
+                    updated_time=int(time.time()),
+                    created_time=int(time.time())
+                )
+                burn_record.save()
+                task_data['burn_record_id'] = burn_record.id
+                redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # 3. Process the UID file in batches of 300
+            batch_size = 300
+            current_time = int(time.time())
+            processed = 0
+            uids_batch = []
+
+            for row in ws.iter_rows(min_row=1, values_only=True):
+                if row[0]:
+                    uid = str(row[0]).strip()
+                    if uid:
+                        uids_batch.append(uid)
+                        processed += 1
+
+                        # Refresh progress every 100 rows
+                        if processed % 100 == 0:
+                            progress = min(99, int((processed / total_rows) * 100))
+                            task_data['progress'] = progress
+                            task_data['processed'] = processed
+                            task_data['last_update'] = int(time.time())
+                            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+                        # Flush a full batch
+                        if len(uids_batch) >= batch_size:
+                            cls._update_uids_batch(
+                                uids_batch,
+                                burn_record.id,
+                                current_time,
+                                redis_key
+                            )
+                            uids_batch = []
+
+            # Flush the final partial batch
+            if uids_batch:
+                cls._update_uids_batch(
+                    uids_batch,
+                    burn_record.id,
+                    current_time,
+                    redis_key
+                )
+
+            # Write the final task state
+            task_data['status'] = 'completed'
+            task_data['progress'] = 100
+            task_data['processed'] = processed
+            task_data['end_time'] = int(time.time())
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+            # Find the affected batch IDs and invalidate their cached statistics
+            batch_ids = BurnEncryptedICUID.objects.filter(
+                burn_id=burn_record.id
+            ).values_list('batch_id', flat=True).distinct()
+
+            for batch_id in batch_ids:
+                cache_key = f"batch_stats:{batch_id}"
+                redis_obj.del_data(cache_key)
+
+            LOGGER.info(f"处理烧录记录完成,任务ID: {task_id}, 处理UID数量: {processed}")
+
+            # Remove the temporary file
+            try:
+                os.remove(file_path)
+            except Exception as e:
+                LOGGER.warning(f"Failed to delete temporary file: {str(e)}")
+
+        except Exception as e:
+            LOGGER.error(f"处理烧录记录失败: {str(e)}")
+            task_data = {
+                'status': 'failed',
+                'error': str(e),
+                'end_time': int(time.time())
+            }
+            redis_obj.set_data(redis_key, json.dumps(task_data))
+
+    @classmethod
+    def _update_uids_batch(cls, uids_batch, burn_id, current_time, redis_key):
+        """Bulk-update UID records as burned"""
+        redis_obj = RedisObject()
+
+        try:
+            with transaction.atomic():
+                # First collect the batch IDs that will be affected
+                batch_ids = BurnEncryptedICUID.objects.filter(
+                    uid__in=uids_batch
+                ).values_list('batch_id', flat=True).distinct()
+
+                updated = BurnEncryptedICUID.objects.filter(
+                    uid__in=uids_batch
+                ).update(
+                    burn_id=burn_id,
+                    status=1,  # burned successfully
+                    updated_time=current_time
+                )
+                # Push the processed count to Redis
+                task_data = json.loads(redis_obj.get_data(redis_key))
+                task_data['processed'] = task_data.get('processed', 0) + len(uids_batch)
+                redis_obj.set_data(redis_key, json.dumps(task_data))
+
+                # Invalidate the statistics cache of the affected batches
+                for batch_id in batch_ids:
+                    cache_key = f"batch_stats:{batch_id}"
+                    redis_obj.del_data(cache_key)
+
+        except Exception as e:
+            LOGGER.error(f"批量更新UID失败: {str(e)}")
+            raise
+
+    @classmethod
+    def get_import_progress(cls, request_dict: Dict[str, Any], response) -> Any:
+        """
+        Query task progress (supports import and burn tasks)
+        :param request_dict: request parameter dict (must contain task_id; type is optional)
+        :param response: response object
+        :return: JSON response
+        The type parameter:
+        - import: import task (default)
+        - burn: burn task
+        """
+        # 1. Parameter validation
+        task_id = request_dict.get('task_id')
+        if not task_id:
+            return response.json(444, "Missing task_id parameter")
+
+        task_type = request_dict.get('type', 'import').lower()
+        if task_type not in ['import', 'burn']:
+            return response.json(444, "type must be 'import' or 'burn'")
+
+        # 2. Build the Redis key
+        redis_key = f"{task_type}_task:{task_id}"
+
+        try:
+            # 3. Fetch the task data from Redis
+            redis_obj = RedisObject()
+            task_data_str = redis_obj.get_data(redis_key)
+
+            if not task_data_str:
+                return response.json(173, "Task does not exist or has expired")
+
+            # 4. Parse the task data
+            if isinstance(task_data_str, bytes):
+                task_data_str = task_data_str.decode('utf-8')
+            task_data = json.loads(task_data_str)
+
+            # 5. Compute the elapsed time (seconds)
+            current_time = int(time.time())
+            start_time = task_data.get('start_time', current_time)
+            elapsed = current_time - start_time
+            if task_data.get('end_time'):
+                elapsed = task_data['end_time'] - start_time
+
+            # 6. Build the base response payload
+            result = {
+                'status': task_data.get('status', 'unknown'),
+                'progress': task_data.get('progress', 0),
+                'processed': task_data.get('processed', 0),
+                'total': task_data.get('total', 0),
+                'elapsed_seconds': elapsed,
+                'start_time': start_time,
+                'end_time': task_data.get('end_time'),
+                'error': task_data.get('error'),
+                'task_type': task_type
+            }
+
+            # 7. Add task-type-specific fields
+            if task_type == 'import':
+                # Fetch the batch number from Redis and look up the batch record
+                batch_number = task_data.get('batch_number', '')
+                if batch_number:
+                    try:
+                        batch = BurnBatch.objects.filter(batch_number=batch_number).first()
+                        if batch:
+                            result.update({
+                                'batch_number': batch_number,
+                                'success_count': task_data.get('success_count', 0),
+                                'purpose': batch.purpose,
+                                'manager': batch.manager,
+                                'total_uid': batch.total_uid
+                            })
+                        else:
+                            result.update({
+                                'batch_number': batch_number,
+                                'success_count': task_data.get('success_count', 0)
+                            })
+                    except Exception as e:
+                        LOGGER.error(f"查询批次信息失败: {str(e)}")
+                        result.update({
+                            'batch_number': batch_number,
+                            'success_count': task_data.get('success_count', 0)
+                        })
+            else:  # burn task
+                result.update({
+                    'order_number': task_data.get('order_number', ''),
+                    'purpose': task_data.get('purpose', ''),
+                    'burn_count': task_data.get('burn_count', 0),
+                    'burn_record_id': task_data.get('burn_record_id')
+                })
+
+            return response.json(0, result)
+
+        except json.JSONDecodeError:
+            LOGGER.error(f"Failed to parse task data, redis_key: {redis_key}")
+            return response.json(500, "Malformed task data")
+        except Exception as e:
+            LOGGER.error(f"Failed to query task progress: {str(e)}")
+            return response.json(500, "Failed to query progress")
+
+    @classmethod
+    def get_import_task_list(cls, request_dict: Dict[str, Any], response) -> Any:
+        """
+        List all import tasks
+        :param request_dict: request parameter dict
+        :param response: response object
+        :return: JSON response
+        """
+
+        try:
+            redis_obj = RedisObject()
+            # Fetch every key with the import_task prefix
+            keys = redis_obj.get_keys("import_task:*")
+
+            if not keys:
+                return response.json(0, {"tasks": []})
+
+            tasks = []
+
+            # Collect the basic info of each task
+            for key in keys:
+                try:
+                    task_data_str = redis_obj.get_data(key)
+                    if task_data_str:
+                        # Make sure task_data_str is a string
+                        if isinstance(task_data_str, bytes):
+                            task_data_str = task_data_str.decode('utf-8')
+                        task_data = json.loads(task_data_str)
+                        # The key may come back as bytes
+                        key_str = key.decode('utf-8') if isinstance(key, bytes) else key
+                        tasks.append({
+                            'task_id': key_str.split(':')[1],  # extract the task_id from the key
+                            'status': task_data.get('status', 'unknown'),
+                            'progress': task_data.get('progress', 0),
+                            'batch_number': task_data.get('batch_number', ''),
+                            'start_time': task_data.get('start_time'),
+                            'end_time': task_data.get('end_time'),
+                            'processed': task_data.get('processed', 0),
+                            'total': task_data.get('total', 0),
+                            'redis_key': key_str
+                        })
+                except Exception as e:
+                    LOGGER.error(f"解析任务数据失败, key: {key}, error: {str(e)}")
+                    continue
+
+            # Sort by start time, newest first
+            tasks.sort(key=lambda x: x.get('start_time', 0), reverse=True)
+
+            # Cap the result size (most recent 100)
+            tasks = tasks[:100]
+
+            return response.json(0, {"tasks": tasks})
+
+        except Exception as e:
+            LOGGER.error(f"Failed to fetch import task list: {str(e)}")
+            return response.json(500, "Failed to fetch task list")
+
+    @classmethod
+    def batch_page_uids(cls, request_dict: Dict[str, Any], response) -> Any:
+        """
+        Paged query of burn UID records, filtered by batch_id
+        :param request_dict: request parameter dict
+        :param response: response object
+        :return: JSON response
+        """
+        # 1. Pagination parameters
+        try:
+            page = int(request_dict.get('page', 1))
+            page_size = int(request_dict.get('pageSize', 10))
+            page = max(page, 1)
+            page_size = max(1, min(page_size, 100))
+        except (ValueError, TypeError):
+            return response.json(444, "Invalid pagination parameters (must be integers)")
+
+        # 2. Build the query conditions
+        query = Q()
+
+        # Optional batch_id filter
+        batch_id = request_dict.get('batch_id')
+        if batch_id:
+            try:
+                batch_id = int(batch_id)
+                query &= Q(batch_id=batch_id)
+            except ValueError:
+                return response.json(444, "batch_id must be an integer")
+
+        # 3. Query and paginate
+        uid_qs = BurnEncryptedICUID.objects.filter(query).order_by('-created_time').values(
+            'id', 'uid', 'batch_id', 'status', 'created_time', 'updated_time'
+        )
+
+        paginator = Paginator(uid_qs, page_size)
+        try:
+            page_obj = paginator.page(page)
+        except PageNotAnInteger:
+            page_obj = paginator.page(1)
+        except EmptyPage:
+            page_obj = paginator.page(paginator.num_pages)
+
+        # Convert the page to a list
+        uid_list = list(page_obj)
+
+        return response.json(
+            0,
+            {
+                'list': uid_list,
+                'total': paginator.count,
+                'currentPage': page_obj.number,
+                'totalPages': paginator.num_pages,
+                'pageSize': page_size
+            }
+        )
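
The import and burn endpoints return immediately and push progress into Redis from a daemon thread, so a client drives them by submitting the file and then polling. A sketch of that loop under stated assumptions: the host, port, and token are placeholders; importBatchUids reads UIDs from the third spreadsheet column as implemented above; and the JSON envelope is assumed to expose the handler payload under a data key.

# Hypothetical client driver for the asynchronous import flow.
import time

import requests

BASE = 'http://127.0.0.1:8000'                # placeholder deployment
HEADERS = {'Authorization': '<admin-token>'}  # placeholder token

# Submit the spreadsheet; UIDs are expected in the third column.
with open('uids.xlsx', 'rb') as f:
    submitted = requests.post(f'{BASE}/uidBurnManage/importBatchUids',
                              headers=HEADERS, files={'file': f}).json()
task_id = submitted['data']['task_id']

# Poll getImportProgress until the background thread finishes.
while True:
    progress = requests.get(f'{BASE}/uidBurnManage/getImportProgress',
                            headers=HEADERS,
                            params={'task_id': task_id, 'type': 'import'}).json()['data']
    if progress['status'] in ('completed', 'failed'):
        break
    time.sleep(2)

print(progress.get('batch_number'), progress.get('success_count'), progress.get('error'))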

+ 47 - 0
AgentModel/models.py

@@ -217,3 +217,50 @@ class DeviceCustomUID(models.Model):
         verbose_name = '设备关联定制UID'
         verbose_name_plural = verbose_name
         app_label = 'AgentModel'
+
+
+class BurnRecord(models.Model):
+    id = models.AutoField(primary_key=True, verbose_name='auto-increment ID')
+    order_number = models.CharField(default='', max_length=32, unique=True, verbose_name='order number')
+    burn_count = models.IntegerField(verbose_name='burn count')
+    purpose = models.TextField(verbose_name='purpose')
+    created_time = models.IntegerField(default=0, verbose_name='created time')
+    updated_time = models.IntegerField(default=0, verbose_name='updated time')
+
+    class Meta:
+        db_table = 'burn_record'
+        verbose_name = 'burn record'
+        verbose_name_plural = verbose_name
+        app_label = 'AgentModel'
+
+
+class BurnBatch(models.Model):
+    id = models.AutoField(primary_key=True, verbose_name='batch ID')
+    batch_number = models.CharField(max_length=50, unique=True, verbose_name='batch number')
+    purpose = models.TextField(verbose_name='purpose')  # batch-level purpose (replaces the per-row purpose to avoid redundancy)
+    created_time = models.IntegerField(default=0, verbose_name='batch created time')
+    manager = models.CharField(max_length=100, verbose_name='manager')  # new: person in charge (name or staff ID)
+    total_uid = models.IntegerField(default=0, verbose_name='total UID count')  # new: number of UIDs in this batch
+    # Extensible with further batch attributes: source platform, owner, precomputed totals, etc.
+
+    class Meta:
+        db_table = 'burn_batch'
+        verbose_name = 'burn batch'
+        verbose_name_plural = verbose_name
+        app_label = 'AgentModel'
+
+
+class BurnEncryptedICUID(models.Model):
+    id = models.AutoField(primary_key=True, verbose_name='auto-increment ID')
+    batch_id = models.IntegerField(default=0, verbose_name='UID batch ID')
+    burn_id = models.IntegerField(default=0, verbose_name='burn record ID')
+    uid = models.CharField(default='', db_index=True, max_length=32, verbose_name='device UID')
+    status = models.IntegerField(default=0, verbose_name='0: downloaded, 1: burn succeeded, 2: failed')
+    created_time = models.IntegerField(default=0, verbose_name='created time')
+    updated_time = models.IntegerField(default=0, verbose_name='updated time')
+
+    class Meta:
+        db_table = 'burn_encrypted_ic_uid'
+        verbose_name = 'burn encrypted IC UID'
+        verbose_name_plural = verbose_name
+        app_label = 'AgentModel'
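
batch_id and burn_id are plain IntegerFields rather than ForeignKeys, so the controllers join these tables by hand. One consequence worth noting: the two per-batch .count() calls in get_batch_records_page could be collapsed into a single conditional aggregate. A sketch using the standard ORM against the models above:

# One-query alternative to the burned/unburned counts in get_batch_records_page.
from django.db.models import Count, Q

from AgentModel.models import BurnEncryptedICUID

def batch_stats(batch_id: int) -> dict:
    # filter= on Count emits SQL like COUNT(CASE WHEN status = 1 THEN id END)
    return BurnEncryptedICUID.objects.filter(batch_id=batch_id).aggregate(
        burned_count=Count('id', filter=Q(status=1)),
        unburned_count=Count('id', filter=Q(status=0)),
    )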

+ 3 - 1
Ansjer/urls.py

@@ -6,7 +6,7 @@ from AdminController import UserManageController, RoleController, MenuController
     ServeManagementController, LogManagementController, DeviceManagementController, VersionManagementController, \
     AiServeController, SurveysManageController, SerialManageController, MessageMangementController, \
     EvaluationActivityController, CampaignController, DataDictManageController, ProductsSchemeManageController, \
-    ProblemEntryManagementController
+    ProblemEntryManagementController, HelpLinkManageController, UIDBurnManageController
 from AdminController.CloudServiceManage import AgentDeviceController, AgentCustomerController, AgentOrderController
 from Controller import FeedBack, EquipmentOTA, EquipmentInfo, AdminManage, AppInfo, \
     Test, MealManage, DeviceManage, EquipmentStatus, SysManage, DeviceLog, LogAccess, \
@@ -450,6 +450,8 @@ urlpatterns = [
     # 产品问题录入
     re_path('problemEntryManage/(?P<operation>.*)',
             ProblemEntryManagementController.ProblemEntryView.as_view()),
+    re_path('helpLinkManage/(?P<operation>.*)', HelpLinkManageController.HelpLinkManageView.as_view()),
+    re_path('uidBurnManage/(?P<operation>.*)', UIDBurnManageController.UIDBurnManageView.as_view()),
     # 后台界面接口 -------------------------------------------------------------------------------------------------------
 
     # 定时任务接口
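
Both new routes capture everything after the prefix as the operation kwarg, which the views then dispatch on. A sketch of hitting the help link list endpoint — the host and token are placeholders; the query parameters are the ones query_list reads:

# Hypothetical request against the new helpLinkManage route.
import requests

resp = requests.get(
    'http://127.0.0.1:8000/helpLinkManage/queryList',  # placeholder host
    headers={'Authorization': '<admin-token>'},        # placeholder token
    params={'page': 1, 'pageSize': 10, 'deviceType': 3, 'lang': 'en'},
)
print(resp.json())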

+ 1 - 1
Controller/SerialNumberController.py

@@ -696,7 +696,7 @@ class SerialNumberView(View):
             elif company_serial.status == 3:  # 已占用
                 sync_success = self.sync_serial_data_and_log(request, company_serial.id, serial_number, now_time)
                 if not sync_success:
-                    return response.json(10044)
+                    return response.json(20001)
                 return response.json(0, self.get_uid_info_by_serial(company_serial.id))
         except Exception as e:
             error_logger = logging.getLogger('django')
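
With this change, a failed sync on an occupied serial surfaces 20001 instead of 10044; both map to "Please contact customer service", so the swap is invisible to users but lets clients and logs distinguish the new failure path. A sketch of client-side handling — the envelope key names and helpers here are assumptions, not part of this commit:

# Hypothetical client-side handling of the new 20001 code.
def handle_serial_response(result: dict) -> str:
    if result.get('result_code') == 20001:   # occupied serial whose data sync failed
        return 'contact customer service'
    if result.get('result_code') == 0:
        return f"uid info: {result.get('data')}"
    return f"error {result.get('result_code')}"

print(handle_serial_response({'result_code': 20001}))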

+ 3 - 1
Object/uidManageResponseObject.py

@@ -65,8 +65,9 @@ class uidManageResponseObject(object):
             10042: '序列号已被占用',
             10043: '无法解绑,序列号的状态为被占用',
             10044: '请联系客服',
+            20001: '请联系客服',
         }
-
+        # UID-management status codes use the 2xxxx range to avoid clashing with ResponseObject codes
         data_en = {
             0: 'Success',
             5: 'Please try again one minute later!',
@@ -120,6 +121,7 @@ class uidManageResponseObject(object):
             10042: 'Serial number is already occupied',
             10043: 'Unable to unbind, the status of the serial number is occupied',
             10044: 'Please contact customer service',
+            20001: 'Please contact customer service',
         }
 
         msg = data_cn if self.lang == 'cn' else data_en
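
The comment above explains the choice of 20001: UID-management codes move to a 2xxxx range to stay clear of the status codes ResponseObject already uses. A minimal sketch of the lookup these tables drive — the function shape is assumed from the dict-per-language layout shown, using only entries visible in this diff:

# Hypothetical distillation of the message lookup in uidManageResponseObject.
def resolve_message(code: int, lang: str = 'en') -> str:
    data_cn = {10044: '请联系客服', 20001: '请联系客服'}
    data_en = {10044: 'Please contact customer service',
               20001: 'Please contact customer service'}
    msg = data_cn if lang == 'cn' else data_en
    return msg.get(code, 'unknown code')

assert resolve_message(20001) == resolve_message(10044)  # same user-facing text, distinct code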