diff --git a/Dt_report.py b/Dt_report.py index 784c7b9..a550877 100644 --- a/Dt_report.py +++ b/Dt_report.py @@ -26,7 +26,7 @@ from tools.Get_Json import ( from tools.dataproccess import ( caculate_work_days,get_year_month, merge_info,get_defect_str, - safe_get,get_resource_path + safe_get,get_resource_path,merge_dicts ) @@ -121,8 +121,8 @@ async def generate_dt_report(base_info, baogao_info): # weather = get_weather(shigong_data["weatherCode"]) #天气 不从此接口获取,待定!!! #拉取部件、图片数据 - part_data, picture_data, Yepians = get_part_picture(turbine_id) - + part_data, picture_data1, picture_data2, Yepians = get_part_picture(turbine_id) + picture_data = merge_dicts(picture_data1, picture_data2) print(Yepians) Y1_info = get_yepian_xiangqing(Yepians[0]["partId"]) Y_Code = [yepian["partCode"] for yepian in Yepians] diff --git a/Jf_report.py b/Jf_report.py index 066cb9d..b37d8ea 100644 --- a/Jf_report.py +++ b/Jf_report.py @@ -3,7 +3,7 @@ from tools.document_tools import ( create_document, add_documents,add_table_and_replace, add_table_to_document,add_dynamic_table, process_server_images_table,add_header,add_landscape_section, - merge_documents,add_table,add_defect_info_table + merge_documents,add_table,add_defect_info_table,add_table_title ) # 内容处理工具 @@ -28,9 +28,11 @@ from tools.dataproccess import ( caculate_work_days,get_year_month, merge_info,get_defect_str,get_defect_json, safe_get,get_resource_path,get_defedct_info, + tree_dict_to_table_data,merge_dicts, + defect_list_addtitle ) -from tools.json_to_docx import json_to_docx +from tools.json_to_docx import json_to_docx, list_to_json_with_merges from core.tables import fill_tables @@ -123,8 +125,8 @@ async def generate_jf_report(base_info, baogao_info): # weather = get_weather(shigong_data["weatherCode"]) #天气 不从此接口获取,待定!!! 
#拉取部件、图片数据 - part_data, picture_data, Yepians = get_part_picture(turbine_id) - + part_data, picture_data1, picture_data2, Yepians = get_part_picture(turbine_id) + picture_data = merge_dicts(picture_data1, picture_data2) print(Yepians) Y1_info = get_yepian_xiangqing(Yepians[0]["partId"]) Y_Code = [yepian["partCode"] for yepian in Yepians] @@ -142,10 +144,17 @@ async def generate_jf_report(base_info, baogao_info): jiancha = [] neirong = [] neirong2 = [] + use_tool_table = [] + table_index = 1 #获取对应枚举字段 if if_waibu: baogao_label.append("外观") image_source_to_find.append(baogao_info['waibu_enum']) + use_tool_table.extend( + [ [str(idx)] + item + for idx, item in enumerate(USE_TOOL_ENUM.OUT.LIST, start=table_index) ] + ) + table_index += len(USE_TOOL_ENUM.OUT.LIST) if baogao_info["shigong_fangan"] == None: print("未传入施工方案,使用已有枚举") renyuan_peizhi.append(SHIGONG_FANGAN_ENUM.WAIBU.RENYUAN_PEIZHI) @@ -160,6 +169,11 @@ async def generate_jf_report(base_info, baogao_info): if if_neibu: baogao_label.append("内部") image_source_to_find.append(baogao_info['neibu_enum']) + use_tool_table.extend( + [ [str(idx)] + item + for idx, item in enumerate(USE_TOOL_ENUM.IN.LIST, start=table_index) ] + ) + table_index += len(USE_TOOL_ENUM.IN.LIST) if baogao_info["shigong_fangan"] == None: renyuan_peizhi.append(SHIGONG_FANGAN_ENUM.NEIBU.RENYUAN_PEIZHI) gongzuo_neirong.append(SHIGONG_FANGAN_ENUM.NEIBU.GONGZUO_NEIRONG) @@ -173,6 +187,11 @@ async def generate_jf_report(base_info, baogao_info): if if_fanglei: baogao_label.append("防雷") image_source_to_find.append(baogao_info['fanglei_enum']) + use_tool_table.extend( + [ [str(idx)] + item + for idx, item in enumerate(USE_TOOL_ENUM.LIGHT.LIST, start=table_index) ] + ) + table_index += len(USE_TOOL_ENUM.LIGHT.LIST) if baogao_info["shigong_fangan"] == None: renyuan_peizhi.append(SHIGONG_FANGAN_ENUM.FANGLEI.YEPIAN.RENYUAN_PEIZHI) gongzuo_neirong.append(SHIGONG_FANGAN_ENUM.FANGLEI.YEPIAN.GONGZUO_NEIRONG) @@ -235,13 +254,14 @@ async def generate_jf_report(base_info, baogao_info): "top_margin" : JF_T_MARGIN, "bottom_margin" : JF_B_MARGIN, })) + #加封面 merge_documents(output_dir, add_list) - + #页眉 add_header(output_dir, TEMPLATE_HEADER.JINFENG_HEADER.ENUM) - + #项目机组信息表格 print(add_documents(output_dir, get_resource_path(MUBAN_DIR + '/jf_table_title.docx'))) total_table_num = 0 - print(add_table(output_dir, get_resource_path(MUBAN_DIR + '/jinfeng_table.docx'))) + print(add_table(output_dir, get_resource_path(MUBAN_DIR + '/jinfeng_table.docx'), REPORT_ENUM=TEMPLATE_HEADER.JINFENG_HEADER.ENUM)) total_picture_num += 1 list_to_replace = { 'jia_company_name' : Jia_company, @@ -271,14 +291,22 @@ async def generate_jf_report(base_info, baogao_info): print(search_and_replace(output_dir, find_text, replace_text)) print(f"静态内容生成完毕,开始生成动态内容") - defect_info = get_defedct_info(defect_records_with_pic) + #缺陷信息表格1 + defect_info, defect_part_type_list = get_defedct_info(defect_records_with_pic) - total_picture_num = add_defect_info_table(output_dir, defect_info, get_resource_path(MUBAN_DIR + '/quexian_liebiao.docx'), total_table_num) + total_picture_num = add_defect_info_table(output_dir, defect_info, get_resource_path(MUBAN_DIR + '/quexian_liebiao.docx'), total_table_num,TEMPLATE_HEADER.JINFENG_HEADER.ENUM) - # defect_json = get_defect_json(defect_info, Y_Code, jizu_bianhao) + #缺陷信息表格2 - # print(f"{defect_json} {type(defect_json)}") - # doc = json_to_docx(defect_json) - - # doc.save(output_dir) \ No newline at end of file + table_list = tree_dict_to_table_data(defect_part_type_list) + table_list = 
defect_list_addtitle(table_list, jizu_bianhao) + print(table_list) + add_table_title(output_dir, DEFECT_TABLE_TITLE) + table_json = list_to_json_with_merges(table_list, style_config=STYLE_CONFIG, merge_columns=2) + json_to_docx(table_json, output_dir).save(output_dir) + + #使用器具记录表 + add_table_title(output_dir, USE_TOOL_TABLE_TITLE) + print(use_tool_table) + json_to_docx(list_to_json_with_merges(use_tool_table, style_config=STYLE_CONFIG, detect_merges=False),output_dir).save(output_dir) \ No newline at end of file diff --git a/__pycache__/Dt_report.cpython-310.pyc b/__pycache__/Dt_report.cpython-310.pyc index b29a7cc..32dc3e9 100644 Binary files a/__pycache__/Dt_report.cpython-310.pyc and b/__pycache__/Dt_report.cpython-310.pyc differ diff --git a/__pycache__/Jf_report.cpython-310.pyc b/__pycache__/Jf_report.cpython-310.pyc index e0184b3..86bf436 100644 Binary files a/__pycache__/Jf_report.cpython-310.pyc and b/__pycache__/Jf_report.cpython-310.pyc differ diff --git a/core/__pycache__/tables.cpython-310.pyc b/core/__pycache__/tables.cpython-310.pyc index 6a3dfa4..f489995 100644 Binary files a/core/__pycache__/tables.cpython-310.pyc and b/core/__pycache__/tables.cpython-310.pyc differ diff --git a/core/tables.py b/core/tables.py index bb19736..d0fbf6e 100644 --- a/core/tables.py +++ b/core/tables.py @@ -103,7 +103,7 @@ def apply_table_style(table, has_header_row=False, border_style=None, shading=No except Exception: return False -def copy_table(source_table, target_doc, ifadjustheight=True, height = 1): +def copy_table(source_table, target_doc, ifadjustheight=True, height = 1, if_merge = True, REPORT_ENUM = 'DT'): """ Copy a table from one document to another. @@ -143,7 +143,9 @@ def copy_table(source_table, target_doc, ifadjustheight=True, height = 1): new_table.cell(i,j).paragraphs[0].paragraph_format.alignment = WD_TABLE_ALIGNMENT.CENTER new_table.cell(i,j).vertical_alignment = WD_ALIGN_VERTICAL.CENTER #new_table.cell(i,j).width = cell.width - + if REPORT_ENUM == 'JF': + new_table.cell(i,j).paragraphs[0].runs[0]._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体') #设置中文字体 + new_table.cell(i,j).paragraphs[0].runs[0].font.size = Pt(12) # 字体大小 """ 待添加:如何让表格自适应大小(autofit目前不知为何没有作用) """ @@ -152,10 +154,11 @@ def copy_table(source_table, target_doc, ifadjustheight=True, height = 1): if not ifadjustheight: new_table.auto_fit = True - try: - new_table = merge_tables(new_table) - except Exception as e: - print(f"合并表格失败:{e}") + if if_merge: + try: + new_table = merge_tables(new_table) + except Exception as e: + print(f"合并表格失败:{e}") return target_doc diff --git a/output/test.docx b/output/test.docx deleted file mode 100644 index 338a7fe..0000000 Binary files a/output/test.docx and /dev/null differ diff --git a/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版36.docx b/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版36.docx deleted file mode 100644 index a6ad2b3..0000000 Binary files a/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版36.docx and /dev/null differ diff --git a/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版35.docx b/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月30日版.docx similarity index 94% rename from output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版35.docx rename to output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月30日版.docx index bd70562..7ee206c 100644 Binary files a/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月29日版35.docx and b/output/三峡能源阿城万兴风电场防雷通道检测项目项目叶片外观、内部、防雷检查报告一期012号2025年07月30日版.docx differ 
diff --git a/tools/1.md b/tools/1.md new file mode 100644 index 0000000..fcc0d2f --- /dev/null +++ b/tools/1.md @@ -0,0 +1,86 @@ +```python +def tree_dict_to_table_data(tree: Dict) -> List[List[str]]: + """ + 将树状字典转换为二维表格数据 + + 参数: + tree: 树状字典,结构为dict[dict[...[list]]] + + 返回: + 二维列表表示的表格数据 + """ + if not tree: + return [] + + # 首先确定树的深度 + depth = 0 + current = tree + while isinstance(current, dict): + depth += 1 + # 获取第一个子节点来继续探测深度 + if current: + current = next(iter(current.values())) + else: + break + + # 收集所有路径和叶子节点 + paths = [] + + def traverse(node, current_path): + if isinstance(node, dict): + for key, value in node.items(): + traverse(value, current_path + [str(key)]) + elif isinstance(node, list): + paths.append((current_path, node)) + else: + paths.append((current_path, [str(node)])) + + traverse(tree, []) + + # 确定最大深度(处理可能的不平衡树) + max_depth = max(len(path) for path, _ in paths) if paths else 0 + max_leaf_length = max(len(leaf) for _, leaf in paths) if paths else 0 + + # 填充路径到最大深度 + filled_paths = [] + for path, leaf in paths: + # 填充路径 + filled_path = path.copy() + while len(filled_path) < max_depth: + filled_path.append("") # 用空字符串填充不足的深度 + + # 填充叶子节点 + filled_leaf = leaf.copy() + while len(filled_leaf) < max_leaf_length: + filled_leaf.append("") # 用空字符串填充不足的叶子长度 + + filled_paths.append((filled_path, filled_leaf)) + + # 构建表格数据 + table_data = [] + + # 添加路径部分的行 + for i in range(max_depth): + row = [] + for path, leaf in filled_paths: + row.append(path[i]) + table_data.append(row) + + # 添加叶子部分的行 + for i in range(max_leaf_length): + row = [] + for path, leaf in filled_paths: + row.append(leaf[i] if i < len(leaf) else "") + table_data.append(row) + + # 转置表格,使每个路径+叶子成为一列 + if table_data: + # 获取最大列数 + max_cols = max(len(row) for row in table_data) if table_data else 0 + # 统一每行的列数 + table_data = [row + [""] * (max_cols - len(row)) for row in table_data] + # 转置 + table_data = list(map(list, zip(*table_data))) + + return table_data +``` \ No newline at end of file diff --git a/tools/Get_Json.py b/tools/Get_Json.py index 00957b9..e303d58 100644 --- a/tools/Get_Json.py +++ b/tools/Get_Json.py @@ -1,7 +1,6 @@ import requests import json from tools.API import * -from tools.dataproccess import merge_dicts def get_data(url : str, data_type : str = "data", params : dict = None ) -> dict: headers = { @@ -97,9 +96,7 @@ def get_part_picture(turbineId : str) -> tuple[list[dict], dict[list], list[str] except Exception as e: print(f"获取叶片信息失败,异常:{e}") return None, None, None - #叶片和其它部件图片入队整合 - result = merge_dicts(yepian_part_result, part_result) - return part_data, result, Yepians + return part_data, yepian_part_result, part_result, Yepians def get_yepian_xiangqing(yepian_id : str) -> dict: url = GETYEPIANINFO.format(partId=yepian_id) diff --git a/tools/__pycache__/API.cpython-310.pyc b/tools/__pycache__/API.cpython-310.pyc index baf6e7a..59d5c81 100644 Binary files a/tools/__pycache__/API.cpython-310.pyc and b/tools/__pycache__/API.cpython-310.pyc differ diff --git a/tools/__pycache__/Get_Json.cpython-310.pyc b/tools/__pycache__/Get_Json.cpython-310.pyc index 4acc4fa..918b5b3 100644 Binary files a/tools/__pycache__/Get_Json.cpython-310.pyc and b/tools/__pycache__/Get_Json.cpython-310.pyc differ diff --git a/tools/__pycache__/dataproccess.cpython-310.pyc b/tools/__pycache__/dataproccess.cpython-310.pyc index a9f5afa..beafb6b 100644 Binary files a/tools/__pycache__/dataproccess.cpython-310.pyc and b/tools/__pycache__/dataproccess.cpython-310.pyc differ diff --git 
a/tools/__pycache__/defines.cpython-310.pyc b/tools/__pycache__/defines.cpython-310.pyc index 40c2c27..0298590 100644 Binary files a/tools/__pycache__/defines.cpython-310.pyc and b/tools/__pycache__/defines.cpython-310.pyc differ diff --git a/tools/__pycache__/document_tools.cpython-310.pyc b/tools/__pycache__/document_tools.cpython-310.pyc index 8eae654..8b950c5 100644 Binary files a/tools/__pycache__/document_tools.cpython-310.pyc and b/tools/__pycache__/document_tools.cpython-310.pyc differ diff --git a/tools/__pycache__/json_to_docx.cpython-310.pyc b/tools/__pycache__/json_to_docx.cpython-310.pyc index 288ea61..939842b 100644 Binary files a/tools/__pycache__/json_to_docx.cpython-310.pyc and b/tools/__pycache__/json_to_docx.cpython-310.pyc differ diff --git a/tools/dataproccess.py b/tools/dataproccess.py index 8e964b6..5db02b0 100644 --- a/tools/dataproccess.py +++ b/tools/dataproccess.py @@ -1,4 +1,6 @@ import sys, os +from tools.Get_Json import get_defect_detail +from tools.defines import * def caculate_work_days(start_date : str, end_date : str) -> str: """根据起止日期计算工期 @@ -184,19 +186,29 @@ def get_defedct_info(defect_dict): ] ... } + } } + result2 (dict): + { + '叶片1': { #按叶片分记录 + '表面裂纹': [ #按类型分记录 + """ - result = {} + result = {} #缺陷等级数量列表 + result2 = {} #缺陷树状列表 for part_name, part_info in defect_dict.items(): if part_name not in result: result[part_name] = {} - + result2[part_name] = {} for defect_type, defect_list in part_info.items(): for defect_dict in defect_list: record = defect_dict['record'] type_label = record['defectTypeLabel'] level = record['defectLevel'] + if type_label not in result[part_name]: + result2[part_name][type_label] = [] + if type_label not in result[part_name]: result[part_name][type_label] = {} @@ -204,8 +216,20 @@ def get_defedct_info(defect_dict): result[part_name][type_label][level] = [] result[part_name][type_label][level].append(defect_dict) - - return result + p = record['defectPosition'] + t = record['defectTypeLabel'] + l = record['defectLevelLabel'] + d = record['description'] + did = record['defectId'] + iid = record['imageId'] + detail = get_defect_detail(did) + a = detail.get('axial', '') + c = detail.get('chordwise', '') + area = f"轴向:{a}mm,弦向:{c}mm" + + record_info = [f"{p},{t},{l},{d}", area, iid] + result2[part_name][type_label].append(record_info) + return result, result2 def get_defect_json(defect_info: dict, Y_code: list[str], turbine_code: str) -> dict: """将缺陷信息转换为表格形式的JSON格式 @@ -352,4 +376,47 @@ def create_cell(row, col, text, border, is_merged=False, is_primary=False): } }] }] - } \ No newline at end of file + } +from typing import Dict, List, Any + +def tree_dict_to_table_data(tree: Dict) -> List[List[Any]]: + """ + 将树状字典转换为二维表格数据(保留原始类型) + + 参数: + tree: 树状字典,结构为dict[dict[...[list]]] + + 返回: + 二维列表,每个子列表代表一条路径(元素类型与输入一致),确保最内层列表被展开 + """ + result = [] + + def traverse(node: Dict, path: List[Any]) -> None: + for key, value in node.items(): + current_path = path + [key] # 保留键的原始类型 + if isinstance(value, dict): + traverse(value, current_path) + elif isinstance(value, list): + for item in value: + if isinstance(item, list): + # 如果列表项本身是列表,则展开它 + result.append(current_path + item) + else: + # 否则作为单个元素添加 + result.append(current_path + [item]) + else: + result.append(current_path + [value]) # 保留值的原始类型 + + traverse(tree, []) + return result + +def defect_list_addtitle(list_data: List[List[str]], turbine_code: str): + """ + 给缺陷列表添加标题行 + """ + list_data.insert(0, DEFECT_TABLE) + for i,l1 in enumerate(list_data): + if i == 0: + continue + 
l1.insert(0, turbine_code) + return list_data \ No newline at end of file diff --git a/tools/defines.py b/tools/defines.py index e04190d..ed9410e 100644 --- a/tools/defines.py +++ b/tools/defines.py @@ -92,7 +92,7 @@ STYLE_CONFIG = { "alignment": "center", "font": { "name": "宋体", - "size": 9, + "size": 12, "bold": False, }, "border": { @@ -104,4 +104,47 @@ STYLE_CONFIG = { "shading": { "color": "FFFFFF" } -} \ No newline at end of file +} +TITLE_STYLE_CONFIG = { + "alignment": "center", + "font": { + "name": "宋体", + "size": 12, + "bold": False, + }, + "border": { + "top": {"style": "single", "size": "4", "color": "000000"}, + "bottom": {"style": "single", "size": "4", "color": "000000"}, + "left": {"style": "single", "size": "4", "color": "000000"}, + "right": {"style": "single", "size": "4", "color": "000000"} + }, + "shading": { + "color": "FFFFFF" + } +} +DEFECT_TABLE_TITLE = '叶片故障信息表' +DEFECT_TABLE = ['机组号', '叶片编号', '损伤名称', '损坏描述', '面积/S', '备注'] + +USE_TOOL_TABLE_TITLE = '使用工器具记录' +USE_TOOL_TABLE = ['序号', '工器具名称', '型号规格', '数量', '用途', '备注'] +class USE_TOOL_ENUM: + class OUT: + LIST = [ + ["无人机", "大疆 M30", "1", "叶片外观检查观测"] + ] + + class LIGHT: + LIST = [ + ["智能无人悬吊系统", "DT01", "1", "叶片导通测试"], + ["检测导线", "/", "3", "叶片导通测试"], + ["电阻仪", "S1950", "1", "叶片导通测试"] + ] + + class IN: + LIST = [ + ["头灯", "/", "2", "轮毂叶片内照明"], + ["含氧量检测仪", "/", "1", "叶片内部环境检测"], + ["活动扳手", "17-19", "2", "拆卸·禁锢盖板螺栓"], + ["手机", "/", "2", "拍照记录"], + ["叶片内部爬壁机器人", "DT02", "1", "拍照记录"] + ] \ No newline at end of file diff --git a/tools/document_tools.py b/tools/document_tools.py index 119835f..5ec53b3 100644 --- a/tools/document_tools.py +++ b/tools/document_tools.py @@ -6,16 +6,21 @@ import json, re from typing import Dict, List, Optional, Any from docx import Document +from core.tables import copy_table from utils.file_utils import check_file_writeable, ensure_docx_extension, create_document_copy -from utils.document_utils import get_document_properties, extract_document_text, get_document_structure +from utils.document_utils import get_document_properties, extract_document_text, get_document_structure, clear_header from core.styles import ensure_heading_style, ensure_table_style from docx.oxml.shared import qn -from docx.oxml import OxmlElement -from tools.content_tools import search_and_replace,add_picture_to_table +from docx.oxml import OxmlElement, parse_xml +from tools.content_tools import search_and_replace,add_picture_to_table,add_picture from tools.Get_Json import get_full_picture_url -from tools.get_pictures import resize_and_reduce_quality +from tools.get_pictures import resize_and_reduce_quality, get_template_pic +from tools.defines import * +from docx.enum.section import WD_SECTION +from tools.json_to_docx import list_to_json_with_merges, json_to_docx -async def create_document(filename: str, title: Optional[str] = None, author: Optional[str] = None) -> str: + +async def create_document(filename: str, title: Optional[str] = None, author: Optional[str] = None, section_args: Optional[Dict[str, Any]] = None) -> str: """创建一个包含可选元数据的新Word文档。 参数: @@ -45,11 +50,20 @@ async def create_document(filename: str, title: Optional[str] = None, author: Op # 更改纸张大小为A4 from docx.shared import Mm, Inches sections = doc.sections - for section in sections: - section.page_height = Mm(297) - section.page_width = Mm(210) - section.left_margin = Inches(0.94) - section.right_margin = Inches(0.94) + if section_args: + for section in sections: + section.page_height = Mm(section_args.get('page_height', 297)) + section.page_width = 
Mm(section_args.get('page_width', 210)) + section.left_margin = Mm(section_args.get('left_margin', 20)) + section.right_margin = Mm(section_args.get('right_margin', 20)) + section.top_margin = Mm(section_args.get('top_margin', 10)) + section.bottom_margin = Mm(section_args.get('bottom_margin', 10)) + else: + for section in sections: + section.page_height = Mm(297) + section.page_width = Mm(210) + section.left_margin = Inches(0.94) + section.right_margin = Inches(0.94) # Save the document doc.save(filename) @@ -152,13 +166,13 @@ def add_documents(target_filename: str, source_filename: str) -> str: target_doc = Document(target_filename) source_filename = ensure_docx_extension(source_filename) source_doc = Document(source_filename) + for source_paragraph in source_doc.paragraphs: new_paragraph = target_doc.add_paragraph(source_paragraph.text) new_paragraph.style = target_doc.styles['Normal'] # Default style #获取合并等样式2025427 new_paragraph.alignment = source_paragraph.alignment - print(f"Source paragraph alignment: {source_paragraph.alignment}") # Try to match the style if possible try: @@ -274,9 +288,8 @@ def set_document_para(target_doc: Document) -> Document: p.getparent().remove(p) return target_doc - -async def add_table_to_document(target_filename: str, source_filename: str, rows: int, cols: int, table_num: int, data: Optional[List[List[str]]] = None, ifadjustheight: Optional[bool] = True, height: Optional[float] = 1, key_words: re.Pattern[str] = None, ALIGMENT: Optional[str] = 'CENTER') -> str: +def add_table_to_document(target_filename: str, source_filename: str, rows: int, cols: int, table_num: int, data: Optional[List[List[str]]] = None, ifadjustheight: Optional[bool] = True, height: Optional[float] = 1, key_words: re.Pattern[str] = None, ALIGMENT: Optional[str] = 'CENTER') -> str: """复制源文件中的文字与表格(先文字后表格格式)到目标文档 Args: target_filename: 目标文档路径 @@ -291,13 +304,14 @@ async def add_table_to_document(target_filename: str, source_filename: str, rows target_filename = ensure_docx_extension(target_filename) source_filename = ensure_docx_extension(source_filename) source_doc = Document(source_filename) - target_doc = Document(target_filename) + target_doc.add_paragraph() try: # Copy all paragraphs for paragraph in source_doc.paragraphs: # Create a new paragraph with the same text and style new_paragraph = target_doc.add_paragraph(paragraph.text) + new_paragraph.style = target_doc.styles['Normal'] # Default style #获取合并等样式2025427 new_paragraph.alignment = paragraph.alignment @@ -335,21 +349,11 @@ async def add_table_to_document(target_filename: str, source_filename: str, rows # Font size if specified if run.font.size: new_run.font.size = run.font.size - - # 复制分页符(处理w:br标签) - for element in run._element: - if element.tag.endswith('br'): - br_type = element.get(qn('type'), '') - if br_type == 'page': - new_br = OxmlElement('w:br') - new_br.set(qn('type'), 'page') - new_run._element.append(new_br) except Exception as e: print(f"添加表格前文章失败:{str(e)}") try:# Copy all tables - from core.tables import copy_table copy_table(source_doc.tables[0], target_doc, ifadjustheight, height) except Exception as e: print(f"添加表格失败:{str(e)}") @@ -357,85 +361,108 @@ async def add_table_to_document(target_filename: str, source_filename: str, rows target_doc = set_document_para(target_doc) target_doc.save(target_filename) target_doc = Document(target_filename) - try: - target_doc = write_table(target_filename, rows, cols, table_num, data, ifadjustheight, height, key_words, ALIGMENT) - except Exception as e: - 
print(f"{target_filename}写入{data}失败:{str(e)}") + if data: + try: + target_doc = write_table(target_filename, rows, cols, table_num, data, ifadjustheight, height, key_words, ALIGMENT) + except Exception as e: + print(f"{target_filename}写入{data}失败:{str(e)}") target_doc.save(target_filename) return target_doc,f"{target_filename}添加表格{source_doc}成功" +from docx.document import Document as Document_ +from docx.table import Table +def add_table(target_filename: str, source_filename : str | Document_, if_merge = True, REPORT_ENUM = 'DT'): + if isinstance(source_filename, str): + output_doc = copy_table(Document(source_filename).tables[0], Document(target_filename), True, if_merge=if_merge, REPORT_ENUM = REPORT_ENUM) + output_doc.save(target_filename) + else: + try: + output_doc = copy_table(source_filename.tables[0], Document(target_filename), True, if_merge=if_merge, REPORT_ENUM = REPORT_ENUM) + output_doc.save(target_filename) + except Exception as e: + print(f"添加表格失败:{str(e)}") + return f"{target_filename}添加表格{source_filename}成功" -async def add_table_and_replace(target_filename: str, source_filename: str, ifadjustheight: Optional[bool] = True, list_to_replace: dict = {}, height: Optional[float] = 1): +def add_table_and_replace( + target_filename: str, + source_filename: str, + ifadjustheight: Optional[bool] = True, + list_to_replace: dict = {}, + height: Optional[float] = 1, + no_para : Optional[bool] = False, + REPORT_ENUM = 'DT'): """复制源文件中的文字与表格(先文字后表格格式)到目标文档 Args: target_filename: 目标文档路径 source_doc: 源文档路径 ifadjustheight: bool,为真则表格行高自动调整 list_to_replace: dict, 待替换内容和替换内容 + height: float, 表格行高 + no_para: bool, 无段落 """ target_filename = ensure_docx_extension(target_filename) source_filename = ensure_docx_extension(source_filename) source_doc = Document(source_filename) - target_doc = Document(target_filename) - try: - # Copy all paragraphs - for paragraph in source_doc.paragraphs: - # Create a new paragraph with the same text and style - new_paragraph = target_doc.add_paragraph(paragraph.text) - new_paragraph.style = target_doc.styles['Normal'] # Default style - #获取合并等样式2025427 - new_paragraph.alignment = paragraph.alignment + if not no_para: + try: + # Copy all paragraphs + for paragraph in source_doc.paragraphs: + # Create a new paragraph with the same text and style + new_paragraph = target_doc.add_paragraph(paragraph.text) + new_paragraph.style = target_doc.styles['Normal'] # Default style + #获取合并等样式2025427 + new_paragraph.alignment = paragraph.alignment - # 复制段落分页属性 - new_paragraph.paragraph_format.page_break_before = paragraph.paragraph_format.page_break_before - # Try to match the style if possible - try: - if paragraph.style and paragraph.style.name in target_doc.styles: - new_paragraph.style = target_doc.styles[paragraph.style.name] - except: - pass - # Copy run formatting - for i, run in enumerate(paragraph.runs): - if i < len(new_paragraph.runs): - new_run = new_paragraph.runs[i] - # Copy basic formatting - new_run.bold = run.bold - new_run.italic = run.italic - new_run.underline = run.underline - #添加同时合并字体2025427 - new_run.font.name = run.font.name - rPr = new_run.element.get_or_add_rPr() - rFonts = rPr.get_or_add_rFonts() - # 检查 run.font.name 是否为 None - if run.font.name is None: - # 设置默认的中文字体名称 - run.font.name = '宋体(中文正文)' # 或者使用其他你喜欢的中文字体 - rFonts.set(qn('w:eastAsia'), run.font.name) - new_run.font.color.rgb = run.font.color.rgb + # 复制段落分页属性 + new_paragraph.paragraph_format.page_break_before = paragraph.paragraph_format.page_break_before + # Try to match the style if possible + 
try: + if paragraph.style and paragraph.style.name in target_doc.styles: + new_paragraph.style = target_doc.styles[paragraph.style.name] + except: + pass + # Copy run formatting + for i, run in enumerate(paragraph.runs): + if i < len(new_paragraph.runs): + new_run = new_paragraph.runs[i] + # Copy basic formatting + new_run.bold = run.bold + new_run.italic = run.italic + new_run.underline = run.underline + #添加同时合并字体2025427 + new_run.font.name = run.font.name + rPr = new_run.element.get_or_add_rPr() + rFonts = rPr.get_or_add_rFonts() + # 检查 run.font.name 是否为 None + if run.font.name is None: + # 设置默认的中文字体名称 + run.font.name = '宋体(中文正文)' # 或者使用其他你喜欢的中文字体 + rFonts.set(qn('w:eastAsia'), run.font.name) + new_run.font.color.rgb = run.font.color.rgb - # Font size if specified - if run.font.size: - new_run.font.size = run.font.size - - # 复制分页符(处理w:br标签) - for element in run._element: - if element.tag.endswith('br'): - br_type = element.get(qn('type'), '') - if br_type == 'page': - new_br = OxmlElement('w:br') - new_br.set(qn('type'), 'page') - new_run._element.append(new_br) - except Exception as e: - print(f"添加表格前文章失败:{str(e)}") + # Font size if specified + if run.font.size: + new_run.font.size = run.font.size + + # 复制分页符(处理w:br标签) + for element in run._element: + if element.tag.endswith('br'): + br_type = element.get(qn('type'), '') + if br_type == 'page': + new_br = OxmlElement('w:br') + new_br.set(qn('type'), 'page') + new_run._element.append(new_br) + except Exception as e: + print(f"添加表格前文章失败:{str(e)}") try:# Copy all tables from core.tables import copy_table - copy_table(source_doc.tables[0], target_doc, ifadjustheight, height) + copy_table(source_doc.tables[0], target_doc, ifadjustheight, height, REPORT_ENUM=REPORT_ENUM) target_doc.save(target_filename) except Exception as e: print(f"添加表格失败:{str(e)}") for find_text, replace_text in list_to_replace.items(): - print(await search_and_replace(target_filename, find_text, replace_text)) + print(search_and_replace(target_filename, find_text, replace_text)) async def merge_documents(target_filename: str, source_filenames: List[str], add_page_breaks: bool = True) -> str: """合并文档(文本) 表格会添加到最后 @@ -726,4 +753,182 @@ async def process_server_images_table(data_list, image_source_list, output_dir, picture_index += 1 i += 1 print(message) - return i # 返回最后使用的表格序号 \ No newline at end of file + return i # 返回最后使用的表格序号 + +def add_header(target_dir : str, report_enum : str, if_clear_header : Optional[bool] = True): + """添加页眉,添加封面后调用此函数,会分离页面和后续页面的节。 + + Args: + target_dir (str): 目标目录 + start_section (int): 开始节 + end_section (int): 结束节 + report_enum (str): 报告类型(DT或JF) + """ + document = Document(target_dir) # 打开文档 + + if if_clear_header: + for section in document.sections: # 遍历所有节的页眉 + clear_header(section) # 清除页眉的段落 + + print(f"文档节数:{len(document.sections)},开始往当前节后添加页眉") + document.sections[0].header.is_linked_to_previous = False # 取消页眉与上一页关联 + + document.add_section(WD_SECTION.NEW_PAGE) + header = document.sections[1].header + + header.is_linked_to_previous = False # 取消页眉与上一页关联 + paragraph = header.paragraphs[0] # 获取页眉的第一个段落 + run = paragraph.add_run() + + if report_enum == 'JF': + print("添加金风模板的页眉") + pic = run.add_picture(get_template_pic(TEMPLATE_HEADER.JINFENG_HEADER.PIC_DIR)) + run = paragraph.add_run(TEMPLATE_HEADER.JINFENG_HEADER.PARA) + run.font.name = TEMPLATE_HEADER.JINFENG_HEADER.FONT + run.font.size = TEMPLATE_HEADER.JINFENG_HEADER.PT + document.save(target_dir) # 保存文档 + + elif report_enum == 'DT': + print("添加迪特模板的页眉") + pic = 
run.add_picture(get_template_pic(TEMPLATE_HEADER.DT_HEADER.PIC_DIR)) + run = paragraph.add_run(TEMPLATE_HEADER.DT_HEADER.PARA) + run.font.name = TEMPLATE_HEADER.DT_HEADER.FONT + run.font.size = TEMPLATE_HEADER.DT_HEADER.PT + document.save(target_dir) # 保存文档 + + else: + print("未知模板,不添加页眉") + # 定义边框的 XML 字符串 + border_xml = """ + + + + """ + + # 解析 XML 并添加到段落属性 + pPr = paragraph._element.get_or_add_pPr() + pBdr = parse_xml(border_xml) + pPr.append(pBdr) + + print(f"文档节数:{len(document.sections)}") + +from docx.enum.section import WD_ORIENT +from docx.enum.text import WD_BREAK + +def add_landscape_section(target_dir : str): + # 添加横向节 + doc = Document(target_dir) + section = doc.add_section() + section.orientation = WD_ORIENT.LANDSCAPE + section.page_width, section.page_height = section.page_height, section.page_width + + +def merge_documents(target_dir : str, source_dirs : List[str]): + """合并多个文档、图片 + + Args: + target_dir (str): 目标目录 + source_dirs (List[str]): 源目录列表 + """ + # 打开目标文档 + document = Document(target_dir) + + for dir in source_dirs: + if dir.endswith('.docx'): + print(add_documents(target_dir, dir)) + if dir.endswith('.jpg') or dir.endswith('.png'): + print(add_picture(target_dir, dir, is_center=True)) + +def add_defect_info_table(output_dir, defect_info, MUBAN_DIR, total_table_num, REPORT_ENUM): + """将defectinfo写入word文档 + + Args: + output_dir: 输出目录 + defect_info: 缺陷信息 + { + '叶片1': { #按叶片分记录 + '表面裂纹': { 'SLIGHT': #按类型分等级,等级从'defectLevel'获取 + [ #按等级分记录 + { + record:{...} + }, #一张图片一个记录 + ... + ] + ... + } + } + total_table_num: 总表格数量 + Returns: + table_num: 表格数量 + 功能:表头分别为:序号,缺陷类型,等级,损伤数量,处理建议,备注(是否完成处理) (默认否)。 + 总体就按等级分类,但损伤数量需要说明为:n支m处。n为有这个缺陷和等级的叶片数,m为有这个缺陷和等级的所有叶片加起来的缺陷数。 + 处理建议从record[0]的值中获取 + """ + table_data = [] # 存储所有表格数据的列表,每个元素是一个字典 # 添加新记录(使用字典格式) + table_data.append({ + "xuhao" : "序号", # 序号 + "defecttype" : "损伤名称", # 缺陷类型 + "level" : "损伤等级", # 等级 + "defectnuminfo" : "损伤数量", # 损伤数量 + "suggestion" : "处理建议", # 处理建议 + "remark" : "备注(是否处理完成)" # 备注(是否完成处理) + }) + # 遍历缺陷信息,整理数据 + for blade_name, defects in defect_info.items(): + for defect_type, levels in defects.items(): + for level, records in levels.items(): + if records: # 如果有记录 + # 计算损伤数量:n支m处 + blade_count = 1 # 当前叶片就是1支 + defect_count = len(records) # 当前叶片的缺陷数量 + + # 查找是否已有同类型同等级的数据 + found = False + for item in table_data: + if item["defecttype"] == defect_type and item["level"] == level: + # 更新已有记录 + current_n = item["defectnuminfo"] + parts = current_n.split("支") + existing_blades = int(parts[0]) + existing_defects = int(parts[1].split("处")[0]) + item["defectnuminfo"] = f"{existing_blades + blade_count}支{existing_defects + defect_count}处" + found = True + break + + if not found: + # 获取处理建议(假设第一条记录中有处理建议) + suggestion = records[0].get('record', {}).get('处理建议', '') + + # 添加新记录(使用字典格式) + table_data.append({ + "xuhao" : str(len(table_data)), # 序号 + "defecttype" : defect_type, # 缺陷类型 + "level" : level, # 等级 + "defectnuminfo" : f"{blade_count}支{defect_count}处", # 损伤数量 + "suggestion" : suggestion, # 处理建议 + "remark" : "否" # 备注(是否完成处理) + }) + # 添加空行 + for _ in range(2): + table_data.append({ + "xuhao" : "", # 序号 + "defecttype" : "", # 缺陷类型 + "level" : "", # 等级 + "defectnuminfo" : "", # 损伤数量 + "suggestion" : "", # 处理建议 + "remark" : "" # 备注(是否完成处理) + }) + for row in table_data: + add_table_and_replace(output_dir, MUBAN_DIR, list_to_replace=row, no_para = True, ifadjustheight=True, REPORT_ENUM=REPORT_ENUM) + total_table_num += 1 + return total_table_num + +from docx.enum.text import WD_ALIGN_PARAGRAPH +def 
add_table_title(output_dir, TITLE): + doc = Document(output_dir) + table = doc.add_table(rows=1, cols=6, style='Table Grid') + table.cell(0,0).merge(table.cell(0,5)) + para = table.cell(0,0).paragraphs[0] + para.text = TITLE + para.alignment = WD_ALIGN_PARAGRAPH.CENTER + doc.save(output_dir) \ No newline at end of file diff --git a/tools/json_to_docx.py b/tools/json_to_docx.py index 0348d51..a5b96b1 100644 --- a/tools/json_to_docx.py +++ b/tools/json_to_docx.py @@ -4,9 +4,12 @@ from docx.enum.text import WD_ALIGN_PARAGRAPH from docx.oxml.shared import qn, OxmlElement import json -def json_to_docx(json_data): +def json_to_docx(json_data, output_dir = None): print(f"\n开始转换JSON到DOCX文档") - doc = Document() + if output_dir: + doc = Document(output_dir) + else: + doc = Document() total_elements = len(json_data) print(f"文档包含 {total_elements} 个元素(段落和表格)") @@ -73,7 +76,7 @@ def add_paragraph_from_json(doc, para_json): def add_table_from_json(doc, table_json, bold=False): print(f" 创建表格: {table_json['rows']}行 × {table_json['cols']}列") table = doc.add_table(rows=table_json["rows"], cols=table_json["cols"]) - + table.autofit = True # 自动调整列宽和行高 # 设置表格样式为无网格线(我们将自定义边框) table.style = 'Table Grid' @@ -172,13 +175,13 @@ def set_cell_alignment(cell, cell_data): # 垂直对齐设置 tcPr = cell._tc.get_or_add_tcPr() vAlign = OxmlElement('w:vAlign') - align_value = cell_data.get('vertical_align', 'top') + align_value = cell_data.get('vertical_align', 'center') print(f" 设置垂直对齐: {align_value}") # 确保使用有效的对齐值 valid_alignments = ['top', 'center', 'bottom'] if align_value not in valid_alignments: - align_value = 'top' # 默认值 + align_value = 'center' # 默认值 vAlign.set(qn('w:val'), align_value) tcPr.append(vAlign) @@ -243,11 +246,13 @@ if __name__ == "__main__": # 将JSON转换回DOCX json_to_docx(json_data, output_path) -from typing import List, Dict, Any +from typing import List, Dict, Any, Union + def list_to_json_with_merges( table_data: List[List[str]], style_config: Dict[str, Any] = None, - detect_merges: bool = True # 新增控制参数 + detect_merges: bool = True, + merge_columns: Union[int, List[int]] = None # 新增参数,控制合并哪些列 ) -> Dict[str, Any]: """ 将二维列表转换为表格JSON,可选是否合并相邻相同单元格 @@ -256,6 +261,10 @@ def list_to_json_with_merges( table_data: 二维字符串列表表示的表格数据 style_config: 包含样式配置的字典(可选) detect_merges: 是否检测并合并相邻相同单元格(默认为True) + merge_columns: 控制合并哪些列,可以是: + - None:合并所有列(默认) + - int n:只合并前n列 + - List[int]:只合并指定的列 返回: 符合表格JSON结构的字典 @@ -275,20 +284,31 @@ def list_to_json_with_merges( "cells": [[None for _ in range(cols)] for _ in range(rows)] } } + + # 处理merge_columns参数 + columns_to_merge = set() + if merge_columns is not None: + if isinstance(merge_columns, int): + columns_to_merge = set(range(merge_columns)) # 前n列 + elif isinstance(merge_columns, list): + columns_to_merge = set(merge_columns) # 指定列 for col in range(cols): + # 检查当前列是否需要合并 + should_merge = detect_merges + if merge_columns is not None: + should_merge = should_merge and (col in columns_to_merge) + start_row = 0 while start_row < rows: current_value = table_data[start_row][col] end_row = start_row - # 只有开启合并检测时才查找可合并区域 - if detect_merges: + if should_merge: while end_row + 1 < rows and table_data[end_row + 1][col] == current_value: end_row += 1 - # 处理单元格(区分合并/非合并模式) - if detect_merges and end_row > start_row: # 合并模式 + if should_merge and end_row > start_row: merge_info = { "start_row": start_row, "start_col": col, @@ -308,7 +328,7 @@ def list_to_json_with_merges( merge_range=merge_info ) result["content"]["cells"][row][col] = cell_data - else: # 非合并模式或无需合并的单单元格 + else: cell_data = 
create_cell_data( row=start_row, col=col, @@ -337,7 +357,7 @@ def create_cell_data( "col": col, "is_merged": is_merged, "content": create_cell_content(value, style_config), - "alignment": style_config.get("alignment", "left") if style_config else "left", + "alignment": style_config.get("alignment", "center") if style_config else "center", "border": style_config.get("border", {}) if style_config else {}, "shading": style_config.get("shading", {}) if style_config else {}, "margins": style_config.get("margins", {}) if style_config else {}
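For the new `merge_columns` parameter, the call sites in Jf_report.py pass `merge_columns=2` for the defect tree table (so only the 机组号/叶片编号 columns collapse into vertically merged cells) and `detect_merges=False` for the tool table. A minimal usage sketch follows; the row data is made up for illustration, only the function names, signatures, and `STYLE_CONFIG` come from this patch:

```python
from tools.json_to_docx import list_to_json_with_merges, json_to_docx
from tools.defines import STYLE_CONFIG

# Illustrative defect rows: header plus three records sharing 机组号/叶片编号 values.
table_list = [
    ["机组号", "叶片编号", "损伤名称", "损坏描述", "面积/S", "备注"],
    ["012", "叶片1", "表面裂纹", "前缘裂纹", "轴向:120mm,弦向:5mm", "img-001"],
    ["012", "叶片1", "表面裂纹", "后缘裂纹", "轴向:80mm,弦向:3mm", "img-002"],
    ["012", "叶片2", "雷击损伤", "叶尖烧蚀", "轴向:200mm,弦向:40mm", "img-003"],
]

# merge_columns=2 restricts merging of identical adjacent cells to the first
# two columns; the remaining columns are emitted cell by cell even when
# neighbouring values happen to repeat.
table_json = list_to_json_with_merges(table_list, style_config=STYLE_CONFIG, merge_columns=2)

# With no output_dir, json_to_docx starts from a fresh Document per the change above.
json_to_docx(table_json).save("defect_table_preview.docx")
```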