feat: optimize template generation and table drawing logic

puzzlesion 2025-09-09 20:18:20 +08:00
parent e8643dd628
commit 68e54c5afd
3 changed files with 355 additions and 138 deletions

View File

@@ -6,11 +6,8 @@
"relative_x_start": 0.01,
"text_definitions": [
{
"data_key": "main",
"relative_pos": [
5.82,
2.38
],
"data_key": "1",
"relative_pos": [5.82, 2.38],
"alignment": "BOTTOM_LEFT",
"height": 3.5,
"style": "HZTXT",
@@ -25,10 +22,7 @@
"text_definitions": [
{
"data_key": "main",
"relative_pos": [
14.77,
2.07
],
"relative_pos": [14.77, 2.07],
"alignment": "BOTTOM_CENTER",
"height": 3.0,
"style": "HZTXT",
@@ -43,10 +37,7 @@
"text_definitions": [
{
"data_key": "chinese_name",
"relative_pos": [
1.98,
3.58
],
"relative_pos": [1.98, 3.58],
"alignment": "BOTTOM_LEFT",
"height": 3.5,
"style": "HZ",
@@ -55,10 +46,7 @@
},
{
"data_key": "english_name",
"relative_pos": [
1.68,
0.92
],
"relative_pos": [1.68, 0.92],
"alignment": "BOTTOM_LEFT",
"height": 2.0,
"style": "HZTXT",
@@ -73,10 +61,7 @@
"text_definitions": [
{
"data_key": "main",
"relative_pos": [
4.97,
2.28
],
"relative_pos": [4.97, 2.28],
"alignment": "BOTTOM_CENTER",
"height": 3.0,
"style": "HZTXT",
@@ -91,10 +76,7 @@
"text_definitions": [
{
"data_key": "main",
"relative_pos": [
15.16,
2.04
],
"relative_pos": [15.16, 2.04],
"alignment": "BOTTOM_CENTER",
"height": 3.5,
"style": "HZ",
@@ -106,7 +88,17 @@
{
"name": "单",
"relative_x_start": 140.01,
"text_definitions": []
"text_definitions": [
{
"data_key": "main",
"relative_pos": [5.0, 2.34],
"alignment": "BOTTOM_CENTER",
"height": 3.0,
"style": "HZ",
"layer": "6文字层",
"color": 256
}
]
},
{
"name": "总",
@@ -114,10 +106,7 @@
"text_definitions": [
{
"data_key": "main",
"relative_pos": [
5.06,
2.34
],
"relative_pos": [5.06, 2.34],
"alignment": "BOTTOM_CENTER",
"height": 3.0,
"style": "HZ",
@@ -129,7 +118,84 @@
{
"name": "备 注",
"relative_x_start": 160.01,
"text_definitions": []
"text_definitions": [
{
"data_key": "main",
"relative_pos": [2.0, 3.58],
"alignment": "BOTTOM_LEFT",
"height": 3.5,
"style": "HZ",
"layer": "6文字层",
"color": 256
}
]
}
],
"data_rows": [
{
"件 号": "1",
"图号或标准号": "A5-001-01",
"名 称": {
"chinese_name": "支撑板",
"english_name": "Support Plate"
},
"数量": 1,
"材 料": "SS400",
"单": 2.5,
"总": 2.5,
"备 注": "激光切割"
},
{
"件 号": "2",
"图号或标准号": "A5-001-02",
"名 称": {
"chinese_name": "轴",
"english_name": "Shaft"
},
"数量": 2,
"材 料": "40Cr",
"单": 1.2,
"总": 2.4,
"备 注": "热处理"
},
{
"件 号": "3",
"图号或标准号": "GB/T 5783-2000",
"名 称": {
"chinese_name": "六角头螺栓",
"english_name": "Hexagon Head Bolt"
},
"数量": 4,
"材 料": "45#",
"单": 0.05,
"总": 0.20,
"备 注": "M8x20"
},
{
"件 号": "4",
"图号或标准号": "GB/T 97.1-2002",
"名 称": {
"chinese_name": "平垫圈",
"english_name": "Plain Washer"
},
"数量": 8,
"材 料": "304SS",
"单": 0.01,
"总": 0.08,
"备 注": "标准件"
},
{
"件 号": "5",
"图号或标准号": "JB/T 88-1994",
"名 称": {
"chinese_name": "六角螺母",
"english_name": "Hexagon Nut"
},
"数量": 4,
"材 料": "Q235",
"单": 0.02,
"总": 0.08,
"备 注": ""
}
]
}
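As a reading aid for the template/data structure above: each column's text_definitions entry names a data_key, and the matching cell in data_rows is either a scalar ("件 号", "数量", "单", "总", ...) or a nested dict ("名 称" with chinese_name/english_name). Below is a minimal sketch of how a consumer might resolve one cell's text; the helper name and the tolerant scalar handling are assumptions for illustration, not code from this commit.

def resolve_cell_text(cell, data_key):
    """Resolve one text_definition's content: dict cells (e.g. "名 称") are
    looked up by data_key, scalar cells ("数量", "单", ...) are used directly."""
    if isinstance(cell, dict):
        return str(cell.get(data_key, ""))
    return "" if cell is None else str(cell)

# Row data copied from the data_rows above.
row = {
    "件 号": "1",
    "名 称": {"chinese_name": "支撑板", "english_name": "Support Plate"},
    "数量": 1,
    "材 料": "SS400",
}
print(resolve_cell_text(row["名 称"], "chinese_name"))  # -> 支撑板
print(resolve_cell_text(row["数量"], "main"))           # -> 1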

View File

@@ -19,34 +19,16 @@ ALIGNMENT_MAP = {
}
# ==============================================================================
# 1. SAMPLE DATA (This would normally come from Excel, a database, etc.)
# 1. SAMPLE DATA (This is now loaded from an external file)
# ==============================================================================
BOM_DATA = [
{
"件 号": {"main": "1"},
"图号或标准号": {"main": "JB/T XXXX"},
"名 称": {
"chinese_name": "新零件-A",
"english_name": "NEW PART-A",
"specification": "M20x150"
},
"数量": {"main": "4"},
"材 料": {"main": "Q345R"},
"备 注": {"main": "自定义备注"}
},
{
"件 号": {"main": "2"},
"图号或标准号": {"main": "GB/T YYYY"},
"名 称": {
"chinese_name": "新零件-B",
"english_name": "NEW PART-B",
"specification": "DN200"
},
"数量": {"main": "2"},
"材 料": {"main": "S30408"},
"备 注": {"main": ""}
}
]
def load_bom_data(file_path):
"""Loads BOM data from a JSON file."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
return json.load(f)
except (IOError, json.JSONDecodeError) as e:
print(f"Error loading BOM data from {file_path}: {e}")
return None
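A hedged usage sketch for the loader above. The JSON file in this commit stores its sample rows under a "data_rows" key, while draw_table_from_template iterates rows directly, so this sketch tolerates both a bare list and a wrapped form; the wrapper handling and the "bom_data.json" filename are assumptions, not part of this commit.

import json

def rows_from_bom_file(file_path):
    """Load BOM rows, accepting either a bare list or a {"data_rows": [...]} wrapper."""
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except (IOError, json.JSONDecodeError) as e:
        print(f"Error loading BOM data from {file_path}: {e}")
        return []
    return data.get("data_rows", []) if isinstance(data, dict) else data

if __name__ == "__main__":
    for row in rows_from_bom_file("bom_data.json"):
        print(row.get("件 号"), row.get("名 称"))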
# ==============================================================================
# 2. DRAWING LOGIC
@@ -115,40 +97,69 @@ def draw_table_from_template(msp, start_pos, header_template, columns_template,
abs_y = header_bottom_y + text_def['relative_pos'][1]
add_aligned_text(msp, text_def['content'], (abs_x, abs_y), text_def)
# --- 2. Draw Data Rows upwards ---
header_top_y = start_pos.y + header_height
current_y = header_top_y
for data_row in data_rows: # Normal order, drawing upwards
row_bottom_y = current_y
row_top_y = row_bottom_y + row_height
# --- Draw Data Rows (growing upwards) ---
current_y = header_bottom_y + header_height
# Draw top horizontal line for the row
msp.add_line((start_pos.x, row_top_y), (start_pos.x + table_width, row_top_y))
# Correctly iterate through all data rows and column definitions
for i, data_row in enumerate(data_rows):
row_y_bottom = current_y + (i * row_height)
# Draw vertical divider lines for the row
for x_rel in col_boundaries:
msp.add_line((start_pos.x + x_rel, row_bottom_y), (start_pos.x + x_rel, row_top_y))
# Draw text for each column in the row using the new list structure
# Iterate through all column definitions from the template for each row
for col_def in col_defs:
col_name = col_def["name"]
# Check if the data for this column exists in the current data_row
if col_name in data_row:
cell_data = data_row[col_name]
col_start_x_rel = col_def["relative_x_start"]
if col_name in data_row:
# A column can have multiple text fields (e.g., main and sub-text)
for text_def in col_def["text_definitions"]:
data_key = text_def['data_key']
text_content = data_row[col_name].get(data_key, "")
if not text_content:
continue
data_key = text_def["data_key"]
# Calculate absolute position for the text's alignment point
# abs_x = table_start + column_start + text_start_in_column
# Check if the specific data_key exists for the cell
if data_key in cell_data:
content = str(cell_data[data_key])
# --- Calculate Absolute Position ---
# Text's relative position is relative to the column's start
abs_x = start_pos.x + col_start_x_rel + text_def['relative_pos'][0]
abs_y = row_bottom_y + text_def['relative_pos'][1]
abs_y = row_y_bottom + text_def['relative_pos'][1]
add_aligned_text(msp, text_content, (abs_x, abs_y), text_def)
alignment_str = text_def.get("alignment", "BOTTOM_LEFT")
alignment = ALIGNMENT_MAP.get(alignment_str, TextEntityAlignment.BOTTOM_LEFT)
current_y = row_top_y
dxfattribs = {
'style': text_def['style'],
'height': text_def['height'],
'color': text_def['color'],
'width': 0.7 # Ensure width factor is applied
}
# Add the text entity with correct placement
msp.add_text(
content,
dxfattribs=dxfattribs
).set_placement(
(abs_x, abs_y),
align=alignment
)
# --- Draw Row and Column Lines ---
# (This part appears correct; re-check it if the text-placement fix above does not resolve every issue)
num_data_rows = len(data_rows)
table_height = header_height + num_data_rows * row_height
table_base_y = start_pos.y
# Draw horizontal lines for each data row
for i in range(num_data_rows + 1):
y = table_base_y + header_height + i * row_height
msp.add_line((start_pos.x, y), (start_pos.x + table_width, y))
# Draw vertical lines based on column boundaries
for x_rel in col_boundaries:
x_abs = start_pos.x + x_rel
msp.add_line((x_abs, table_base_y), (x_abs, table_base_y + table_height))
def add_aligned_text(msp, content, point, text_def):
"""Adds a TEXT entity with specified alignment."""
@@ -172,21 +183,23 @@ def add_aligned_text(msp, content, point, text_def):
# ==============================================================================
def main():
parser = argparse.ArgumentParser(description="Draw a BOM table in a DXF file based on JSON templates.")
# Input files
parser.add_argument("source_dxf", help="Path to the source DXF file to read.")
parser.add_argument("header_template", help="Path to the header template JSON file.")
parser.add_argument("columns_template", help="Path to the columns template JSON file.")
parser.add_argument("data_json", help="Path to the BOM data JSON file.")
# Output file
parser.add_argument("output_dxf", help="Path to the output DXF file to write.")
# Optional coordinates
parser.add_argument("--x", type=float, default=260.0, help="The X coordinate for the table's bottom-left insertion point.")
parser.add_argument("--y", type=float, default=50.0, help="The Y coordinate for the table's bottom-left insertion point.")
args = parser.parse_args()
# Get the absolute path to the directory where this script is located
script_dir = os.path.dirname(os.path.abspath(__file__))
header_template_path = os.path.join(script_dir, "header_template.json")
columns_template_path = os.path.join(script_dir, "columns_template.json")
# --- Load Templates ---
try:
with open(header_template_path, 'r', encoding='utf-8') as f:
with open(args.header_template, 'r', encoding='utf-8') as f:
header_template = json.load(f)
with open(columns_template_path, 'r', encoding='utf-8') as f:
with open(args.columns_template, 'r', encoding='utf-8') as f:
columns_template = json.load(f)
except (IOError, json.JSONDecodeError) as e:
print(f"Error reading template files: {e}")
@@ -210,11 +223,15 @@ def main():
print(f"An unexpected error occurred: {e}")
return
# --- Load Data ---
bom_data = load_bom_data(args.data_json)
if bom_data is None:
return
# --- Draw Table ---
print("Drawing table from templates...")
# Using a fixed start position for predictability
start_position = Vec3(260, 50)
draw_table_from_template(msp, start_position, header_template, columns_template, BOM_DATA)
start_position = Vec3(args.x, args.y)
draw_table_from_template(msp, start_position, header_template, columns_template, bom_data)
# --- Save Output ---
try:

View File

@@ -1,5 +1,6 @@
import json
import os
import argparse
def find_table_boundaries(lines):
"""
@@ -59,6 +60,129 @@ def find_table_boundaries(lines):
# Return boundaries and the absolute X coords of vertical lines
return boundaries, vert_lines_x
def find_table_boundaries_from_texts(texts, lines, y_cluster_tolerance=2.0, expansion_margin=2.0, header_cluster_gap_tolerance=5.0):
"""
Finds table boundaries by identifying the densest group of adjacent text clusters (multi-line header),
then finds the closest data row cluster (either above or below).
"""
if not texts:
return None, None
# 1. Cluster texts by their Y-coordinate to find "rows" of text.
texts.sort(key=lambda t: t['insert_point'][1])
y_clusters = []
if texts:
current_cluster = [texts[0]]
for i in range(1, len(texts)):
if abs(texts[i]['insert_point'][1] - current_cluster[-1]['insert_point'][1]) < y_cluster_tolerance:
current_cluster.append(texts[i])
else:
y_clusters.append(current_cluster)
current_cluster = [texts[i]]
y_clusters.append(current_cluster)
if not y_clusters:
return None, None
# 2. Find the densest *group* of adjacent clusters (our multi-line header).
best_header_group = []
max_density = 0
for i in range(len(y_clusters)):
current_group = [y_clusters[i]]
current_density = len(y_clusters[i])
# Look ahead to see if the next clusters are close enough to be part of the same header
for j in range(i + 1, len(y_clusters)):
# Calculate vertical gap between the last cluster in the group and the next one
last_cluster_avg_y = sum(t['insert_point'][1] for t in current_group[-1]) / len(current_group[-1])
next_cluster_avg_y = sum(t['insert_point'][1] for t in y_clusters[j]) / len(y_clusters[j])
if abs(next_cluster_avg_y - last_cluster_avg_y) < header_cluster_gap_tolerance:
current_group.append(y_clusters[j])
current_density += len(y_clusters[j])
else:
break # The gap is too large, this block has ended
if current_density > max_density:
max_density = current_density
best_header_group = current_group
if not best_header_group:
print("Warning: Could not identify a header group.")
return None, None
# 3. All texts within the identified header group belong to the header.
all_header_texts = [text for cluster in best_header_group for text in cluster]
# 4. Find the closest data row (can be above or below the header).
header_indices = {y_clusters.index(cluster) for cluster in best_header_group}
first_data_row_cluster = None
min_dist = float('inf')
for i, cluster in enumerate(y_clusters):
if i not in header_indices:
# It's a data row candidate. Find its distance to the header block.
header_min_y = min(t['insert_point'][1] for t in all_header_texts)
header_max_y = max(t['insert_point'][1] for t in all_header_texts)
cluster_avg_y = sum(t['insert_point'][1] for t in cluster) / len(cluster)
dist = min(abs(cluster_avg_y - header_min_y), abs(cluster_avg_y - header_max_y))
if dist < min_dist:
min_dist = dist
first_data_row_cluster = cluster
data_start_y = None
if first_data_row_cluster:
data_start_y = first_data_row_cluster[0]['insert_point'][1]
else:
print("Warning: Could not automatically detect a data row near the header.")
# 5. Define boundaries based on the multi-line header text block.
min_x = min(t['insert_point'][0] for t in all_header_texts)
max_x = max(t['insert_point'][0] for t in all_header_texts)
min_y = min(t['insert_point'][1] for t in all_header_texts)
max_y = max(t['insert_point'][1] + t['height'] for t in all_header_texts)
# ... (The rest of the logic to find lines and define final bounds remains largely the same,
# but it will now operate on the correct header_texts and boundaries)
# Re-using the line-finding logic from the previous implementation
expansion_margin = 5.0 # Increase margin slightly for complex layouts
bbox_min_x, bbox_max_x = min_x - expansion_margin, max_x + expansion_margin
bbox_min_y, bbox_max_y = min_y - expansion_margin, max_y + expansion_margin
table_h_lines = [l for l in lines if (bbox_min_y < l['start'][1] < bbox_max_y and
bbox_min_y < l['end'][1] < bbox_max_y)]
table_v_lines = [l for l in lines if (bbox_min_x < l['start'][0] < bbox_max_x and
bbox_min_x < l['end'][0] < bbox_max_x)]
if not table_h_lines or not table_v_lines:
print("Warning: Could not find enough lines near the identified text header.")
return None, None
final_min_y = min(l['start'][1] for l in table_h_lines)
final_max_y = max(l['start'][1] for l in table_h_lines)
col_x_coords = set()
for line in table_v_lines:
if min(line['start'][1], line['end'][1]) < final_min_y + 1 and \
max(line['start'][1], line['end'][1]) > final_max_y - 1:
col_x_coords.add(round(line['start'][0], 2))
sorted_col_x = sorted(list(col_x_coords))
if not sorted_col_x:
return None, None
bounds = {
'y_min': final_min_y,
'y_max': final_max_y,
'x_min': sorted_col_x[0],
'x_max': sorted_col_x[-1],
'header_total_height': final_max_y - final_min_y,
'data_start_y': data_start_y
}
return bounds, sorted_col_x
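The core of the new detection is the Y-clustering in step 1: texts are sorted by their insertion-point Y and grouped whenever consecutive Y values differ by less than y_cluster_tolerance, and the densest group of adjacent clusters is then taken as the multi-line header. A standalone sketch of just the clustering step, on made-up records shaped like the entity snapshot:

def cluster_texts_by_y(texts, y_tol=2.0):
    """Group text records into rows: consecutive texts whose insertion-point Y
    differs by less than y_tol join the current cluster."""
    if not texts:
        return []
    texts = sorted(texts, key=lambda t: t["insert_point"][1])
    clusters = [[texts[0]]]
    for t in texts[1:]:
        if abs(t["insert_point"][1] - clusters[-1][-1]["insert_point"][1]) < y_tol:
            clusters[-1].append(t)
        else:
            clusters.append([t])
    return clusters

# Illustrative records only (real ones come from the entities JSON).
sample = [
    {"content": "件 号", "insert_point": (262.0, 101.3), "height": 3.5},
    {"content": "名 称", "insert_point": (300.1, 101.0), "height": 3.5},
    {"content": "材 料", "insert_point": (360.4, 100.8), "height": 3.5},
    {"content": "1", "insert_point": (263.0, 92.5), "height": 3.0},
]
for cluster in cluster_texts_by_y(sample):
    print([t["content"] for t in cluster])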
def generate_header_template(data, bounds, col_x_coords_abs):
"""
Generates the header part of the template from extracted entity data,
@@ -155,7 +279,12 @@ def generate_column_definitions(data, bounds, col_x_coords_abs, header_template)
col_names[i] = main_text['content'].strip()
# --- Find text patterns in the first data row ---
first_data_row_y = bounds["data_start_y"]
first_data_row_y = bounds.get("data_start_y")
if first_data_row_y is None:
print("Warning: No data row was found in the source DXF. No column definitions will be generated.")
return []
data_row_texts = [
t for t in texts
if first_data_row_y < t['insert_point'][1] < first_data_row_y + 8.0
@@ -204,50 +333,55 @@ def generate_column_definitions(data, bounds, col_x_coords_abs, header_template)
def main():
source_json_path = os.path.join("03_Python_OpenSource_DXF", "Drawing1_entities.json")
header_template_path = os.path.join("03_Python_OpenSource_DXF", "header_template.json")
columns_template_path = os.path.join("03_Python_OpenSource_DXF", "columns_template.json")
parser = argparse.ArgumentParser(description="Generate modular header and column templates from a DXF entities JSON file.")
parser.add_argument("source_json", help="Path to the source JSON file (digital snapshot).")
parser.add_argument("output_header_template", help="Path to write the output header_template.json.")
parser.add_argument("output_columns_template", help="Path to write the output columns_template.json.")
args = parser.parse_args()
if not os.path.exists(source_json_path):
print(f"Error: Source JSON file not found at {source_json_path}")
if not os.path.exists(args.source_json):
print(f"Error: Source JSON file not found at {args.source_json}")
return
print(f"Reading entity data from {source_json_path}...")
with open(source_json_path, 'r', encoding='utf-8') as f:
print(f"Reading entity data from {args.source_json}...")
with open(args.source_json, 'r', encoding='utf-8') as f:
entity_data = json.load(f)
print("Generating templates...")
bounds, col_x_coords_abs = find_table_boundaries(entity_data["lines"])
print("Generating templates using text-based detection...")
# USE THE NEW, ROBUST FUNCTION
bounds, col_x_coords_abs = find_table_boundaries_from_texts(entity_data.get("texts", []), entity_data.get("lines", []))
if not bounds or not col_x_coords_abs:
print("Error: Could not determine table boundaries from the provided snapshot.")
print("Attempting to fall back to the old line-based method...")
bounds, col_x_coords_abs = find_table_boundaries(entity_data.get("lines", []))
if not bounds or not col_x_coords_abs:
print("Fallback method also failed. Aborting.")
return
if bounds and col_x_coords_abs:
# 1. Generate and save the header template
header_template = generate_header_template(entity_data, bounds, col_x_coords_abs)
if header_template:
try:
with open(header_template_path, 'w', encoding='utf-8') as f:
with open(args.output_header_template, 'w', encoding='utf-8') as f:
json.dump(header_template, f, ensure_ascii=False, indent=2)
print(f"Successfully generated header template: {header_template_path}")
print(f"Successfully generated header template: {args.output_header_template}")
except IOError as e:
print(f"Error writing header template file: {e}")
# 2. Generate and save the columns template
# We need the header text to name the columns correctly
if not header_template:
header_template = generate_header_template(entity_data, bounds, col_x_coords_abs)
column_definitions = generate_column_definitions(entity_data, bounds, col_x_coords_abs, header_template)
# Create the final columns template structure
columns_template = {
"row_height": 8.0,
"row_height": header_template.get("row_height", 8.0), # Get row_height from header or default
"column_definitions": column_definitions
}
if column_definitions:
try:
with open(columns_template_path, 'w', encoding='utf-8') as f:
with open(args.output_columns_template, 'w', encoding='utf-8') as f:
json.dump(columns_template, f, ensure_ascii=False, indent=2)
print(f"Successfully generated columns template: {columns_template_path}")
print(f"Successfully generated columns template: {args.output_columns_template}")
except IOError as e:
print(f"Error writing columns template file: {e}")