import os
import shutil

import numpy as np


def parse_matrix_and_pos(tokens):
    """Parse an LDraw type-1 line into colour, position, matrix and filename.

    Args:
        tokens: whitespace-split fields of a line starting with "1".

    Returns:
        Tuple ``(color, pos, mat, filename)`` where ``pos`` is a (3,) ndarray,
        ``mat`` is the (3, 3) rotation matrix, and ``filename`` is upper-cased.
        The filename is joined from all trailing tokens because part names may
        contain spaces.
    """
    color = tokens[1]
    pos = np.array([float(t) for t in tokens[2:5]])
    # Fields 5..13 are the row-major 3x3 rotation matrix.
    mat = np.array([float(t) for t in tokens[5:14]]).reshape(3, 3)
    filename = " ".join(tokens[14:]).upper()
    return color, pos, mat, filename


def apply_transform(pos_parent, mat_parent, pos_child, mat_child):
    """Compose a child placement with its parent transform.

    Returns:
        ``(new_pos, new_mat)``: the child's origin mapped through the parent
        frame (``pos_parent + mat_parent @ pos_child``) and the concatenated
        rotation (``mat_parent @ mat_child``).
    """
    new_pos = pos_parent + mat_parent.dot(pos_child)
    new_mat = mat_parent.dot(mat_child)
    return new_pos, new_mat


def expand_submodel(line, submodels):
    """Expand one submodel reference line into its transformed content lines.

    Plain parts (``*.dat``) are returned unchanged. Unknown submodel names
    emit a warning and are dropped. Non type-1 lines inside the submodel
    (comments, meta commands) are kept verbatim.

    Args:
        line: a type-1 line referencing a submodel or part.
        submodels: mapping of upper-cased submodel name -> list of its lines.

    Returns:
        List of expanded lines (possibly empty for a missing submodel).
    """
    tokens = line.strip().split()
    color, pos, mat, filename = parse_matrix_and_pos(tokens)

    if filename.lower().endswith(".dat"):
        # A real part — nothing to expand.
        return [line]

    if filename not in submodels:
        # BUG FIX: the original f-string had no placeholder and always printed
        # a literal "(unknown)"; report the actual missing submodel name.
        print(f"⚠️ 未找到子模型定义: {filename}")
        return []

    expanded_lines = []
    for subline in submodels[filename]:
        if not subline.strip().startswith("1 "):
            # Preserve comments / meta lines untouched.
            expanded_lines.append(subline)
            continue
        stokens = subline.strip().split()
        scolor, spos, smat, sfile = parse_matrix_and_pos(stokens)
        new_pos, new_mat = apply_transform(pos, mat, spos, smat)
        new_line = "1 {} {:.3f} {:.3f} {:.3f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {}".format(
            scolor,
            new_pos[0], new_pos[1], new_pos[2],
            new_mat[0, 0], new_mat[0, 1], new_mat[0, 2],
            new_mat[1, 0], new_mat[1, 1], new_mat[1, 2],
            new_mat[2, 0], new_mat[2, 1], new_mat[2, 2],
            sfile,
        )
        expanded_lines.append(new_line)
    return expanded_lines


def expand_all(main_lines, submodels):
    """Repeatedly expand ``main_lines`` until every type-1 reference is a ``.dat``.

    Runs fixed-point iterations: each pass replaces submodel references with
    their (transformed) contents; nested submodels are resolved on later
    passes. Lines are stripped of trailing whitespace/newlines on output.
    """
    changed = True
    while changed:
        changed = False
        new_lines = []
        for line in main_lines:
            if line.strip().startswith("1 "):
                tokens = line.strip().split()
                filename = " ".join(tokens[14:]).upper()
                if not filename.lower().endswith(".dat") and filename in submodels:
                    new_lines.extend(expand_submodel(line, submodels))
                    changed = True
                    continue
            new_lines.append(line.strip())
        main_lines = new_lines
    return main_lines


def process_ldr(input_file, output_file):
    """Expand all submodel references in one ``.ldr``/MPD file and save it.

    The first ``0 NOFILE`` marker splits the file: everything before it is the
    main model, everything after it holds the ``0 FILE`` submodel definitions.
    Files without submodels are copied through unchanged.
    """
    with open(input_file, "r", encoding="utf-8") as f:
        lines = f.readlines()

    # Locate the first "0 NOFILE" divider.
    nofile_idx = None
    for i, line in enumerate(lines):
        if line.strip().upper().startswith("0 NOFILE"):
            nofile_idx = i
            break

    # BUG FIX: create the output directory up front. The original only did
    # this on the main path, so the early-return below crashed whenever the
    # destination directory did not exist yet. Guard against an empty dirname
    # (output in the current directory), where makedirs("") would raise.
    out_dir = os.path.dirname(output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # No "0 NOFILE" → no submodels; save the original file verbatim.
    if nofile_idx is None:
        print(f"⚠️ {input_file} 未找到 0 NOFILE,文件没有子模型,直接保存")
        with open(output_file, "w", encoding="utf-8") as f:
            f.writelines(lines)
        return

    main_lines = lines[:nofile_idx]
    sub_lines = lines[nofile_idx + 1:]

    # Collect submodel definitions: each "0 FILE <name>" opens a buffer that
    # runs until the next "0 FILE" / "0 NOFILE" (or end of file).
    submodels = {}
    cur_name = None
    cur_buf = []
    for line in sub_lines:
        stripped_upper = line.strip().upper()
        if stripped_upper.startswith("0 FILE"):
            if cur_name and cur_buf:
                submodels[cur_name.upper()] = cur_buf
            # maxsplit=2 keeps submodel names that contain spaces intact.
            cur_name = line.strip().split(maxsplit=2)[-1].upper()
            cur_buf = []
        elif stripped_upper.startswith("0 NOFILE"):
            if cur_name and cur_buf:
                submodels[cur_name.upper()] = cur_buf
            cur_name = None
            cur_buf = []
        elif cur_name is not None:
            cur_buf.append(line.strip())
    if cur_name and cur_buf:
        submodels[cur_name.upper()] = cur_buf

    # Recursively expand the main model and write the flat result.
    output_lines = expand_all(main_lines, submodels)
    with open(output_file, "w", encoding="utf-8") as f:
        for l in output_lines:
            f.write(l + "\n")


def process_all_ldr(input_root, output_root):
    """遍历 input_root 下所有 .ldr 文件,并保存到 output_root 下相同路径,同时复制同名 txt 和 jpeg 文件

    Walk ``input_root``, expand every ``.ldr`` file into the mirrored path
    under ``output_root``, and copy sibling ``.txt`` / ``.jpeg`` files with
    the same base name alongside the output.
    """
    for dirpath, _, filenames in os.walk(input_root):
        for filename in filenames:
            if not filename.lower().endswith(".ldr"):
                continue
            in_file = os.path.join(dirpath, filename)
            rel_path = os.path.relpath(in_file, input_root)
            out_file = os.path.join(output_root, rel_path)

            # Expand the .ldr file itself (also creates the output dir).
            process_ldr(in_file, out_file)

            # BUG FIX: the docstring promised copying sibling .txt/.jpeg
            # files and `shutil` was imported for it, but the original only
            # computed base_name and never copied anything.
            base_name = os.path.splitext(filename)[0]
            out_dir = os.path.dirname(out_file) or "."
            for ext in (".txt", ".jpeg"):
                side_file = os.path.join(dirpath, base_name + ext)
                if os.path.isfile(side_file):
                    shutil.copy2(side_file, os.path.join(out_dir, base_name + ext))


if __name__ == "__main__":
    input_root = "/public/home/wangshuo/gap/assembly/data/car_1k/subset_bottom_300/ldr_rot_test"
    output_root = "/public/home/wangshuo/gap/assembly/data/car_1k/subset_bottom_300/ldr_rot_expand_test"
    process_all_ldr(input_root, output_root)