.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1a1a6e88dd23c2b1854d978f58bc7b414a4658bc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,4 @@
+---
+title: HoLa-BRep
+sdk: docker
+---
\ No newline at end of file
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e4b72fcb23aa2bfd663ca2d666de8df319b75d
--- /dev/null
+++ b/app.py
@@ -0,0 +1,656 @@
+# Frontend
+import os
+import sys
+
+# Cache locations must be set before importing libraries (e.g. huggingface_hub)
+# that read these environment variables at import time
+os.environ["HF_HOME"] = "/data/.huggingface"
+os.environ["TORCH_HOME"] = "/data/.cache/torch"
+
+sys.path.insert(0, "/data")
+
+import gradio as gr
+from pathlib import Path
+from app.AppLayout import *
+from app.GeneratingMethod import *
+from app.ModelDirector import *
+from app.DataProcessor import *
+
+# Theme
+theme = gr.themes.Soft(
+ primary_hue="slate",
+ text_size="lg",
+ font=['IBM Plex Sans', 'ui-sans-serif', 'system-ui', gr.themes.GoogleFont('sans-serif')],
+).set(
+ block_background_fill='*primary_200',
+ button_primary_background_fill='*primary_100',
+ body_background_fill='*secondary_50',
+)
+
+force_light = """
+function refresh() {
+ const url = new URL(window.location);
+
+ if (url.searchParams.get('__theme') !== 'light') {
+ url.searchParams.set('__theme', 'light');
+ window.location.href = url.href;
+ }
+}
+"""
+
+# Custom CSS styles
+custom_css = """
+.gr-tabs.gr-tab-label {
+ text-align: center;
+}
+button[role="tab"] {
+ font-size: 20px;
+}
+div[role="tablist"] {
+ height: var(--size-12);
+}
+
+#top-row {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ width: 100%;
+}
+#button-group {
+ display: flex;
+ gap: 10px;
+ justify-content: flex-end;
+}
+.small-button {
+ max-width: 80px;
+ padding: 6px 10px;
+ font-size: 14px;
+}
+
+@media (min-width: 1024px) {
+ div[role="tablist"] {
+    /* center the tabs on desktop */
+ justify-content: center;
+ }
+}
+
+.tabs {
+ margin-top: 20px;
+}
+
+div[data-testid="markdown"] span p:not(:first-child) {
+ margin-top: unset;
+}
+div[data-testid="markdown"] pre code {
+ font-family: 'IBM Plex Sans', 'ui-sans-serif', 'system-ui', 'sans-serif';
+ font-size: 14px;
+}
+
+/* Media queries */
+@media (max-width: 768px) {
+ button[role="tab"] {
+ font-size: 15px;
+ }
+ p.title1 {
+ font-size: 46px !important;
+ letter-spacing: unset !important;
+ }
+ p.title2 {
+ font-size: 21px !important;
+ }
+ p.title4 {
+ font-size: 16px !important;
+ }
+ p.title5 {
+ font-size: 12px !important;
+ }
+}
+
+h2.heading {
+ font-size: 23px;
+}
+
+@media (max-width: 550px) {
+ .title3-responsive {
+ gap: 0 !important;
+ }
+ .title3-responsive span:first-child,
+ .title3-responsive span:last-child {
+ width: 20px !important;
+ }
+ .title3-responsive span:nth-child(2) {
+ padding: 2px 6px !important;
+ font-size: 12px;
+ }
+}
+
+@media (min-width: 551px) and (max-width: 768px) {
+ .title3-responsive {
+ gap: 0 !important;
+ }
+ .title3-responsive span:first-child,
+ .title3-responsive span:last-child {
+ width: 50px !important;
+ }
+ .title3-responsive span:nth-child(2) {
+ font-size: 15px;
+ }
+}
+
+@media (min-width: 769px) and (max-width: 1024px) {
+ .title3-responsive span:first-child,
+ .title3-responsive span:last-child {
+ width: 70px !important;
+ }
+ .title3-responsive span:nth-child(2) {
+ font-size: 20px;
+ }
+}
+
+@media (max-width: 768px) {
+ .mobile-break {
+ display: block;
+ }
+}
+@media (min-width: 769px) {
+ .mobile-break {
+ display: none;
+ }
+}
+"""
+
+DEMO_NUM = 4
+WIREFRAME_FILE = 0
+SOLID_FILE = 1
+STEP_FILE = 2
+
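+# Maps frontend tab labels to the backend condition keys stored in user_state
+# (matching ModelDirector.get_generating_condition)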
+BACKEND_CONDITION_DICT = {
+ 'Unconditional': 'uncond',
+ 'Point Cloud' : 'pc',
+ 'Text' : 'txt',
+ 'Sketch' : 'sketch',
+ 'SVR' : 'single_img',
+ 'MVR': 'multi_img'
+}
+
+# Dynamically registered functions
+def switch_model(user_state: dict, generate_mode: str, model_index: int, offset: int):
+    model_index = (model_index + offset) % DEMO_NUM
+    generate_mode = BACKEND_CONDITION_DICT[generate_mode]
+    empty_files = [
+        "app/examples/empty_examples/sample.stl",
+        "app/examples/empty_examples/sample.ply",
+        "app/examples/empty_examples/sample.step",
+    ]
+    # Fall back to placeholders if this condition has not been generated yet
+    # or model_index exceeds the number of currently valid models
+    if generate_mode not in user_state or model_index >= len(user_state[generate_mode]):
+        return (
+            model_index,
+            gr.update(value='empty.obj', label=f'Wireframe{model_index + 1}'),
+            gr.update(value='empty.obj', label=f'Solid{model_index + 1}'),
+            gr.update(value=empty_files, label=f'Models{model_index + 1}'),
+        )
+
+    wireframe_model = user_state[generate_mode][model_index][WIREFRAME_FILE]
+    solid_model = user_state[generate_mode][model_index][SOLID_FILE]
+    if not (os.path.exists(wireframe_model) and os.path.exists(solid_model)):
+        gr.Warning("The operation is too frequent!", title="Frequent Operation")
+        return gr.update(), gr.update(), gr.update(), gr.update()
+    return (
+        model_index,
+        gr.Model3D(wireframe_model, label=f'Wireframe{model_index + 1}'),
+        gr.Model3D(solid_model, label=f'Solid{model_index + 1}'),
+        gr.Files(user_state[generate_mode][model_index], label=f'Models{model_index + 1}', interactive=False),
+    )
+
+
+def set_generating_type(mode):
+ return gr.Text(mode, visible=False)
+
+def make_Chinese_descriptions():
+ return (title_cn,
+ description_cn,
+ UncondLayout().get_Chinese_note(),
+ PCLayout().get_Chinese_note(),
+ SketchLayout().get_Chinese_note(),
+ TextLayout().get_Chinese_note(),
+ SVRLayout().get_Chinese_note(),
+ MVRLayout().get_Chinese_note(),
+ notification_mvr_cn,
+ gr.update(label="无条件"),
+ gr.update(label="点云"),
+ gr.update(label="草图"),
+ gr.update(label="文本"),
+ gr.update(label="单视图"),
+ gr.update(label="多视图"),
+ gr.update(label="多视图输入注意事项:"),
+ gr.update(value="生成"),
+ gr.update(value="生成"),
+ gr.update(value="生成"),
+ gr.update(value="生成"),
+ gr.update(value="生成"),
+ gr.update(value="生成"),
+ gr.update(value="上一个"),
+ gr.update(value="下一个"),
+ gr.update(label="实体"),
+ gr.update(label="线框"),
+ gr.update(label="下载"),
+ citation_cn
+ )
+
+def make_English_descriptions():
+ return (title_en,
+ description_en,
+ UncondLayout().get_English_note(),
+ PCLayout().get_English_note(),
+ SketchLayout().get_English_note(),
+ TextLayout().get_English_note(),
+ SVRLayout().get_English_note(),
+ MVRLayout().get_English_note(),
+ notification_mvr_en,
+ gr.update(label="Unconditional"),
+ gr.update(label="Point Cloud"),
+ gr.update(label="Sketch"),
+ gr.update(label="Text"),
+ gr.update(label="SVR"),
+ gr.update(label="MVR"),
+ gr.update(label="MVR input notification:"),
+ gr.update(value="generate"),
+ gr.update(value="generate"),
+ gr.update(value="generate"),
+ gr.update(value="generate"),
+ gr.update(value="generate"),
+ gr.update(value="generate"),
+ gr.update(value="Last"),
+ gr.update(value="Next"),
+ gr.update(label="Solid"),
+ gr.update(label="Wireframe"),
+ gr.update(label="Download"),
+ citation_en
+ )
+
+# Declarations for pre-rendering
+model_solid = gr.Model3D(label='Solid1', value='empty.obj', key="Solid")
+model_wireframe = gr.Model3D(label='Wireframe1', value='empty.obj', key="Wireframe")
+step_file = gr.File(label='Step', file_count='single', file_types=['.step'], interactive=False, visible=False)
+download_files = gr.Files(label="Models1", value=["app/examples/empty_examples/sample.stl", "app/examples/empty_examples/sample.ply", "app/examples/empty_examples/sample.step"], interactive=False, key="Downloads")
+
+input_tab = gr.Tabs()
+
+generating_type = gr.Text("Unconditional",visible=False)
+
+title_en = gr.Markdown(
+ """
+
+
+ HoLa-BRep
+
+
+ HoLa: B-Rep Generation using a Holistic Latent Representation
+
+
+
+
+ ACM Trans. on Graphics (SIGGRAPH) 2025
+
+
+
+
+ Yilin Liu, Duoteng Xu, Xingyao Yu, Xiang Xu, Daniel Cohen-Or, Hao Zhang, Hui Huang*
+
+
+ (Visual Computing Research Center, Shenzhen University)
+
+
+ """
+ )
+title_cn = gr.Markdown(
+ """
+
+
+ HoLa-BRep
+
+
+ HoLa: B-Rep Generation using a Holistic Latent Representation
+
+
+
+
+ ACM Trans. on Graphics (SIGGRAPH) 2025
+
+
+
+
+ 刘奕林, 许铎腾, 余星耀, 徐翔,
+
+ Daniel Cohen-Or, 张皓, 黄惠*
+
+
+ (深圳大学可视计算研究中心)
+
+
+ """
+ )
+
+description_en = gr.Markdown(
+    """
+    # What is HoLa-BRep?
+    HoLa-BRep is a generative model that produces CAD models in boundary representation (BRep) from various conditions, including a point cloud, a single-view image, multi-view images, a single-view sketch, or a text description.
+    It contains **1 unified** BRep variational autoencoder (VAE) that encodes a BRep model's topological and geometric information into a holistic latent space, and a latent diffusion model (LDM) that generates such latents from multiple modalities.
+    Compared with state-of-the-art methods, HoLa-BRep needs only 1 unified VAE with its corresponding latent space and 1 LDM for generation, so training is simpler and free of inter-model dependencies. This is extremely useful when incorporating more modalities, or even mixed-modality training.
+
+    # How to use it?
+    + Please refer to the examples below for more details. You can select the desired **modality** below and upload your own data.
+    + We generate **4** plausible BRep models for each input (**about 3 minutes**) and visualize them in the 3D viewer.
+    + Explore the generated BRep models by rotating, zooming, and panning the 3D viewer, or **download** the wireframe, surface mesh, or solid BRep model as OBJ or STEP files.
+
+    # Project page
+    + https://vcc.tech/research/2025/HolaBRep
+    """
+    )
+description_cn = gr.Markdown(
+ """
+ # HoLa-BRep是什么?
+ HoLa-BRep 是一个多模态CAD生成模型,它支持输入点云、单视角图像、多视角图像、单视角草图或文本描述等多种模态条件,生成边界表示 (BRep) 的 CAD 模型。
+ 它包含**1个统一**的 BRep变分自编码器 (VAE),可将 BRep 模型的拓扑和几何信息编码到一个结构化的低维隐空间,以及一个隐式扩散模型(LDM)用于从多种模态生成这种BRep结构化嵌入。
+ 与目前国内外领先技术相比,HoLa-BRep 只有1个 自编码器和1个扩散模型的特性极大地减少了训练的复杂程度并且利于向更大规模的训练拓展。同时这种单个结构化隐空间的设计模式也解决了现有方法多个模型相互依赖复杂的问题。在结合更多模态甚至混合模态训练时能显著提升训练效率。
+
+ # 如何使用?
+ + 请参考下面的示例。您可以在下面选择所需的**模式**并上传自己的数据。
+ + 我们会为每个输入生成 4 个可选的 BRep 模型(**大约3分钟**),并在 3D 查看器中可视化。
+ + 你可以通过旋转、缩放和平移等操作查看生成的 BRep 模型,也可以将线框、曲面网格或实体 BRep 模型下载为 OBJ 或 STEP 文件。
+
+ # 项目主页
+ + https://vcc.tech/research/2025/HolaBRep
+ """
+ )
+
+citation_en = gr.Markdown(
+ value=
+ """
+ Citation
+
+ If our work is helpful for your research or applications, please cite us via:
+
+ ```
+ @article{HolaBRep25,
+ title={HoLa: B-Rep Generation using a Holistic Latent Representation},
+    author={Yilin Liu and Duoteng Xu and Xingyao Yu and Xiang Xu and Daniel Cohen-Or and Hao Zhang and Hui Huang},
+ journal={ACM Transactions on Graphics (SIGGRAPH)},
+ volume={44},
+ number={4},
+ year={2025},
+ }
+ ```
+ """,
+ height=300,
+ )
+
+citation_cn = gr.Markdown(
+ value=
+ """
+ 引用
+
+ 如果我们的工作对您的研究或者应用有帮助,请通过以下方式进行引用:
+
+
+ ```
+ @article{HolaBRep25,
+ title={HoLa: B-Rep Generation using a Holistic Latent Representation},
+    author={Yilin Liu and Duoteng Xu and Xingyao Yu and Xiang Xu and Daniel Cohen-Or and Hao Zhang and Hui Huang},
+ journal={ACM Transactions on Graphics (SIGGRAPH)},
+ volume={44},
+ number={4},
+ year={2025},
+ }
+ ```
+ """,
+ height=300,
+ )
+
+notification_mvr_en = gr.Markdown("**You can take and upload photos of objects as shown below.**")
+notification_mvr_cn = gr.Markdown("**您可以按如下方式拍摄并上传物体照片**")
+
+descriptions = []
+
+# Main body
+with gr.Blocks(js=force_light, theme=theme, css=custom_css) as inference:
+ with gr.Row(elem_id="top-row"):
+ gr.HTML(
+ """
+
+ """
+ )
+
+ with gr.Row(elem_id="button-group"):
+ btn_cn = gr.Button("中文", elem_classes="small-button")
+ btn_en = gr.Button("English", elem_classes="small-button")
+ btn_cn.click(fn=make_Chinese_descriptions, outputs=descriptions)
+ btn_en.click(fn=make_English_descriptions, outputs=descriptions)
+
+ title_en.render()
+ descriptions.append(title_en)
+
+ description_en.render()
+ descriptions.append(description_en)
+
+ user_state = gr.BrowserState({
+ "user_id" : None,
+ "user_output_dir" : None,
+ })
+
+ generating_type.render()
+
+ with gr.Row():
+ # Input Column
+ with gr.Column() as input_col:
+ with gr.Tabs() as input_tab:
+ with gr.Tab("Unconditional") as uncond_tab:
+ uncond_layout = UncondLayout()
+ uncond_description = uncond_layout.get_English_note()
+ descriptions.append(uncond_description)
+ uncond_input_components = uncond_layout.get_input_components()
+
+ uncond_button = gr.Button("Generate")
+ uncond_button.click(
+ fn=UncondGeneratingMethod().generate(),
+ inputs=[*uncond_input_components, user_state],
+ outputs=[model_wireframe, model_solid, step_file, download_files, user_state]
+ )
+
+ with gr.Tab("Point Cloud") as pc_tab:
+ pc_layout = PCLayout()
+ pc_description = pc_layout.get_English_note()
+ descriptions.append(pc_description)
+ pc_input_components = pc_layout.get_input_components()
+
+ pc_button = gr.Button("Generate")
+ pc_button.click(
+ fn=ConditionedGeneratingMethod(PointCloudDirector(), PointCloudProcessor(), DEMO_NUM).generate(),
+ inputs=[user_state, *pc_input_components],
+ outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+ )
+
+ with gr.Tab("Sketch") as sketch_tab:
+ sketch_layout = SketchLayout()
+ sketch_description = sketch_layout.get_English_note()
+ descriptions.append(sketch_description)
+ sketch_input_components = sketch_layout.get_input_components()
+
+ sketch_button = gr.Button("Generate")
+ sketch_button.click(
+ fn=ConditionedGeneratingMethod(SketchDirector(), SingleImageProcessor(), DEMO_NUM).generate(),
+ inputs=[user_state, *sketch_input_components],
+ outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+ )
+
+ with gr.Tab("Text") as text_tab:
+ text_layout = TextLayout()
+ text_description = text_layout.get_English_note()
+ descriptions.append(text_description)
+ text_input_components = text_layout.get_input_components()
+
+ text_button = gr.Button("Generate")
+ text_button.click(
+ fn=ConditionedGeneratingMethod(TextDirector(), TextProcessor(), DEMO_NUM).generate(),
+ inputs=[user_state, *text_input_components],
+ outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+ )
+
+ with gr.Tab("SVR") as svr_tab:
+ svr_layout = SVRLayout()
+ svr_description = svr_layout.get_English_note()
+ descriptions.append(svr_description)
+ svr_input_components = svr_layout.get_input_components()
+
+ svr_button = gr.Button("Generate")
+ svr_button.click(
+ fn=ConditionedGeneratingMethod(SVRDirector(), SingleImageProcessor(), DEMO_NUM).generate(),
+ inputs=[user_state, *svr_input_components],
+ outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+ )
+
+ with gr.Tab("MVR") as mvr_tab:
+ mvr_layout = MVRLayout()
+ mvr_description = mvr_layout.get_English_note()
+ descriptions.append(mvr_description)
+ with gr.Accordion("MVR input notification:", open=False) as mvr_notification:
+ notification_mvr_en.render()
+                        gr.Image(value='app/examples/mvr.png', show_download_button=False, show_label=False, show_share_button=False, interactive=False)
+
+ with gr.Row():
+ mvr_input_components = mvr_layout.get_input_components()
+ mvr_button = gr.Button("Generate")
+ mvr_button.click(
+ fn=ConditionedGeneratingMethod(MVRDirector(), MultiImageProcessor(), DEMO_NUM).generate(),
+ inputs=[user_state, *mvr_input_components],
+ outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+ )
+
+ uncond_tab.select(fn=set_generating_type, inputs=gr.Text(uncond_tab.label, visible=False), outputs=generating_type)
+ pc_tab.select(fn=set_generating_type, inputs=gr.Text(pc_tab.label, visible=False), outputs=generating_type)
+ sketch_tab.select(fn=set_generating_type, inputs=gr.Text(sketch_tab.label, visible=False), outputs=generating_type)
+ svr_tab.select(fn=set_generating_type, inputs=gr.Text(svr_tab.label, visible=False), outputs=generating_type)
+ mvr_tab.select(fn=set_generating_type, inputs=gr.Text(mvr_tab.label, visible=False), outputs=generating_type)
+ text_tab.select(fn=set_generating_type, inputs=gr.Text(text_tab.label, visible=False), outputs=generating_type)
+
+ descriptions.append(notification_mvr_en)
+ descriptions.append(uncond_tab)
+ descriptions.append(pc_tab)
+ descriptions.append(sketch_tab)
+ descriptions.append(text_tab)
+ descriptions.append(svr_tab)
+ descriptions.append(mvr_tab)
+ descriptions.append(mvr_notification)
+ descriptions.append(uncond_button)
+ descriptions.append(pc_button)
+ descriptions.append(sketch_button)
+ descriptions.append(text_button)
+ descriptions.append(svr_button)
+ descriptions.append(mvr_button)
+
+
+ # Output demonstration
+ with gr.Column() as output_col:
+ with gr.Tabs():
+ with gr.Tab("Solid") as solid_tab:
+ model_solid.render()
+ with gr.Tab("Wireframe") as wireframe_tab:
+ model_wireframe.render()
+ with gr.Tab("Download") as download_tab:
+ step_file.render()
+ download_files.render()
+
+
+ model_index = gr.Number(value=0, visible=False)
+ with gr.Row() as switch_row:
+ last_button = gr.Button("Last")
+ next_button = gr.Button("Next")
+
+ last_button.click(
+ fn=switch_model,
+ inputs=[user_state, generating_type, model_index, gr.Number(-1, visible=False)],
+ outputs=[model_index, model_wireframe, model_solid, download_files])
+ next_button.click(
+ fn=switch_model,
+ inputs=[user_state, generating_type, model_index, gr.Number(1, visible=False)],
+ outputs=[model_index, model_wireframe, model_solid, download_files])
+
+ descriptions.append(last_button)
+ descriptions.append(next_button)
+ descriptions.append(solid_tab)
+ descriptions.append(wireframe_tab)
+ descriptions.append(download_tab)
+
+ # Examples
+ @gr.render(inputs=[generating_type], triggers=[generating_type.change, inference.load])
+ def show_examples(generate_mode):
+ if generate_mode == "Unconditional":
+ pass
+
+ elif generate_mode == "Point Cloud":
+ pc_samples=[
+ [Path("app/examples/pc_examples") / sample_number / "pc.png"] for sample_number in os.listdir("app/examples/pc_examples") if sample_number != "take_photo.py"
+ ]
+ with gr.Row():
+            def dummy_pc_func(pic_path):
+                # The dataset shows a .png preview; the actual input is the sibling .ply
+                return Path(pic_path[0]).with_suffix(".ply").as_posix()
+ for i in range(len(pc_samples)):
+ with gr.Column(min_width=100):
+ dummy_image = gr.Image(type="filepath", format="png", visible=False)
+ point_cloud_data = gr.Dataset(
+ label=f"Example{i+1}",
+ components=[dummy_image],
+ samples=[pc_samples[i]],
+ layout="table"
+ )
+ point_cloud_data.click(dummy_pc_func, inputs=point_cloud_data, outputs=pc_input_components)
+
+ elif generate_mode == "Text":
+ text_data = gr.Dataset(
+ components=text_input_components,
+ samples=[
+ ["The object is a rectangular prism with two protruding L-shaped sections on opposite sides."],
+ ["This design creates a rectangular plate with rounded edges. The plate measures about 0.3214 units in length, 0.75 units in width, and 0.0429 units in height. The rounded edges give the plate a smooth, aesthetically pleasing appearance."],
+ ["The U-shaped bracket has a flat top and a curved bottom. The design begins by creating a new coordinate system with specific Euler angles and a translation vector. A two-dimensional sketch is then drawn, forming a complex shape with multiple lines and arcs. This sketch is scaled down, rotated, and translated to align with the coordinate system. The sketch is extruded to create a three-dimensional model. The final dimensions of the bracket are approximately 0.7 units in length, 0.75 units in width, and 0.19 units in height. The bracket is designed to integrate seamlessly with other components, providing a sturdy and functional structure."]
+ ],
+ layout='table',
+ label="Examples",
+ headers=["Prompt"]
+ )
+ def dummy_func(text):
+ return gr.Text(text[0])
+ text_data.click(fn=dummy_func, inputs=text_data, outputs=text_input_components)
+
+ elif generate_mode == "Sketch":
+ with gr.Row():
+ for i in range(12):
+ with gr.Column(min_width=100):
+ example = gr.Examples(
+ inputs=sketch_input_components,
+ examples=[
+ [f"app/examples/sketch_examples/{i + 1}.png"]
+ ],
+ label=f"Example{i+1}"
+ )
+
+ elif generate_mode == "SVR":
+ with gr.Row():
+ for i in range(12):
+ with gr.Column(min_width=100):
+ example = gr.Examples(
+ inputs=svr_input_components,
+ examples=[
+ [f"app/examples/svr_examples/{i + 1}.png"]
+ ],
+ label=f"Example{i+1}"
+ )
+
+ elif generate_mode == "MVR":
+        file_num = ["00017462", "00131007", "00189220", "00218887"]
+        with gr.Row():
+            for i in range(4):
+ with gr.Column():
+ example = gr.Examples(
+ inputs=mvr_input_components,
+ examples=[
+ [f"app/examples/mvr_examples/{file_num[i]}_img0.png", f"app/examples/mvr_examples/{file_num[i]}_img1.png", f"app/examples/mvr_examples/{file_num[i]}_img2.png", f"app/examples/mvr_examples/{file_num[i]}_img3.png"],
+ ],
+ label=f"Example{i+1}"
+ )
+
+ citation_en.render()
+ descriptions.append(citation_en)
+
+
+if __name__ == "__main__":
+ inference.launch(allowed_paths=['/data'], server_name='0.0.0.0', server_port=7860)
\ No newline at end of file
diff --git a/app/AppLayout.py b/app/AppLayout.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5ab1df760a2edc1f583f293aa756cd8a8b5c2d0
--- /dev/null
+++ b/app/AppLayout.py
@@ -0,0 +1,291 @@
+import time
+
+import gradio as gr
+from typing import List
+from abc import ABC, abstractmethod
+
+# Tab Interface
+class AppLayout(ABC):
+ @abstractmethod
+ def get_English_note(self) -> gr.Markdown:
+ pass
+
+ @abstractmethod
+ def get_Chinese_note(self):
+ pass
+
+ @abstractmethod
+ def get_input_components(self) -> List[gr.Component]:
+ pass
+
+
+# Concrete Implementation
+class UncondLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+            + We generate 4 BRep models from noise sampled from a Gaussian distribution.
+            + The model is trained on the ABC dataset, with a complexity range of 10~100 surface primitives.
+            + Compared with state-of-the-art BRep generation methods, HoLa-BRep improves the validity ratio of the generated models by 20%-40% on both the DeepCAD dataset and the ABC dataset.
+            + Try adjusting the seed for varied results.
+
+
+
+
+
+
+
+
+
+ """
+ )
+
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **无条件生成介绍:**
+
+ + 我们从高斯分布的采样噪声中生成 4 个 BRep 模型。
+ + 模型在 ABC 数据集上进行训练,复杂度范围为 10~100 个表面基元。
+ + 与最先进的 BRep 生成方法相比,HoLa-BRep 在 DeepCAD 数据集和 ABC 数据集上生成模型的有效率提高了 20%-40%。
+ + 请随意调整采样种子,以获得不同的结果。
+
+
+
+
+
+
+
+
+
+ """
+ )
+
+ def get_input_components(self) -> List[gr.Component]:
+ return [
+ gr.Number(
+ label="Seed",
+ value=int(time.time()),
+ minimum=0,
+ maximum=2**31-1,
+ step=1
+ ),
+ ]
+
+
+class TextLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+ + Text can be either abstract or descriptive.
+ + We use a frozen gte-large-en-v1.5 to extract the feature from the text description.
+            + While we use the existing Text2CAD dataset, which contains more descriptive text, out-of-distribution abstract text prompts also work.
+
+
+
+ """
+ )
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **文本条件生成介绍:**
+
+ + HoLa-BRep支持简单抽象的文本和复杂的描述性文本。
+ + 我们使用冻结的gte-large-en-v1.5从文本描述中提取特征。
+ + 虽然我们使用的是包含更多复杂描述性文本的Text2CAD 数据集,但HoLa-BRep同样适用于简单抽象的文本输入。
+ + **当前文本输入仅支持英文,敬请谅解。**
+
+
+ """
+ )
+
+ def get_input_components(self) -> List[gr.Component]:
+ return [
+            gr.Textbox(lines=8, max_length=1024, label="Text"),
+ ]
+
+
+class PCLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+            + The input point cloud should be in .ply format, with positions normalized to -1~+1 and normal vectors included.
+            + The input point cloud can be either sparse or dense. We will downsample the point cloud to 2048 points.
+            + After test-time augmentation, the validity of the generated B-Rep models can reach ~98%.
+            + We use a small, trainable PointNet++ to extract the feature from the point cloud.
+            + This checkpoint is only for a clean point cloud without any noise.
+            + Point clouds contain less ambiguity and usually yield the best conditional generation results compared to other modalities.
+ """
+ )
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **点云条件生成介绍:**
+
+ + HoLa-BRep接受.ply 格式的点云输入,且坐标值应该归一化到-1~+1并带有法向信息。
+ + HoLa-BRep接受稀疏或密集点云,网络处理点云时会将其降采样到2048 个点。
+ + 经过测试时增强后点云条件生成的有效性可达98%以上。
+ + 我们使用一个小型可训练的 PointNet++ 从点云中提取特征。
+ + 目前开放权重仅支持没有任何噪声的点云。
+ + 三维点云作为条件输入具有更少的歧义性,与其他条件相比通常能产生最佳的生成结果。
+ """
+ )
+
+ def get_input_components(self):
+ return [
+ gr.File(
+ label='PC',
+ file_count='single',
+ ),
+ ]
+
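+# A minimal sketch of preparing a compliant input .ply from a mesh with open3d;
+# "model.obj" and "pc.ply" are hypothetical paths, normals are required, and the
+# backend normalizes and downsamples the points itself:
+#
+#   import open3d as o3d
+#   mesh = o3d.io.read_triangle_mesh("model.obj")
+#   pcd = mesh.sample_points_uniformly(number_of_points=4096)
+#   pcd.estimate_normals()
+#   o3d.io.write_point_cloud("pc.ply", pcd)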
+
+class SketchLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+            + The input sketch should be in a 1:1 ratio on a white background; it will be downsampled to 224*224 before being fed into the network.
+            + The input sketch should be a perspective projection rather than an orthographic projection.
+ + We use a frozen DINOv2 to extract the feature from the sketch image.
+ + We obtained the training sketches using wireframe rendering in OpenCascade.
+
+
+
+ """
+ )
+
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **线框图条件生成介绍:**
+
+ + 输入线框图的长宽比应为1:1,背景为白色,系统处理时会降采样到224*224分辨率。
+ + 输入的线框图应该是透视投影,而不是正交投影。
+ + 我们使用冻结的 DINOv2 从线框图图像中提取特征。
+ + 我们使用 OpenCascade 中的线框渲染来获取训练线框图。
+
+
+
+ """
+ )
+
+ def get_input_components(self) -> List[gr.Component]:
+ return [
+ gr.Image(
+ label='Sketch',
+ type='filepath',
+ sources=["upload"],
+ interactive=True,
+ )
+ ]
+
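+# A minimal sketch of padding an arbitrary image to a 1:1 ratio on a white
+# background before upload ("sketch.png" is a hypothetical path):
+#
+#   from PIL import Image
+#   img = Image.open("sketch.png").convert("RGB")
+#   side = max(img.size)
+#   canvas = Image.new("RGB", (side, side), "white")
+#   canvas.paste(img, ((side - img.width) // 2, (side - img.height) // 2))
+#   canvas.save("sketch_square.png")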
+
+class SVRLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+            + The input image should be in a 1:1 ratio on a white background; it will be downsampled to 224*224 before being fed into the network.
+            + Keep the object in grey for better generation results.
+            + We use a frozen DINOv2 to extract the feature from the input image.
+ + We obtained the training images using solid rendering in OpenCascade.
+
+
+
+ """
+ )
+
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **单视角图片条件生成介绍:**
+
+ + 输入图片的长宽比应为1:1,背景为白色,系统处理时会降采样到224*224分辨率。
+ + 为了获得更好的生成效果,请将对象保持为灰色。
+ + 我们使用冻结的 DINOv2 从草图图像中提取特征。
+ + 我们使用 OpenCascade 中的实体渲染来获取训练图像。
+
+
+
+ """
+ )
+
+ def get_input_components(self) -> List[gr.Component]:
+ return [
+ gr.Image(
+ label='Image',
+ type='filepath',
+ sources=["upload"],
+ interactive=True,
+ ),
+ ]
+
+
+class MVRLayout(AppLayout):
+
+ def get_English_note(self):
+ return gr.Markdown(
+ """
+ **Note:**
+
+            + Similar to the single-view condition, the input images should be in a 1:1 ratio and taken from 4 fixed angles; **see the camera pose schematic**.
+            + Image features are extracted by a frozen DINOv2 and averaged after adding a positional encoding of the camera **pose** embedding.
+ """
+ )
+
+ def get_Chinese_note(self):
+ return gr.Markdown(
+ """
+ **多视角图片条件生成介绍:**
+
+ + 与单视角条件类似,输入图像应为 1:1长宽比和4 个固定角度,**见相机位姿示意图**。
+ + 图像特征由冻结的 DINOv2 提取,并在对相机**位姿**特征进行位置编码后取平均值。
+
+ """
+ )
+
+    def get_input_components(self) -> List[gr.Component]:
+        return [
+            gr.Image(
+                label='View1',
+                type='filepath',
+                interactive=True,
+                sources=["upload"]
+            ),
+            gr.Image(
+                label='View2',
+                type='filepath',
+                interactive=True,
+                sources=["upload"]
+            ),
+            gr.Image(
+                label='View3',
+                type='filepath',
+                interactive=True,
+                sources=["upload"]
+            ),
+            gr.Image(
+                label='View4',
+                type='filepath',
+                interactive=True,
+                sources=["upload"]
+            ),
+        ]
diff --git a/app/DataProcessor/DataProcessor.py b/app/DataProcessor/DataProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2d8976323d45be224043deb2db7653c25ebca0b
--- /dev/null
+++ b/app/DataProcessor/DataProcessor.py
@@ -0,0 +1,25 @@
+import torch
+from abc import abstractmethod, ABC
+
+class DataProcessor(ABC):
+ NUM_PROPOSALS = 16
+
+ def __init__(self, device=None):
+ if device is None:
+ self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ else:
+ self._device = device
+
+ def process(self, input_data):
+ data = dict()
+ data["conditions"] = self.process_input_data(input_data)
+ return data
+
+ @abstractmethod
+ def process_input_data(self, input_data):
+ pass
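+
+# Concrete processors implement process_input_data(); process() wraps the result
+# as {"conditions": ...}, which is passed to the diffusion model's inference()
+# as v_data. A hypothetical call, for illustration only:
+#
+#   processor = TextProcessor()
+#   data = processor.process(("a rectangular plate with rounded edges",))
+#   # data == {"conditions": {"txt": [prompt] * DataProcessor.NUM_PROPOSALS}}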
diff --git a/app/DataProcessor/ImageProcessor.py b/app/DataProcessor/ImageProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3455e2d9ae3f7acf754255e2597fec8362e39a9
--- /dev/null
+++ b/app/DataProcessor/ImageProcessor.py
@@ -0,0 +1,23 @@
+import torch
+import numpy as np
+import torchvision.transforms as T
+
+from pathlib import Path
+from PIL import Image
+from app.DataProcessor.DataProcessor import DataProcessor
+
+class ImageProcessor(DataProcessor):
+ def _get_img_tensor(self, image_file: Path) -> torch.Tensor:
+ """
+        Return a tensor of shape (3, 224, 224).
+ """
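+        # Resize to 224x224 and normalize with ImageNet statistics
+        # (the standard preprocessing for a frozen DINOv2 encoder)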
+ transform = T.Compose([
+ T.ToPILImage(),
+ T.Resize((224, 224)),
+ T.ToTensor(),
+ T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+ ])
+ img = np.array(Image.open(Path(image_file)).convert("RGB"))
+ img = transform(img).to(self._device)
+ return img
\ No newline at end of file
diff --git a/app/DataProcessor/MultiImageProcessor.py b/app/DataProcessor/MultiImageProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1fdf7e64e56f4c4792ab1f042998cc9b3f0ba20
--- /dev/null
+++ b/app/DataProcessor/MultiImageProcessor.py
@@ -0,0 +1,23 @@
+import torch
+
+from pathlib import Path
+from typing import Tuple
+from app.DataProcessor.ImageProcessor import ImageProcessor
+
+class MultiImageProcessor(ImageProcessor):
+    def process_input_data(self, image_files: Tuple[str, ...]):
+        multi_imgs = None
+        for one_image in image_files:
+            # (1, 1, 3, 224, 224): prepend batch and view dimensions
+            single_img = self._get_img_tensor(Path(one_image))[None, None, ...]
+            if multi_imgs is None:
+                multi_imgs = single_img
+            else:
+                multi_imgs = torch.cat((multi_imgs, single_img), dim=1)
+        # (NUM_PROPOSALS, num_views, 3, 224, 224): repeat the views for each proposal
+        multi_imgs = multi_imgs.repeat(self.NUM_PROPOSALS, 1, 1, 1, 1)
+        img_id = torch.tensor([list(range(len(image_files)))], device=self._device).repeat(self.NUM_PROPOSALS, 1)
+        return {
+            "imgs" : multi_imgs,
+            "img_id" : img_id
+        }
\ No newline at end of file
diff --git a/app/DataProcessor/PointCloudProcessor.py b/app/DataProcessor/PointCloudProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..343cfc1fd976cccbf629e68b8d7e3411b4c31fa3
--- /dev/null
+++ b/app/DataProcessor/PointCloudProcessor.py
@@ -0,0 +1,44 @@
+import torch
+import numpy as np
+import open3d as o3d
+from pathlib import Path
+from app.DataProcessor.DataProcessor import DataProcessor
+
+'''
+Raw data should be a PathLike or str path; only file paths are accepted.
+'''
+class PointCloudProcessor(DataProcessor):
+ PC_DOWNSAMPLE_NUM = 4096
+ def process_input_data(self, pc_file_path):
+ points_tensor = self._get_point_cloud_tensor(Path(pc_file_path[0]))
+ return {"points" : points_tensor[None, None, :, :].repeat(self.NUM_PROPOSALS, 1, 1, 1)}
+
+ def _get_point_cloud_tensor(self, input_file: Path | str) -> torch.Tensor:
+ # Read point cloud
+        pcd = o3d.io.read_point_cloud(str(input_file))
+ points = np.array(pcd.points)
+
+ # Check normals
+ if pcd.has_normals():
+ normals = np.array(pcd.normals)
+ else:
+ normals = np.zeros_like(points)
+
+ # Concatenate points and normals
+ points = np.concatenate([self._normalize_points(points), normals], axis=1)
+
+        # Downsample (sample with replacement if the cloud has fewer points than requested)
+        replace = points.shape[0] < self.PC_DOWNSAMPLE_NUM
+        index = np.random.choice(points.shape[0], self.PC_DOWNSAMPLE_NUM, replace=replace)
+        points = points[index]
+
+ return torch.tensor(points, dtype=torch.float32).to(self._device)
+
+    def _normalize_points(self, points):
+        # Center on the bounding-box midpoint, then scale the longest edge to
+        # 1.8 (0.9 * 2) so every coordinate lies inside [-0.9, 0.9]
+        bbox_min = np.min(points, axis=0)
+ bbox_max = np.max(points, axis=0)
+ center = (bbox_min + bbox_max) / 2
+ points -= center
+ scale = np.max(bbox_max - bbox_min)
+ points /= scale
+ points *= 0.9 * 2
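+        # Worked example: a bbox spanning [0, 2] x [0, 1] x [0, 1] has center
+        # (1, 0.5, 0.5) and longest edge 2; centering, dividing by 2, and
+        # multiplying by 1.8 keeps every coordinate inside [-0.9, 0.9].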
+ return points
\ No newline at end of file
diff --git a/app/DataProcessor/SingleImageProcessor.py b/app/DataProcessor/SingleImageProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4dc1b499202a691b049603eb71ac01181802a6c
--- /dev/null
+++ b/app/DataProcessor/SingleImageProcessor.py
@@ -0,0 +1,14 @@
+import torch
+
+from pathlib import Path
+from typing import Tuple
+from app.DataProcessor.ImageProcessor import ImageProcessor
+
+class SingleImageProcessor(ImageProcessor):
+    def process_input_data(self, image_file: Tuple[str, ...]):
+        img = self._get_img_tensor(Path(image_file[0]))
+        # (NUM_PROPOSALS, 1, 3, 224, 224): one view repeated for each proposal
+        img = img[None, None, :].repeat(self.NUM_PROPOSALS, 1, 1, 1, 1)
+        img_id = torch.tensor([[0]], device=self._device).repeat(self.NUM_PROPOSALS, 1)
+ return {
+ "imgs" : img,
+ "img_id" : img_id
+ }
\ No newline at end of file
diff --git a/app/DataProcessor/TxtProcessor.py b/app/DataProcessor/TxtProcessor.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b8f61012b09d253c38cb13412176335b0f900e7
--- /dev/null
+++ b/app/DataProcessor/TxtProcessor.py
@@ -0,0 +1,5 @@
+from typing import Tuple
+
+from app.DataProcessor.DataProcessor import DataProcessor
+
+class TextProcessor(DataProcessor):
+    def process_input_data(self, text: Tuple[str, ...]):
+        # Repeat the single prompt once per proposal
+        return {"txt": [text[0]] * self.NUM_PROPOSALS}
\ No newline at end of file
diff --git a/app/DataProcessor/__init__.py b/app/DataProcessor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be764bbb8b16e56f6cedc808c7499a687dcee887
--- /dev/null
+++ b/app/DataProcessor/__init__.py
@@ -0,0 +1,14 @@
+from .DataProcessor import DataProcessor
+from .ImageProcessor import ImageProcessor
+from .MultiImageProcessor import MultiImageProcessor
+from .PointCloudProcessor import PointCloudProcessor
+from .SingleImageProcessor import SingleImageProcessor
+from .TxtProcessor import TextProcessor
+
+# This package contains modules for processing data in the HoLa-Brep-Space application.
+
+
+__all__ = ["DataProcessor", "ImageProcessor", "MultiImageProcessor", "PointCloudProcessor", "SingleImageProcessor", "TextProcessor"]
\ No newline at end of file
diff --git a/app/GeneratingMethod/ConditionedGenerating.py b/app/GeneratingMethod/ConditionedGenerating.py
new file mode 100644
index 0000000000000000000000000000000000000000..95aa31eaea923ff5905ca14e95439a1a30c4a848
--- /dev/null
+++ b/app/GeneratingMethod/ConditionedGenerating.py
@@ -0,0 +1,246 @@
+import os
+import shutil
+import uuid
+import torch
+import gradio as gr
+import numpy as np
+import ray
+import time
+
+from pathlib import Path
+
+from diffusion.utils import export_edges
+from construct_brep import construct_brep_from_datanpz
+from app.DataProcessor import DataProcessor
+from app.ModelDirector import ModelDirector
+
+_EDGE_FILE = 0
+_SOLID_FILE = 1
+_STEP_FILE = 2
+
+class ConditionedGeneratingMethod():
+ def __init__(
+ self,
+ model_building_director: ModelDirector,
+ dataprocessor: DataProcessor,
+ model_num_to_return: int,
+ model_seed: int = 0,
+ output_main_dir: Path | str = Path('./outputs')
+ ):
+ self.director = model_building_director
+ self.dataprocessor = dataprocessor
+ self.model_num_to_return = model_num_to_return
+ self.model_seed = model_seed
+ self.output_main_dir = output_main_dir
+
+ def generate(self):
+ def generating_method(browser_state: dict, *inputs):
+ try:
+ # Some checks
+ assert len(inputs) > 0
+ self._user_state_check(browser_state)
+ self._empty_input_check(inputs)
+
+                # Inference device (also shouldn't be decided here)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Process user input data
+ tensor_data = self.dataprocessor.process(inputs)
+
+ # Basic configuration of a model
+ self.director.config_setup()
+                model_builder = self.director.builder
+
+ # Should be refactored in the future since picking an output folder is not the responsibility of a model
+ diffusion_output_dir = self._get_diffusion_output_dir(browser_state, self.director.get_generating_condition())
+ postprocess_output_dir = self._get_postprocess_output_dir(browser_state, self.director.get_generating_condition())
+
+ model_builder.setup_output_dir(diffusion_output_dir)
+ model_builder.setup_seed(self.model_seed)
+
+ model_builder.make_model(device)
+ model = model_builder.model
+
+ #############
+ # Inference #
+ #############
+ gr.Info("Start diffusing", title="Runtime Info")
+ with torch.no_grad():
+ pred_results = model.inference(self.dataprocessor.NUM_PROPOSALS, device, v_data=tensor_data, v_log=True)
+
+ # Save intermediate files for post-processing
+ for i, result in enumerate(pred_results):
+ diffusion_output_subdir = diffusion_output_dir / f"00_{i:02d}"
+ diffusion_output_subdir.mkdir(parents=True, exist_ok=True)
+
+ export_edges(result["pred_edge"], (diffusion_output_subdir / "edge.obj").as_posix())
+
+ np.savez_compressed(
+ file = (diffusion_output_subdir / "data.npz").as_posix(),
+ pred_face_adj_prob = result["pred_face_adj_prob"],
+ pred_face_adj = result["pred_face_adj"].cpu().numpy(),
+ pred_face = result["pred_face"],
+ pred_edge = result["pred_edge"],
+ pred_edge_face_connectivity = result["pred_edge_face_connectivity"],
+ )
+ gr.Info("Finished diffusing", title="Runtime Info")
+
+ ###################
+ # Post-Processing #
+ ###################
+                # Parallel post-processing setup (ray worker pool)
+ gr.Info("Start post-processing!", title="Runtime Info")
+ if not ray.is_initialized():
+ ray.init(
+ num_cpus=2,
+ )
+
+ construct_brep_from_datanpz_ray = ray.remote(num_cpus=1, max_retries=0)(construct_brep_from_datanpz)
+ diffusion_results = sorted(os.listdir(diffusion_output_dir))
+
+ tasks = [
+ construct_brep_from_datanpz_ray.remote(
+ data_root=diffusion_output_dir,
+ out_root=postprocess_output_dir,
+ folder_name=model_number,
+ v_drop_num=1,
+ use_cuda=False,
+ from_scratch=True,
+ is_log=False,
+ is_ray=True,
+ is_optimize_geom=True,
+ isdebug=False,
+ is_save_data=True
+ )
+ for model_number in diffusion_results
+ ]
+
+ results = []
+ success_count = 0
+ while tasks and success_count < self.model_num_to_return:
+ done_ids, tasks = ray.wait(tasks, num_returns=1, timeout=30)
+ for done_id in done_ids:
+ try:
+ result = ray.get(done_id)
+ results.append(result)
+
+ # Delay just a bit to ensure file handles are released
+ time.sleep(0.2)
+
+                            # Recount 'success.txt' markers across all output folders;
+                            # recomputing avoids double-counting across loop iterations
+                            success_count = sum(
+                                1 for done_folder in postprocess_output_dir.iterdir()
+                                if 'success.txt' in os.listdir(done_folder)
+                            )
+
+ except Exception as e:
+ print(f"Task failed or timed out: {e}")
+ results.append(None)
+
+ if success_count >= self.model_num_to_return:
+ # Make sure the files are written successfully
+ time.sleep(5.0)
+ break
+ time.sleep(5.0)
+ gr.Info("Finished post-processing!", title="Runtime Info")
+ # Get valid model serial numbers
+ valid_models = self._get_valid_models(postprocess_output_dir)
+
+ #####################
+ # Update User State #
+ #####################
+ browser_state = self._update_user_state(browser_state, postprocess_output_dir, valid_models)
+
+ # Check if there's no valid output
+ self._postprocess_output_check(valid_models)
+
+
+                # Parallel post-processing may return more than 4 valid models
+                gr.Info(f"{min(len(valid_models), 4)} valid models generated!", title="Finish generating")
+ condition = self.director.get_generating_condition()
+
+ # Return the first model as the default demonstration
+ edge_file = browser_state[condition][0][_EDGE_FILE]
+ solid_file = browser_state[condition][0][_SOLID_FILE]
+ step_file = browser_state[condition][0][_STEP_FILE]
+
+ return browser_state, edge_file, solid_file, step_file, browser_state[condition][0]
+
+ except EmptyInputException as input_e:
+ gr.Warning(str(input_e), title="Empty Input")
+
+            except GeneratingException as generating_e:
+ gr.Warning(str(generating_e), title="No Valid Generation")
+
+ except UnicodeEncodeError as uni_error:
+ gr.Warning("We sincerely apologize, but we currently only support English.", title="English Support Only")
+
+ except FileNotFoundError as file_e:
+ gr.Warning("The operation is too frequent!", title="Frequent Operation")
+
+ except Exception as e:
+ print(e)
+ gr.Warning("Something bad happened. Please try some other models", title="Unknown Error")
+
+ return browser_state, gr.update(), gr.update(), gr.update(), gr.update()
+
+ return generating_method
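+
+    # The returned closure is wired directly to a Gradio button in app.py, e.g.:
+    #
+    #   pc_button.click(
+    #       fn=ConditionedGeneratingMethod(PointCloudDirector(), PointCloudProcessor(), DEMO_NUM).generate(),
+    #       inputs=[user_state, *pc_input_components],
+    #       outputs=[user_state, model_wireframe, model_solid, step_file, download_files]
+    #   )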
+
+ def _update_user_state(self, browser_state, postprocess_output_dir, valid_model):
+ # Unstable. May be refactored in the future
+ condition = self.director.get_generating_condition()
+ browser_state[condition] = list()
+ for i, model_number in enumerate(valid_model):
+ if (postprocess_output_dir / model_number / 'debug_face_loop' / 'optimized_edge.obj').exists():
+ edge = (postprocess_output_dir / model_number / 'debug_face_loop' / 'optimized_edge.obj').as_posix()
+ else:
+ edge = (postprocess_output_dir / model_number / 'debug_face_loop' / 'edge.obj').as_posix() # Hard coding is not good.
+ solid = (postprocess_output_dir / model_number / 'recon_brep.stl').as_posix()
+ step = (postprocess_output_dir / model_number / 'recon_brep.step').as_posix()
+ browser_state[condition].append([edge, solid, step])
+ return browser_state
+
+ def _postprocess_output_check(self, valid_model):
+ if len(valid_model) <= 0:
+            raise GeneratingException("No Valid Model Generated!")
+
+ def _empty_input_check(self, inputs):
+ for input_component in inputs:
+ if input_component is None:
+ raise EmptyInputException("Empty input exists!")
+
+ def _user_state_check(self, state_dict):
+ if state_dict['user_id'] is None:
+ state_dict['user_id'] = uuid.uuid4()
+ if state_dict['user_output_dir'] is None:
+ state_dict['user_output_dir'] = Path(self.output_main_dir) / f"user_{state_dict['user_id']}"
+ os.makedirs(state_dict['user_output_dir'], exist_ok=True)
+
+ def _get_valid_models(self, postprocess_output: Path):
+ # Get valid **model number** after post-processing
+ output_folders = [model_folder for model_folder in os.listdir(postprocess_output) if 'success.txt' in os.listdir(postprocess_output / model_folder)]
+ return output_folders
+
+    def _get_diffusion_output_dir(self, state_dict, condition):
+        # Create and clean the diffusion output directory
+        diffusion_output_dir = Path(state_dict['user_output_dir']) / condition
+        os.makedirs(diffusion_output_dir, exist_ok=True)
+        if len(os.listdir(diffusion_output_dir)) > 0:
+            shutil.rmtree(diffusion_output_dir)
+            os.makedirs(diffusion_output_dir, exist_ok=True)
+        return diffusion_output_dir
+
+    def _get_postprocess_output_dir(self, state_dict, condition):
+        # Create and clean the post-process output directory
+        postprocess_output_dir = Path(state_dict['user_output_dir']) / f'{condition}_post'
+        os.makedirs(postprocess_output_dir, exist_ok=True)
+        if len(os.listdir(postprocess_output_dir)) > 0:
+            shutil.rmtree(postprocess_output_dir)
+            os.makedirs(postprocess_output_dir, exist_ok=True)
+        return postprocess_output_dir
+
+class GeneratingException(Exception):
+    """Custom exception raised when generation fails."""
+    pass
+
+class EmptyInputException(Exception):
+ """Custom exception if the input is empty."""
+ pass
diff --git a/app/GeneratingMethod/UnconditionedGenerating.py b/app/GeneratingMethod/UnconditionedGenerating.py
new file mode 100644
index 0000000000000000000000000000000000000000..8362b1f33725666170c05c11d17e92f5cf6f817d
--- /dev/null
+++ b/app/GeneratingMethod/UnconditionedGenerating.py
@@ -0,0 +1,138 @@
+import os
+import shutil
+import subprocess
+import uuid
+import gradio as gr
+
+from pathlib import Path
+
+from app.inference import inference_batch_postprocess
+
+# Should be refactored in the future
+class UncondGeneratingMethod():
+ def __init__(self, output_main_dir=Path('./outputs')):
+ self.output_main_dir = output_main_dir
+
+ def generate(self):
+ def generate_uncond(seed, state: gr.BrowserState):
+ try:
+ state = check_user_output_dir(state, self.output_main_dir)
+
+ generate_output = Path(state['user_output_dir']) / 'unconditional'
+ os.makedirs(generate_output, exist_ok=True)
+ if len(os.listdir(generate_output)) > 0:
+ shutil.rmtree(generate_output)
+ os.makedirs(generate_output, exist_ok=True)
+
+ # Get the generated model
+ command = [
+ "python", "-m", "diffusion.train_diffusion",
+ "trainer.evaluate=true",
+ "trainer.batch_size=1000",
+ "trainer.gpu=1",
+ f"trainer.test_output_dir={generate_output.as_posix()}",
+ "trainer.resume_from_checkpoint=YuXingyao/HoLa-Brep/Diffusion_uncond_1100k.ckpt",
+ "trainer.num_worker=1",
+ "trainer.accelerator=\"32-true\"",
+ "trainer.exp_name=test",
+ "dataset.name=Dummy_dataset",
+ "dataset.length=32",
+ "dataset.num_max_faces=30",
+ "dataset.condition=None",
+ f"dataset.random_seed={seed}",
+ "model.name=Diffusion_condition",
+ "model.autoencoder_weights=YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt",
+ "model.autoencoder=AutoEncoder_1119_light",
+ "model.with_intersection=true",
+ "model.in_channels=6",
+ "model.dim_shape=768",
+ "model.dim_latent=8",
+ "model.gaussian_weights=1e-6",
+ "model.pad_method=random",
+ "model.diffusion_latent=768",
+ "model.diffusion_type=epsilon",
+ "model.gaussian_weights=1e-6",
+ "model.condition=None",
+ "model.num_max_faces=30",
+ "model.beta_schedule=linear",
+ "model.addition_tag=false",
+ "model.name=Diffusion_condition"
+ ]
+ env = os.environ.copy()
+ env["CUDA_VISIBLE_DEVICES"] = "0"
+
+ gr.Info("Start diffusing", title="Runtime Info")
+ subprocess.run(command, check=True, env=env)
+ gr.Info("Finished diffusing", title="Runtime Info")
+
+ # Post-process the generated model
+ postprocess_output = Path(state['user_output_dir']) / 'unconditional_post'
+ os.makedirs(postprocess_output, exist_ok=True)
+ if len(os.listdir(postprocess_output)) > 0:
+ shutil.rmtree(postprocess_output)
+ os.makedirs(postprocess_output, exist_ok=True)
+
+ gr.Info("Start post-processing.", title="Runtime Info")
+ inference_batch_postprocess(
+ file_dir=generate_output.as_posix(),
+ output_dir=postprocess_output.as_posix(),
+ num_cpus=2,
+ drop_num=0
+ )
+ gr.Info("Finished post-processing!", title="Runtime Info")
+ valid_models = get_valid_models(postprocess_output)
+
+ # Should have valid outputs
+ if len(valid_models) <= 0:
+                raise UncondGeneratingException("No Valid Model Generated!")
+
+ # Update the user state
+ state["uncond"] = list()
+ for i, model_number in enumerate(valid_models):
+ if (postprocess_output / model_number / 'debug_face_loop' / 'optimized_edge.obj').exists():
+ edge = (postprocess_output / model_number / 'debug_face_loop' / 'optimized_edge.obj').as_posix()
+ else:
+ edge = (postprocess_output / model_number / 'debug_face_loop' / 'edge.obj').as_posix() # Hard coding is not good.
+ solid = (postprocess_output / model_number / 'recon_brep.stl').as_posix()
+ step = (postprocess_output / model_number / 'recon_brep.step').as_posix()
+ state["uncond"].append([edge, solid, step])
+
+            gr.Info(f"{min(len(valid_models), 4)} valid models generated!", title="Finish generating")
+
+ edge_file = state["uncond"][0][0]
+ solid_file = state["uncond"][0][1]
+ step_file = state["uncond"][0][2]
+ return edge_file, solid_file, step_file, state["uncond"][0], state
+            except UncondEmptyInputException as input_e:
+                gr.Warning(str(input_e), title="Empty Input")
+
+            except UncondGeneratingException as generating_e:
+                gr.Warning(str(generating_e), title="No Valid Generation")
+
+            except Exception as e:
+                print(e)
+                gr.Warning("Something bad happened. Please try some other models", title="Unknown Error")
+            return gr.update(), gr.update(), gr.update(), gr.update(), state
+
+ return generate_uncond
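+
+    # Wired in app.py as:
+    #
+    #   uncond_button.click(
+    #       fn=UncondGeneratingMethod().generate(),
+    #       inputs=[*uncond_input_components, user_state],
+    #       outputs=[model_wireframe, model_solid, step_file, download_files, user_state]
+    #   )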
+
+def get_valid_models(postprocess_output: Path) -> list:
+    # Return the folder names that contain a 'success.txt' marker
+    output_folders = [model_folder for model_folder in os.listdir(postprocess_output) if 'success.txt' in os.listdir(postprocess_output / model_folder)]
+    return output_folders
+
+def check_user_output_dir(state: dict, output_dir):
+ if state['user_id'] is None:
+ state['user_id'] = uuid.uuid4()
+ if state['user_output_dir'] is None:
+ state['user_output_dir'] = Path(output_dir) / f"user_{state['user_id']}"
+ os.makedirs(state['user_output_dir'], exist_ok=True)
+ return state
+
+class UncondGeneratingException(Exception):
+    """Custom exception raised when generation fails."""
+    pass
+
+class UncondEmptyInputException(Exception):
+ """Custom exception if the input is empty."""
+ pass
\ No newline at end of file
diff --git a/app/GeneratingMethod/__init__.py b/app/GeneratingMethod/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb483eeea5bb5c4fafcd82834521105b82794dfc
--- /dev/null
+++ b/app/GeneratingMethod/__init__.py
@@ -0,0 +1,4 @@
+from .ConditionedGenerating import ConditionedGeneratingMethod
+from .UnconditionedGenerating import UncondGeneratingMethod
+
+__all__ = ["ConditionedGeneratingMethod", "UncondGeneratingMethod"]
\ No newline at end of file
diff --git a/app/ModelBuilder.py b/app/ModelBuilder.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee0b458c8beee060a12bc0f478aebeb3e0db8c14
--- /dev/null
+++ b/app/ModelBuilder.py
@@ -0,0 +1,126 @@
+import torch
+from pathlib import Path
+from typing import Optional
+from lightning_fabric import seed_everything
+from huggingface_hub import hf_hub_download
+
+from diffusion.diffusion_model import Diffusion_condition
+
+
+'''
+Steps to make a model:
+1. Set up the model structure depending on the modality
+2. Set up AutoEncoder weights
+3. Set up Diffusor weights
+*. Set up the condition flag (Should be deleted in the future)
+4. Pick a random seed
+**. Designate the output folder (Also should be deleted in the future; this is not the responsibility of a model!)
+'''
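+# A minimal usage sketch of the steps above; the checkpoint paths are the ones
+# used by the directors in app/ModelDirector, and the output path is hypothetical:
+#
+#   builder = ModelBuilder()
+#   builder.setup_autoencoder_weights("YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt")
+#   builder.setup_diffusion_weights("YuXingyao/HoLa-Brep/Diffusion_pc_sq30_1600k.ckpt")
+#   builder.setup_condition("pc")
+#   builder.setup_seed(0)
+#   builder.setup_output_dir("./outputs/demo")
+#   model = builder.make_model()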
+class ModelBuilder():
+ NUM_PROPOSALS = 32
+ def __init__(self):
+ self.reset()
+
+ def set_up_model_template(self, model_class: Diffusion_condition):
+        # This shouldn't exist due to Diffusion_condition's inheritance
+ self._model_class = model_class
+
+ # Theoretically, this function should be the true Builder API
+ # def set_up_modal(self, modal: Diffusion_condition):
+ # # Set up the modal for the model(pc, txt, sketch, svr, mvr)
+ # self._model_instance = modal
+
+ def setup_autoencoder_weights(self, weights_path: Path | str):
+ self._config["autoencoder_weights"] = weights_path
+
+ def setup_diffusion_weights(self, weights_path: Path | str):
+ self._config["diffusion_weights"] = weights_path
+
+ def setup_condition(self, condition: str):
+ self._config["condition"] = [condition]
+
+ def setup_seed(self, seed: Optional[int] = None):
+ if seed is not None:
+ seed_everything(seed)
+ else:
+ seed_everything(0)
+
+ def setup_output_dir(self, output_dir: Path | str):
+ self._config["output_dir"] = output_dir
+
+ def make_model(self, device: Optional[torch.device] = None):
+ # Torch condition
+ torch.backends.cudnn.benchmark = False
+ torch.set_float32_matmul_precision("medium")
+
+ # Device
+ if device is None:
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        # Instantiate the model for the chosen modality
+        # (needs to be refactored in the future)
+ self._model_instance = self._model_class(self._config)
+
+ # Load diffusion weights
+ repo_id = Path(self._config["diffusion_weights"]).parent.as_posix()
+ model_name = Path(self._config["diffusion_weights"]).name
+ model_weights = hf_hub_download(repo_id=repo_id, filename=model_name)
+ diffusion_weights = torch.load(model_weights, map_location=device, weights_only=False)["state_dict"]
+ diffusion_weights = {k: v for k, v in diffusion_weights.items() if "ae_model" not in k}
+        diffusion_weights = {k[6:]: v for k, v in diffusion_weights.items() if "model" in k}  # strip the leading "model." prefix
+
+ # Load Autoencoder weights
+ AE_repo_id = Path(self._config["autoencoder_weights"]).parent.as_posix()
+ AE_model_name = Path(self._config["autoencoder_weights"]).name
+ AE_model_weights = hf_hub_download(repo_id=AE_repo_id, filename=AE_model_name)
+ autoencoder_weights = torch.load(AE_model_weights, map_location=device, weights_only=False)["state_dict"]
+ autoencoder_weights = {k[6:]: v for k, v in autoencoder_weights.items() if "model" in k}
+ autoencoder_weights = {"ae_model."+k: v for k, v in autoencoder_weights.items()}
+
+ # Combine ae with diffusor
+ diffusion_weights.update(autoencoder_weights)
+ diffusion_weights = {k: v for k, v in diffusion_weights.items() if "camera_embedding" not in k}
+
+ self._model_instance.load_state_dict(diffusion_weights, strict=False)
+ self._model_instance.to(device)
+ self._model_instance.eval()
+
+ return self._model_instance
+
+ def reset(self):
+ self._model_class = Diffusion_condition # This shouldn't exist. See set_up_model_template()
+ self._model_instance = None
+ # Basic model config()
+ self._config = {
+ "name": "Diffusion_condition",
+ "train_decoder": False,
+ "stored_z": False,
+ "use_mean": True,
+ "diffusion_latent": 768,
+ "diffusion_type": "epsilon",
+ "loss": "l2",
+ "pad_method": "random",
+ "num_max_faces": 30,
+ "beta_schedule": "squaredcos_cap_v2",
+ "beta_start": 0.0001,
+ "beta_end": 0.02,
+ "variance_type": "fixed_small",
+ "addition_tag": False,
+ "autoencoder": "AutoEncoder_1119_light",
+ "with_intersection": True,
+ "dim_latent": 8,
+ "dim_shape": 768,
+ "sigmoid": False,
+ "in_channels": 6,
+ "gaussian_weights": 1e-6,
+ "norm": "layer",
+ "autoencoder_weights": "",
+ "is_aug": False,
+ "condition": [],
+ "cond_prob": []
+ }
+
+    @property
+    def model(self):
+        return self._model_instance
\ No newline at end of file
diff --git a/app/ModelDirector/MVRDirector.py b/app/ModelDirector/MVRDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..abdc857129bdedd41c2f5e70c6d3d22c6b217d2d
--- /dev/null
+++ b/app/ModelDirector/MVRDirector.py
@@ -0,0 +1,18 @@
+from diffusion.diffusion_model import Diffusion_condition_mvr
+from app.ModelDirector import ModelDirector
+
+
+class MVRDirector(ModelDirector):
+ def get_ae_weights(self):
+ return 'YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt'
+
+ def get_diffusion_weights(self):
+ return 'YuXingyao/HoLa-Brep/Diffusion_mvr_sq30_800k.ckpt'
+
+ def get_generating_condition(self):
+ return 'multi_img'
+
+    def config_setup(self):
+        # Bad smell, truly. Gonna refactor in the future... Hopefully...
+        super().config_setup()
+        self._builder.set_up_model_template(Diffusion_condition_mvr)
+
\ No newline at end of file
diff --git a/app/ModelDirector/ModelDirector.py b/app/ModelDirector/ModelDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..93e75c9192d07f1ea21dd78ee597cf0d96b99b3f
--- /dev/null
+++ b/app/ModelDirector/ModelDirector.py
@@ -0,0 +1,55 @@
+from typing import Optional, Callable
+from abc import ABC, abstractmethod
+from app import ModelBuilder
+
+'''
+Direct the ModelBuilder to build a model depending on the modality the user chooses
+'''
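+# A minimal sketch of the intended flow, mirroring ConditionedGeneratingMethod.generate
+# (the output path is hypothetical):
+#
+#   director = PointCloudDirector()
+#   director.config_setup()
+#   builder = director.builder
+#   builder.setup_output_dir("./outputs/demo")
+#   builder.setup_seed(0)
+#   model = builder.make_model()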
+class ModelDirector(ABC):
+ def __init__(
+ self,
+ builder: ModelBuilder = None,
+ additional_setup_fn: Optional[Callable[['ModelBuilder'], None]] = None
+ ):
+ if builder is None:
+ self._builder = ModelBuilder()
+ else:
+ self._builder = builder
+ self._additional_setup_fn = additional_setup_fn
+ self._ae_weights = self.get_ae_weights()
+ self._diffusion_weights = self.get_diffusion_weights()
+ self._condition = self.get_generating_condition()
+
+ def config_setup(self):
+ self._builder.setup_autoencoder_weights(self._ae_weights)
+ self._builder.setup_diffusion_weights(self._diffusion_weights)
+
+ # User defined setup
+ if self._additional_setup_fn:
+ self._additional_setup_fn(self._builder)
+
+ self._builder.setup_condition(self._condition)
+
+    @property
+    def builder(self):
+        return self._builder
+
+ @abstractmethod
+ def get_ae_weights(self):
+ pass
+
+ @abstractmethod
+ def get_diffusion_weights(self):
+ pass
+
+ @abstractmethod
+ def get_generating_condition(self):
+ pass
+
diff --git a/app/ModelDirector/PointCloudDirector.py b/app/ModelDirector/PointCloudDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8e441bb349612290a1f136c29d0495812a8fb2a
--- /dev/null
+++ b/app/ModelDirector/PointCloudDirector.py
@@ -0,0 +1,12 @@
+from app.ModelDirector import ModelDirector
+
+class PointCloudDirector(ModelDirector):
+ def get_ae_weights(self):
+ return 'YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt'
+
+ def get_diffusion_weights(self):
+ return 'YuXingyao/HoLa-Brep/Diffusion_pc_sq30_1600k.ckpt'
+
+ def get_generating_condition(self):
+ return 'pc'
+
\ No newline at end of file
diff --git a/app/ModelDirector/SVRDirector.py b/app/ModelDirector/SVRDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..70bb530eba743184d28e4b84e35d7283e7b2cfbd
--- /dev/null
+++ b/app/ModelDirector/SVRDirector.py
@@ -0,0 +1,12 @@
+from app.ModelDirector import ModelDirector
+
+class SVRDirector(ModelDirector):
+ def get_ae_weights(self):
+ return 'YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt'
+
+ def get_diffusion_weights(self):
+ return 'YuXingyao/HoLa-Brep/Diffusion_svr_sq30_1500k.ckpt'
+
+ def get_generating_condition(self):
+ return 'single_img'
+
\ No newline at end of file
diff --git a/app/ModelDirector/SketchDirector.py b/app/ModelDirector/SketchDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..169cd8ae44864bca960bc370f9aea790e5117c20
--- /dev/null
+++ b/app/ModelDirector/SketchDirector.py
@@ -0,0 +1,12 @@
+from app.ModelDirector import ModelDirector
+
+class SketchDirector(ModelDirector):
+ def get_ae_weights(self):
+ return 'YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt'
+
+ def get_diffusion_weights(self):
+ return 'YuXingyao/HoLa-Brep/Diffusion_sketch_sq30_1500k.ckpt'
+
+ def get_generating_condition(self):
+ return 'sketch'
+
\ No newline at end of file
diff --git a/app/ModelDirector/TextDirector.py b/app/ModelDirector/TextDirector.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e97811b67a5efa68e84cf461380c64977b1ae66
--- /dev/null
+++ b/app/ModelDirector/TextDirector.py
@@ -0,0 +1,12 @@
+from app.ModelDirector import ModelDirector
+
+class TextDirector(ModelDirector):
+ def get_ae_weights(self):
+ return 'YuXingyao/HoLa-Brep/AE_deepcad_1100k.ckpt'
+
+ def get_diffusion_weights(self):
+ return 'YuXingyao/HoLa-Brep/Diffusion_txt_sq30_1000k.ckpt'
+
+ def get_generating_condition(self):
+ return 'txt'
+
\ No newline at end of file
diff --git a/app/ModelDirector/__init__.py b/app/ModelDirector/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..39e7c3103c8c036b59015d6d40b7e91e18aa3055
--- /dev/null
+++ b/app/ModelDirector/__init__.py
@@ -0,0 +1,8 @@
+from .ModelDirector import ModelDirector
+from .MVRDirector import MVRDirector
+from .PointCloudDirector import PointCloudDirector
+from .SketchDirector import SketchDirector
+from .SVRDirector import SVRDirector
+from .TextDirector import TextDirector
+
+__all__ = ["ModelDirector", "MVRDirector", "PointCloudDirector", "SketchDirector", "SVRDirector", "TextDirector"]
\ No newline at end of file
diff --git a/app/__init__.py b/app/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fa4cd7c2e09a344b663120f3bd50e2afb4851bb
--- /dev/null
+++ b/app/__init__.py
@@ -0,0 +1,3 @@
+from .ModelBuilder import ModelBuilder
+
+# Maybe I can add something to make this module better. BUT IT WORKS NOW.
\ No newline at end of file
diff --git a/app/inference.py b/app/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..b57117d0ebcf0c7d1eea1884058c2960ce58c319
--- /dev/null
+++ b/app/inference.py
@@ -0,0 +1,65 @@
+import os
+import ray
+import time
+from pathlib import Path
+
+from construct_brep import construct_brep_from_datanpz
+
+# This file still exists just because an unspeakable evil class depends on it.
+
+def inference_batch_postprocess(file_dir: Path, output_dir: Path, num_cpus: int = 4, drop_num: int = 2, timeout: int = 60):
+ print("Start post-processing")
+
+ if not ray.is_initialized():
+ ray.init(
+ num_cpus=num_cpus,
+ )
+
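+    # Each reconstruction runs as an independent Ray task pinned to one CPU;
+    # failed tasks are not retried (max_retries=0), so a single pathological
+    # shape cannot stall the whole batch.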
+ construct_brep_from_datanpz_ray = ray.remote(num_cpus=1, max_retries=0)(construct_brep_from_datanpz)
+
+ all_folders = sorted(os.listdir(file_dir))
+
+ tasks = [
+ construct_brep_from_datanpz_ray.remote(
+ data_root=file_dir,
+ out_root=output_dir,
+ folder_name=model_number,
+            v_drop_num=1,  # NOTE: hardcoded; the drop_num parameter above is currently unused
+ use_cuda=False,
+ from_scratch=True,
+ is_log=False,
+ is_ray=True,
+ is_optimize_geom=True,
+ isdebug=False,
+ is_save_data=True
+ )
+ for model_number in all_folders
+ ]
+
+    results = []
+    success_count = 0
+    while tasks and success_count < 4:
+        done_ids, tasks = ray.wait(tasks, num_returns=1, timeout=timeout)
+ for done_id in done_ids:
+ try:
+ result = ray.get(done_id)
+ results.append(result)
+
+                # Delay just a bit to ensure file handles are released
+                time.sleep(0.2)
+                # Recount 'success.txt' markers from scratch on every pass;
+                # accumulating with += would re-count folders that already
+                # succeeded in an earlier iteration.
+                success_count = sum(
+                    1 for done_folder in Path(output_dir).iterdir()
+                    if (done_folder / 'success.txt').exists()
+                )
+
+ except Exception as e:
+ print(f"Task failed or timed out: {e}")
+ results.append(None)
+ if success_count >= 4:
+ # Make sure the files are written successfully
+ time.sleep(5.0)
+ break
+ time.sleep(5.0)
+ print("Finished post-processing")
diff --git a/construct_brep.py b/construct_brep.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3fef040cff3c3dc7325421d85e7ed39dcab8bc1
--- /dev/null
+++ b/construct_brep.py
@@ -0,0 +1,431 @@
+import copy
+import itertools
+import math
+import os, sys, shutil, traceback
+from pathlib import Path
+
+import numpy as np
+import torch
+from OCC.Core import Message
+from OCC.Core.Geom import Geom_BSplineSurface
+from OCC.Core.IFSelect import IFSelect_ReturnStatus
+from OCC.Core.IGESControl import IGESControl_Writer
+from OCC.Core.Interface import Interface_Static
+from OCC.Core.Message import Message_PrinterOStream, Message_Alarm
+from OCC.Core.STEPControl import STEPControl_Writer, STEPControl_AsIs, STEPControl_ManifoldSolidBrep, \
+ STEPControl_FacetedBrep, STEPControl_ShellBasedSurfaceModel
+from OCC.Core.ShapeFix import ShapeFix_ShapeTolerance
+from OCC.Core.TopAbs import TopAbs_SHAPE
+from OCC.Core.TopoDS import TopoDS_Face
+from OCC.Extend.DataExchange import read_step_file
+
+from diffusion.utils import *
+
+import ray
+import argparse
+import trimesh
+
+import time
+
+
+def get_data(v_filename):
+ # specify the key to get the face points, edge points and edge_face_connectivity in data.npz
+ # data_npz = np.load(os.path.join(data_root, folder_name, 'data.npz'), allow_pickle=True)['arr_0'].item()
+ data_npz = np.load(v_filename, allow_pickle=True)
+ if 'sample_points_faces' in data_npz and 'edge_face_connectivity' in data_npz:
+ face_points = data_npz['sample_points_faces'] # Face sample points (num_faces*20*20*3)
+ edge_points = data_npz['sample_points_lines'] # Edge sample points (num_lines*20*3)
+ edge_face_connectivity = data_npz['edge_face_connectivity'] # (num_intersection, (id_edge, id_face1, id_face2))
+ elif 'pred_face' in data_npz and 'pred_edge_face_connectivity' in data_npz:
+ face_points = data_npz['pred_face']
+ edge_points = data_npz['pred_edge']
+ edge_face_connectivity = data_npz['pred_edge_face_connectivity']
+ elif 'pred_face' in data_npz and 'face_edge_adj' in data_npz:
+ face_points = data_npz['pred_face'].astype(np.float32)
+ edge_points = data_npz['pred_edge'].astype(np.float32)
+ face_edge_adj = data_npz['face_edge_adj']
+ edge_face_connectivity = []
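+        # Two faces are adjacent iff their edge-id lists intersect; record the
+        # first shared edge as (edge_id, face_i, face_j).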
+ N = face_points.shape[0]
+ for i in range(N):
+ for j in range(i + 1, N):
+ intersection = list(set(face_edge_adj[i]).intersection(set(face_edge_adj[j])))
+ if len(intersection) > 0:
+ edge_face_connectivity.append([intersection[0], i, j])
+ edge_face_connectivity = np.array(edge_face_connectivity)
+
+ else:
+ raise ValueError(f"Unknown data npz format {v_filename}")
+
+ face_points = face_points[..., :3]
+ edge_points = edge_points[..., :3]
+ shape = Shape(face_points, edge_points, edge_face_connectivity, False)
+ return shape
+
+
+def get_candidate_shapes(num_drop, v_faces, v_curves, v_conn):
+ if num_drop == 0:
+ new_faces = [item for item in v_faces]
+ new_curves = [item for item in v_curves]
+ new_edge_face_connectivity = [item for item in v_conn]
+ return [(new_faces, new_curves, new_edge_face_connectivity)]
+ num_faces = len(v_faces)
+ candidate_shapes = []
+ drop_ids = list(itertools.combinations(range(num_faces), num_drop))
+
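+    # For every way of dropping `num_drop` faces, remap the surviving face ids
+    # and keep only the connectivity entries whose two faces both survived.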
+ for drop_id in drop_ids:
+ preserved_ids = np.array(list(set(range(num_faces)) - set(drop_id)))
+ prev_id_to_new_id = {prev_id: new_id for new_id, prev_id in enumerate(preserved_ids)}
+ new_faces = [v_faces[idx] for idx in preserved_ids]
+ new_curves = [item for item in v_curves]
+ new_edge_face_connectivity = []
+ for edge_id, face_id1, face_id2 in v_conn:
+ if face_id1 in preserved_ids and face_id2 in preserved_ids:
+ new_edge_face_connectivity.append([edge_id, prev_id_to_new_id[face_id1], prev_id_to_new_id[face_id2]])
+ candidate_shapes.append((new_faces, new_curves, new_edge_face_connectivity))
+ return candidate_shapes
+
+
+def construct_brep_from_datanpz(data_root, out_root, folder_name, v_drop_num=0,
+ is_ray=False, is_log=True,
+ is_optimize_geom=True, isdebug=False, use_cuda=False, from_scratch=True,
+ is_save_data=False):
+ disable_occ_log()
+ # is_log = False
+ # isdebug = False
+ time_records = [0, 0, 0, 0, 0, 0]
+ timer = time.time()
+ data_root = Path(data_root)
+ out_root = Path(out_root)
+ if from_scratch:
+ check_dir(out_root / folder_name)
+
+ # Check if it is already processed
+ if (out_root / folder_name / "success.txt").exists():
+ return time_records
+ safe_check_dir(out_root / folder_name)
+
+ debug_face_save_path = out_root / folder_name / "debug_face_loop"
+ if is_save_data:
+ safe_check_dir(debug_face_save_path)
+
+ if is_log:
+ print(
+ f"{Colors.GREEN}############################# Processing {folder_name} #############################{Colors.RESET}")
+
+ # Prepare the data
+ shape = get_data(os.path.join(data_root, folder_name, 'data.npz'))
+ if isdebug:
+ export_edges(shape.recon_edge_points, debug_face_save_path / 'edge_ori.obj')
+ shape.remove_half_edges()
+ shape.check_openness()
+ shape.build_fe()
+ shape.build_vertices(0.2)
+
+ if isdebug:
+ print(
+ f"{Colors.GREEN}Remove {len(shape.remove_edge_idx_src) + len(shape.remove_edge_idx_new)} edges{Colors.RESET}")
+
+ if is_save_data:
+ # export_point_cloud(os.path.join(debug_face_save_path, 'face.ply'), shape.recon_face_points.reshape(-1, 3))
+ updated_edge_points = np.delete(shape.recon_edge_points, shape.remove_edge_idx_new, axis=0)
+ export_edges(updated_edge_points, os.path.join(debug_face_save_path, 'edge.obj'))
+ # for face_idx in range(len(shape.face_edge_adj)):
+ # export_point_cloud(os.path.join(debug_face_save_path, f"face{face_idx}.ply"),
+ # shape.recon_face_points[face_idx].reshape(-1, 3))
+ # for edge_idx in shape.face_edge_adj[face_idx]:
+ # idx = np.where(shape.edge_face_connectivity[:, 0] == edge_idx)[0][0]
+ # adj_face = shape.edge_face_connectivity[idx][1:]
+ # export_point_cloud(
+ # os.path.join(debug_face_save_path, f"face{face_idx}_edge_idx{edge_idx}_face{adj_face}.ply"),
+ # shape.recon_edge_points[edge_idx].reshape(-1, 3),
+ # np.linspace([1, 0, 0], [0, 1, 0], shape.recon_edge_points[edge_idx].shape[0]))
+ # for edge_idx in range(len(shape.recon_edge_points)):
+ # if edge_idx in shape.remove_edge_idx_new:
+ # continue
+ # export_point_cloud(os.path.join(
+ # debug_face_save_path, f'edge{edge_idx}.ply'),
+ # shape.recon_edge_points[edge_idx].reshape(-1, 3),
+ # np.linspace([1, 0, 0], [0, 1, 0], shape.recon_edge_points[edge_idx].shape[0]))
+
+ # Optimize data
+ if is_optimize_geom:
+        interpolation_face = list(shape.interpolation_face)
+        # Ray workers run silently; local runs log only in debug mode.
+        shape.recon_face_points, shape.recon_edge_points = optimize(
+            interpolation_face, shape.recon_edge_points, shape.recon_face_points,
+            shape.edge_face_connectivity, shape.is_end_point, shape.pair1,
+            shape.face_edge_adj, v_islog=isdebug and not is_ray, v_max_iter=50,
+            use_cuda=use_cuda)
+
+ if is_save_data:
+ updated_edge_points = np.delete(shape.recon_edge_points, shape.remove_edge_idx_new, axis=0)
+ export_edges(updated_edge_points, os.path.join(debug_face_save_path, 'optimized_edge.obj'))
+ # for face_idx in range(len(shape.face_edge_adj)):
+ # for edge_idx in shape.face_edge_adj[face_idx]:
+ # idx = np.where(shape.edge_face_connectivity[:, 0] == edge_idx)[0][0]
+ # adj_face = shape.edge_face_connectivity[idx][1:]
+ # export_point_cloud(
+ # os.path.join(debug_face_save_path,
+ # f"face{face_idx}_optim_edge_idx{edge_idx}_face{adj_face}.ply"),
+ # shape.recon_edge_points[edge_idx].reshape(-1, 3),
+ # np.linspace([1, 0, 0], [0, 1, 0], shape.recon_edge_points[edge_idx].shape[0]))
+ # export_point_cloud(debug_face_save_path / f'optim_face{face_idx}.ply',
+ # shape.recon_face_points[face_idx].reshape(-1, 3))
+ # for edge_idx in range(len(shape.recon_edge_points)):
+ # if edge_idx in shape.remove_edge_idx_new:
+ # continue
+ # export_point_cloud(
+ # os.path.join(debug_face_save_path, f'optim_edge{edge_idx}.ply'),
+ # shape.recon_edge_points[edge_idx].reshape(-1, 3),
+ # np.linspace([1, 0, 0], [0, 1, 0], shape.recon_edge_points[edge_idx].shape[0]))
+
+ ori_shape = copy.deepcopy(shape)
+
+ recon_geom_faces = [create_surface(points) for points in shape.recon_face_points]
+ recon_topo_faces = [
+ BRepBuilderAPI_MakeFace(geom_face, TRANSFER_PRECISION).Face() for geom_face in recon_geom_faces]
+ recon_geom_curves = [create_edge(points) for points in shape.recon_edge_points]
+ recon_topo_curves = [BRepBuilderAPI_MakeEdge(curve).Edge() for curve in recon_geom_curves]
+
+ shape.recon_geom_faces = [item for item in recon_geom_faces]
+ shape.recon_topo_faces = [item for item in recon_topo_faces]
+ shape.recon_geom_curves = [item for item in recon_geom_curves]
+ shape.recon_topo_curves = [item for item in recon_topo_curves]
+ shape.build_geom(is_replace_edge=True)
+ recon_topo_curves = [item for item in shape.recon_topo_curves]
+
+ # Write separate faces
+ v, f = get_separated_surface(shape.recon_topo_faces, v_precision1=0.1, v_precision2=0.2)
+ trimesh.Trimesh(vertices=v, faces=f).export(out_root / folder_name / "separate_faces.ply")
+
+ num_max_drop = min(v_drop_num, math.ceil(0.2 * len(ori_shape.recon_face_points)))
+ is_success = False
+
+ for num_drop in range(num_max_drop + 1):
+ candidate_shapes = get_candidate_shapes(num_drop, recon_geom_faces, recon_topo_curves, ori_shape.edge_face_connectivity)
+
+ for (faces, curves, connectivity) in candidate_shapes:
+ if len(faces) == 0:
+ if is_log:
+ print(f"{Colors.RED}No data in {folder_name}{Colors.RESET}")
+ # shutil.rmtree(os.path.join(out_root, folder_name))
+ continue
+
+ num_faces = len(faces)
+ face_edge_adj = [[] for _ in range(num_faces)]
+ for edge_face1_face2 in connectivity:
+ edge, face1, face2 = edge_face1_face2
+ if face1 == face2:
+ # raise ValueError("Face1 and Face2 should be different")
+ print("Face1 and Face2 should be different")
+ continue
+ assert edge not in face_edge_adj[face1]
+ face_edge_adj[face1].append(edge)
+ face_edge_adj[face2].append(edge)
+
+ # Construct trimmed surface
+ trimmed_faces = []
+ for i_face in range(num_faces):
+ if len(face_edge_adj[i_face]) == 0:
+ trimmed_faces.append(None)
+ continue
+ face_edge_idx = face_edge_adj[i_face]
+ geom_face = faces[i_face]
+ face_edges = [curves[edge_idx] for edge_idx in face_edge_idx]
+
+ # Build wire
+ trimmed_face = None
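+                # Walk the tolerance ladder: retry wire construction with
+                # progressively looser thresholds until a trimmed face succeeds.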
+ for threshold in CONNECT_TOLERANCE:
+ wire_list = create_wire_from_unordered_edges(face_edges, threshold)
+ if wire_list is None:
+ continue
+
+ trimmed_face = create_trimmed_face_from_wire(geom_face, face_edges, wire_list, threshold)
+ if trimmed_face is not None:
+ break
+
+ trimmed_faces.append(trimmed_face)
+
+ trimmed_faces = [face for face in trimmed_faces if face is not None]
+ if len(trimmed_faces) < 0.8 * num_faces:
+ continue
+
+            # Try to construct a solid from the trimmed faces only
+            solid = None
+            if len(trimmed_faces) >= 0.8 * num_faces:
+ for connected_tolerance in CONNECT_TOLERANCE:
+ if is_log:
+ print(f"Try construct solid with {connected_tolerance}")
+ solid = get_solid(trimmed_faces, connected_tolerance)
+ if solid is not None:
+ break
+
+ # Check solid
+ if solid is not None:
+ save_step_file(out_root / folder_name / 'recon_brep.step', solid)
+ if not check_step_valid_soild(str(out_root / folder_name / 'recon_brep.step')):
+ print("Inconsistent solid check in {}".format(folder_name))
+ os.remove(out_root / folder_name / 'recon_brep.step')
+ else:
+ write_stl_file(solid, str(out_root / folder_name / "recon_brep.stl"),
+ linear_deflection=0.1, angular_deflection=0.2)
+ open(out_root / folder_name / "success.txt", 'w').close()
+ is_success = True
+ break
+ if is_success:
+ break
+
+ # If solid is None, then try to obtain step file with all faces
+ if not is_success:
+ # Construct trimmed surface
+ num_faces = len(recon_topo_faces)
+ face_edge_adj = [[] for _ in range(num_faces)]
+ for edge_face1_face2 in ori_shape.edge_face_connectivity:
+ edge, face1, face2 = edge_face1_face2
+ if face1 == face2:
+ # raise ValueError("Face1 and Face2 should be different")
+ print("Face1 and Face2 should be different")
+ continue
+ assert edge not in face_edge_adj[face1]
+ face_edge_adj[face1].append(edge)
+ face_edge_adj[face2].append(edge)
+
+ trimmed_faces = []
+ for i_face in range(num_faces):
+ if len(face_edge_adj[i_face]) == 0:
+ trimmed_faces.append(None)
+ continue
+ face_edge_idx = face_edge_adj[i_face]
+ geom_face = recon_geom_faces[i_face]
+ face_edges = [recon_topo_curves[edge_idx] for edge_idx in face_edge_idx]
+
+ # Build wire
+ trimmed_face = None
+ for threshold in CONNECT_TOLERANCE:
+ wire_list = create_wire_from_unordered_edges(face_edges, threshold)
+ if wire_list is None:
+ continue
+
+ trimmed_face = create_trimmed_face_from_wire(geom_face, face_edges, wire_list, threshold)
+ if trimmed_face is not None:
+ break
+
+ trimmed_faces.append(trimmed_face)
+
+ mixed_faces = []
+ for i_face in range(num_faces):
+ if trimmed_faces[i_face] is None:
+ face = BRepBuilderAPI_MakeFace(recon_geom_faces[i_face], TRANSFER_PRECISION).Face()
+ mixed_faces.append(face)
+ else:
+ mixed_faces.append(trimmed_faces[i_face])
+
+ # trimmed_faces = [face for face in trimmed_faces if face is not None]
+ # if len(trimmed_faces) < 0.8 * num_faces:
+ # continue
+
+ compound = None
+ for connected_tolerance in CONNECT_TOLERANCE:
+ compound = get_compound(mixed_faces, connected_tolerance)
+ if compound is not None:
+ break
+
+ if compound is not None:
+ save_step_file(out_root / folder_name / 'recon_brep.step', compound)
+ else:
+ print(f"Failed to construct solid in {folder_name}")
+ return time_records
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Construct Brep From Data')
+ parser.add_argument('--data_root', type=str, required=True)
+ parser.add_argument('--list', type=str, default="")
+ parser.add_argument('--out_root', type=str, required=True)
+ parser.add_argument('--num_cpus', type=int, default=12)
+ parser.add_argument('--use_ray', action='store_true')
+ parser.add_argument('--prefix', type=str, default="")
+ parser.add_argument('--use_cuda', action='store_true')
+ parser.add_argument('--from_scratch', action='store_true')
+ parser.add_argument('--drop_num', type=int, default=0)
+ args = parser.parse_args()
+ v_data_root = args.data_root
+ v_out_root = args.out_root
+ filter_list = args.list
+ is_use_ray = args.use_ray
+ num_cpus = args.num_cpus
+ use_cuda = args.use_cuda
+ from_scratch = args.from_scratch
+ drop_num = args.drop_num
+ safe_check_dir(v_out_root)
+ if not os.path.exists(v_data_root):
+ raise ValueError(f"Data root path {v_data_root} does not exist.")
+
+ if args.prefix != "":
+        construct_brep_from_datanpz(v_data_root, v_out_root, args.prefix,
+                                    v_drop_num=drop_num,
+                                    use_cuda=use_cuda, is_optimize_geom=True, isdebug=True, is_save_data=True)
+        sys.exit(0)
+ all_folders = [folder for folder in os.listdir(v_data_root) if os.path.isdir(os.path.join(v_data_root, folder))]
+ if filter_list != "":
+ print(f"Use filter_list {filter_list}")
+ if not os.path.exists(filter_list):
+ raise ValueError(f"List {filter_list} does not exist.")
+        if os.path.isdir(filter_list):
+            valid_prefixes = [f for f in os.listdir(filter_list) if os.path.isdir(os.path.join(filter_list, f))]
+        elif filter_list.endswith(".txt"):
+            valid_prefixes = [item.strip() for item in open(filter_list).readlines()]
+        else:
+            raise ValueError(f"Invalid list {filter_list}")
+        all_folders = list(set(all_folders) & set(valid_prefixes))
+
+    all_folders.sort()
+
+ print(f"Total {len(all_folders)} folders")
+
+ if not is_use_ray:
+ # random.shuffle(all_folders)
+ for i in tqdm(range(len(all_folders))):
+ construct_brep_from_datanpz(v_data_root, v_out_root, all_folders[i],
+ v_drop_num=drop_num,
+ use_cuda=use_cuda, from_scratch=from_scratch,
+ is_save_data=True, is_log=False, is_optimize_geom=True, is_ray=False, )
+ else:
+ ray.init(
+ dashboard_host="0.0.0.0",
+ dashboard_port=8080,
+ num_cpus=num_cpus,
+ # num_gpus=num_gpus,
+ # local_mode=True
+ )
+ construct_brep_from_datanpz_ray = ray.remote(num_gpus=0.1 if use_cuda else 0, max_retries=0)(
+ construct_brep_from_datanpz)
+
+ tasks = []
+ for i in range(len(all_folders)):
+ tasks.append(construct_brep_from_datanpz_ray.remote(
+ v_data_root, v_out_root,
+ all_folders[i],
+ v_drop_num=drop_num,
+ use_cuda=use_cuda, from_scratch=from_scratch,
+ is_log=False, is_ray=True, is_optimize_geom=True, isdebug=False,
+ ))
+ results = []
+ for i in tqdm(range(len(all_folders))):
+ try:
+ results.append(ray.get(tasks[i], timeout=60))
+        except Exception:
+ results.append(None)
+ results = [item for item in results if item is not None]
+ print(len(results))
+ results = np.array(results)
+ print(results.mean(axis=0))
+ print("Done")
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2ea2a6b930154e70f532f8abe184262bafeb2952
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,10 @@
+name: HoLa-Brep
+channels:
+ - conda-forge
+ - nvidia
+ - pytorch
+dependencies:
+ - python=3.10
+ - pip=24.3.1
+ - numpy=2.2.2
+  - pythonocc-core=7.8.1
diff --git a/eval/__init__.py b/eval/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/eval/check_data_deduplicate.py b/eval/check_data_deduplicate.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ee13a53d497f631a33bfa23fc058320d9ed164e
--- /dev/null
+++ b/eval/check_data_deduplicate.py
@@ -0,0 +1,247 @@
+import multiprocessing
+
+import networkx as nx
+import numpy as np
+import argparse
+import os
+
+import trimesh
+from tqdm import tqdm
+import ray
+
+from check_valid import check_step_valid_soild, load_data_with_prefix
+from eval_brepgen import normalize_pc
+from eval_unique_novel import *
+
+
+def find_connected_components(matrix):
+ N = len(matrix)
+ visited = [False] * N
+ components = []
+
+ def dfs(idx, component):
+ stack = [idx]
+ while stack:
+ node = stack.pop()
+ if not visited[node]:
+ visited[node] = True
+ component.append(node)
+ for neighbor in range(N):
+ if matrix[node][neighbor] and not visited[neighbor]:
+ stack.append(neighbor)
+
+ for i in range(N):
+ if not visited[i]:
+ component = []
+ dfs(i, component)
+ components.append(component)
+
+ return components
+
+
+def compute_unique(graph_list, atol=None, is_use_ray=False, batch_size=100000, num_max_split_batch=128):
+ N = len(graph_list)
+ identical_pairs = []
+ unique_graph_idx = list(range(N))
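+    # np.triu_indices(N, k=1) enumerates every unordered pair (i, j) with i < j.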
+ pair_0, pair_1 = np.triu_indices(N, k=1)
+ check_pairs = np.column_stack((pair_0, pair_1))
+
+    # Cap how many Ray batches the pair list is split into so a huge N does
+    # not spawn an unbounded number of tasks.
+    num_split_batch = len(check_pairs) // batch_size
+    if num_split_batch > num_max_split_batch:
+        num_split_batch = num_max_split_batch
+        batch_size = len(check_pairs) // num_split_batch
+
+    if not is_use_ray:
+        for idx1, idx2 in tqdm(check_pairs):
+            is_identical = is_graph_identical(graph_list[idx1], graph_list[idx2], atol=atol)
+            if is_identical:
+                identical_pairs.append((idx1, idx2))
+                if idx2 in unique_graph_idx:
+                    unique_graph_idx.remove(idx2)
+ else:
+ N_batch = len(check_pairs) // batch_size
+ futures = []
+ for i in tqdm(range(N_batch)):
+ batch_pairs = check_pairs[i * batch_size: (i + 1) * batch_size]
+ batch_graph_pair = [(graph_list[idx1], graph_list[idx2]) for idx1, idx2 in batch_pairs]
+ futures.append(is_graph_identical_remote.remote(batch_graph_pair, atol))
+ results = ray.get(futures)
+
+ for batch_idx in tqdm(range(N_batch)):
+ for idx, is_identical in enumerate(results[batch_idx]):
+ if not is_identical:
+ continue
+ idx1, idx2 = check_pairs[batch_idx * batch_size + idx]
+ if idx2 in unique_graph_idx:
+ unique_graph_idx.remove(idx2)
+ identical_pairs.append((idx1, idx2))
+
+ return unique_graph_idx, identical_pairs
+
+
+def test_check():
+ sample = np.random.rand(3, 32, 32, 3)
+ face1 = sample[[0, 1, 2]]
+ face2 = sample[[0, 2, 1]]
+ faces_adj1 = [[0, 1]]
+ faces_adj2 = [[0, 2]]
+
+ graph1 = build_graph(face1, faces_adj1)
+ graph2 = build_graph(face2, faces_adj2)
+
+ is_identical = is_graph_identical(graph1, graph2)
+    # Check whether the two graphs are identical
+ print("Graphs are equal" if is_identical else "Graphs are not equal")
+
+
+def load_data_from_npz(data_npz_file):
+ data_npz = np.load(data_npz_file, allow_pickle=True)
+ # Brepgen
+ if 'face_edge_adj' in data_npz:
+ faces = data_npz['pred_face']
+ face_edge_adj = data_npz['face_edge_adj']
+ faces_adj_pair = []
+ N = face_edge_adj.shape[0]
+ for face_idx1 in range(N):
+ for face_idx2 in range(face_idx1 + 1, N):
+ face_edges1 = face_edge_adj[face_idx1]
+ face_edges2 = face_edge_adj[face_idx2]
+ if sorted((face_idx1, face_idx2)) in faces_adj_pair:
+ continue
+ if len(set(face_edges1).intersection(set(face_edges2))) > 0:
+ faces_adj_pair.append(sorted((face_idx1, face_idx2)))
+ return faces, faces_adj_pair
+ # Ours
+ if 'sample_points_faces' in data_npz:
+ face_points = data_npz['sample_points_faces'] # Face sample points (num_faces*20*20*3)
+ edge_face_connectivity = data_npz['edge_face_connectivity'] # (num_intersection, (id_edge, id_face1, id_face2))
+ elif 'pred_face' in data_npz and 'pred_edge_face_connectivity' in data_npz:
+ face_points = data_npz['pred_face']
+ edge_face_connectivity = data_npz['pred_edge_face_connectivity']
+ else:
+ raise ValueError("Invalid data format")
+ faces_adj_pair = []
+ for edge_idx, face_idx1, face_idx2 in edge_face_connectivity:
+ faces_adj_pair.append([face_idx1, face_idx2])
+ if face_points.shape[-1] != 3:
+ face_points = face_points[..., :3]
+
+ src_shape = face_points.shape
+ face_points = normalize_pc(face_points.reshape(-1, 3)).reshape(src_shape)
+ return face_points, faces_adj_pair
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--train_root", type=str, required=True)
+ parser.add_argument("--n_bit", type=int)
+ parser.add_argument("--atol", type=float)
+ parser.add_argument("--use_ray", action='store_true')
+ parser.add_argument("--load_batch_size", type=int, default=100)
+ parser.add_argument("--compute_batch_size", type=int, default=10000)
+ parser.add_argument("--txt", type=str, default=None)
+ parser.add_argument("--num_cpus", type=int, default=32)
+ args = parser.parse_args()
+ train_data_root = args.train_root
+ is_use_ray = args.use_ray
+ n_bit = args.n_bit
+ atol = args.atol
+ load_batch_size = args.load_batch_size
+ compute_batch_size = args.compute_batch_size
+ folder_list_txt = args.txt
+ num_cpus = args.num_cpus
+
+ if not n_bit and not atol:
+ raise ValueError("Must set either n_bit or atol")
+ if n_bit and atol:
+ raise ValueError("Cannot set both n_bit and atol")
+
+ if n_bit:
+ atol = None
+ if atol:
+ n_bit = -1
+
+ if folder_list_txt:
+ with open(folder_list_txt, "r") as f:
+ check_folders = [line.strip() for line in f.readlines()]
+ else:
+ check_folders = None
+
+    ################################################## Unique #######################################################
+ # Load all the data files
+ print("Loading data files...")
+ data_npz_file_list = load_data_with_prefix(train_data_root, 'data.npz')
+ data_npz_file_list.sort()
+ if is_use_ray:
+ ray.init()
+ futures = []
+ graph_list = []
+ prefix_list = []
+ for i in tqdm(range(0, len(data_npz_file_list), load_batch_size)):
+ batch_data_npz_file_list = data_npz_file_list[i: i + load_batch_size]
+ futures.append(load_and_build_graph_remote.remote(batch_data_npz_file_list, check_folders, n_bit))
+ for future in tqdm(futures):
+ result = ray.get(future)
+ graph_list_batch, prefix_list_batch = result
+ graph_list.extend(graph_list_batch)
+ prefix_list.extend(prefix_list_batch)
+ ray.shutdown()
+ else:
+        graph_list, prefix_list = load_and_build_graph(data_npz_file_list, check_folders, n_bit)
+ print(f"Loaded {len(graph_list)} data files")
+
+    # Record each graph's face count (its number of nodes) so graphs can be bucketed by it
+ graph_node_num = np.array([graph.number_of_nodes() for graph in graph_list])
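+    # Graphs with different node counts can never be isomorphic, so bucketing
+    # by face count keeps the pairwise check quadratic only within each bucket.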
+
+ identical_pairs_txt = train_data_root + f"_identical_pairs_{n_bit}bit.txt"
+ fp_identical_pairs = open(identical_pairs_txt, "w")
+ fp_identical_pairs.close()
+ novel_txt = train_data_root + f"_novel_{n_bit}bit.txt"
+ fp_novel = open(novel_txt, "w")
+ fp_novel.close()
+
+ if is_use_ray:
+ ray.init(_temp_dir=r"/mnt/d/img2brep/ray_temp")
+ unique_graph_idx_list = []
+ pbar = tqdm(range(3, 31))
+ for num_face in pbar:
+ print(f"Processing {num_face}")
+ pbar.set_description(f"Processing {num_face}")
+ fp_identical_pairs = open(identical_pairs_txt, "a")
+ fp_novel = open(novel_txt, "a")
+ print(f"face_num = {num_face}", file=fp_identical_pairs)
+
+ hits_graph_idx = np.where(graph_node_num == num_face)[0]
+ hits_graph = [graph_list[idx] for idx in tqdm(hits_graph_idx)]
+ hits_graph_prefix = [prefix_list[idx] for idx in hits_graph_idx]
+
+ if len(hits_graph) != 0:
+ local_unique_graph_idx_list, identical_pairs = compute_unique(hits_graph, atol, is_use_ray, compute_batch_size)
+ for unique_graph_idx in local_unique_graph_idx_list:
+ print(f"{hits_graph_prefix[unique_graph_idx]}", file=fp_novel)
+
+ local_unique_graph_idx_list = [hits_graph_idx[idx] for idx in local_unique_graph_idx_list]
+ unique_graph_idx_list.extend(local_unique_graph_idx_list)
+
+ if len(identical_pairs) > 0:
+ for idx1, idx2 in identical_pairs:
+ print(f"{hits_graph_prefix[idx1]} {hits_graph_prefix[idx2]}", file=fp_identical_pairs)
+ pbar.set_postfix({"Local Unique": len(local_unique_graph_idx_list) / len(hits_graph),
+ "Total Unique": len(unique_graph_idx_list) / len(graph_list), })
+ print(f"Unique: {len(local_unique_graph_idx_list)}/{len(hits_graph_idx)}"
+ f"={len(local_unique_graph_idx_list) / len(hits_graph_idx)}", file=fp_identical_pairs)
+ else:
+ print(f"face_num = {num_face} has no data", file=fp_identical_pairs)
+ fp_identical_pairs.close()
+ fp_novel.close()
+
+ if is_use_ray:
+ ray.shutdown()
+
+ print(f"Unique num: {len(unique_graph_idx_list)}/{len(graph_list)}={len(unique_graph_idx_list) / len(graph_list)}")
+ print(f"Identical pairs are saved to {identical_pairs_txt}")
+ print(f"Novel txt are saved to {novel_txt}")
+ print("Done")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/eval/check_deduplicate_dis.py b/eval/check_deduplicate_dis.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a5ab899ab4dd70196ee87a8dd2a2feed26493e7
--- /dev/null
+++ b/eval/check_deduplicate_dis.py
@@ -0,0 +1,317 @@
+import networkx as nx
+import numpy as np
+import argparse
+import os
+
+from tqdm import tqdm
+import ray
+
+from check_valid import check_step_valid_soild, load_data_with_prefix
+
+
+def real2bit(data, n_bits=8, min_range=-1, max_range=1):
+ """Convert vertices in [-1., 1.] to discrete values in [0, n_bits**2 - 1]."""
+ range_quantize = 2 ** n_bits - 1
+ data_quantize = (data - min_range) * range_quantize / (max_range - min_range)
+ data_quantize = np.clip(data_quantize, a_min=0, a_max=range_quantize) # clip values
+ return data_quantize.astype(int)
+
+
+def build_graph(faces, faces_adj, n_bit=4):
+    """Build a graph from a shape.
+
+    faces: np.array of shape (n_faces, n_points, n_points, 3)
+    faces_adj: list of (face_idx, face_idx) adjacency pairs, e.g. [[0, 1], [1, 2]]
+    """
+    faces_bits = real2bit(faces, n_bits=n_bit)
+ G = nx.Graph()
+ for face_idx, face_bit in enumerate(faces_bits):
+ face_bit = face_bit.reshape(-1, 3)
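+        # Lexicographically sort the quantized samples so node comparison is
+        # invariant to the sampling order within a face.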
+ face_bit_ordered = face_bit[np.lexsort((face_bit[:, 0], face_bit[:, 1], face_bit[:, 2]))]
+ G.add_node(face_idx, shape_geometry=face_bit_ordered)
+ for pair in faces_adj:
+ G.add_edge(pair[0], pair[1])
+ return G
+
+
+def is_graph_identical(graph1, graph2):
+ """Check if two shapes are identical."""
+ # Check if the two graphs are isomorphic considering node attributes
+ return nx.is_isomorphic(
+ graph1, graph2,
+ node_match=lambda n1, n2: np.array_equal(n1['shape_geometry'], n2['shape_geometry'])
+ )
+
+
+def is_graph_identical_batch(graph_pair_list):
+ is_identical_list = []
+ for graph1, graph2 in graph_pair_list:
+ is_identical = is_graph_identical(graph1, graph2)
+ is_identical_list.append(is_identical)
+ return is_identical_list
+
+
+is_graph_identical_remote = ray.remote(is_graph_identical_batch)
+
+
+def find_connected_components(matrix):
+ N = len(matrix)
+ visited = [False] * N
+ components = []
+
+ def dfs(idx, component):
+ stack = [idx]
+ while stack:
+ node = stack.pop()
+ if not visited[node]:
+ visited[node] = True
+ component.append(node)
+ for neighbor in range(N):
+ if matrix[node][neighbor] and not visited[neighbor]:
+ stack.append(neighbor)
+
+ for i in range(N):
+ if not visited[i]:
+ component = []
+ dfs(i, component)
+ components.append(component)
+
+ return components
+
+
+def compute_gen_unique(graph_list, is_use_ray=False, batch_size=100000):
+ N = len(graph_list)
+ unique_graph_idx = list(range(N))
+ pair_0, pair_1 = np.triu_indices(N, k=1)
+ check_pairs = list(zip(pair_0, pair_1))
+ deduplicate_matrix = np.zeros((N, N), dtype=bool)
+
+ if not is_use_ray:
+ for idx1, idx2 in tqdm(check_pairs):
+ is_identical = is_graph_identical(graph_list[idx1], graph_list[idx2])
+ if is_identical:
+                if idx2 in unique_graph_idx:
+                    unique_graph_idx.remove(idx2)
+ deduplicate_matrix[idx1, idx2] = True
+ deduplicate_matrix[idx2, idx1] = True
+ else:
+ ray.init()
+ N_batch = len(check_pairs) // batch_size
+ futures = []
+ for i in tqdm(range(N_batch)):
+ batch_pairs = check_pairs[i * batch_size: (i + 1) * batch_size]
+ batch_graph_pair = [(graph_list[idx1], graph_list[idx2]) for idx1, idx2 in batch_pairs]
+ futures.append(is_graph_identical_remote.remote(batch_graph_pair))
+ results = ray.get(futures)
+
+ for batch_idx in tqdm(range(N_batch)):
+ for idx, is_identical in enumerate(results[batch_idx]):
+ if not is_identical:
+ continue
+ idx1, idx2 = check_pairs[batch_idx * batch_size + idx]
+ deduplicate_matrix[idx1, idx2] = True
+ deduplicate_matrix[idx2, idx1] = True
+ if idx2 in unique_graph_idx:
+ unique_graph_idx.remove(idx2)
+ ray.shutdown()
+
+ unique = len(unique_graph_idx)
+ print(f"Unique: {unique}/{N}")
+ unique_ratio = unique / N
+
+ return unique_ratio, deduplicate_matrix
+
+
+def compute_gen_novel(gen_graph_list, train_graph_list, is_use_ray=False, batch_size=100000):
+ M, N = len(gen_graph_list), len(train_graph_list)
+ deduplicate_matrix = np.zeros((M, N), dtype=bool)
+    # Every (generated, training) pair must be compared; triu_indices_from on
+    # the rectangular M x N matrix would skip all pairs with train_idx <= gen_idx.
+    pair_0, pair_1 = np.meshgrid(np.arange(M), np.arange(N), indexing='ij')
+    check_pairs = list(zip(pair_0.ravel(), pair_1.ravel()))
+ non_novel_graph_idx = np.zeros(M, dtype=bool)
+
+ if not is_use_ray:
+ for idx1, idx2 in tqdm(check_pairs):
+ if non_novel_graph_idx[idx1]:
+ continue
+ is_identical = is_graph_identical(gen_graph_list[idx1], train_graph_list[idx2])
+ if is_identical:
+ non_novel_graph_idx[idx1] = True
+ deduplicate_matrix[idx1, idx2] = True
+ else:
+ ray.init()
+ N_batch = len(check_pairs) // batch_size
+ futures = []
+ for i in tqdm(range(N_batch)):
+ batch_pairs = check_pairs[i * batch_size: (i + 1) * batch_size]
+ batch_graph_pair = [(gen_graph_list[idx1], train_graph_list[idx2]) for idx1, idx2 in batch_pairs]
+ futures.append(is_graph_identical_remote.remote(batch_graph_pair))
+ results = ray.get(futures)
+
+ for batch_idx in tqdm(range(N_batch)):
+ for idx, is_identical in enumerate(results[batch_idx]):
+ if not is_identical:
+ continue
+ idx1, idx2 = check_pairs[batch_idx * batch_size + idx]
+ deduplicate_matrix[idx1, idx2] = True
+ non_novel_graph_idx[idx1] = True
+ ray.shutdown()
+
+ novel = M - np.sum(non_novel_graph_idx)
+ print(f"Novel: {novel}/{M}")
+ novel_ratio = novel / M
+ return novel_ratio, deduplicate_matrix
+
+
+def test_check():
+ sample = np.random.rand(3, 32, 32, 3)
+ face1 = sample[[0, 1, 2]]
+ face2 = sample[[0, 2, 1]]
+ faces_adj1 = [[0, 1]]
+ faces_adj2 = [[0, 2]]
+
+ graph1 = build_graph(face1, faces_adj1)
+ graph2 = build_graph(face2, faces_adj2)
+
+ is_identical = is_graph_identical(graph1, graph2)
+    # Check whether the two graphs are identical
+ print("Graphs are equal" if is_identical else "Graphs are not equal")
+
+
+def load_data_from_npz(data_npz_file):
+ data_npz = np.load(data_npz_file, allow_pickle=True)
+ # Brepgen
+ if 'face_edge_adj' in data_npz:
+ faces = data_npz['pred_face']
+ face_edge_adj = data_npz['face_edge_adj']
+ faces_adj_pair = []
+ N = face_edge_adj.shape[0]
+ for face_idx1 in range(N):
+ for face_idx2 in range(face_idx1 + 1, N):
+ face_edges1 = face_edge_adj[face_idx1]
+ face_edges2 = face_edge_adj[face_idx2]
+ if sorted((face_idx1, face_idx2)) in faces_adj_pair:
+ continue
+ if len(set(face_edges1).intersection(set(face_edges2))) > 0:
+ faces_adj_pair.append(sorted((face_idx1, face_idx2)))
+ return faces, faces_adj_pair
+ # Ours
+ if 'sample_points_faces' in data_npz and 'edge_face_connectivity' in data_npz:
+ face_points = data_npz['sample_points_faces'] # Face sample points (num_faces*20*20*3)
+ edge_points = data_npz['sample_points_lines'] # Edge sample points (num_lines*20*3)
+ edge_face_connectivity = data_npz['edge_face_connectivity'] # (num_intersection, (id_edge, id_face1, id_face2))
+ elif 'pred_face' in data_npz and 'pred_edge_face_connectivity' in data_npz:
+ face_points = data_npz['pred_face']
+ edge_points = data_npz['pred_edge']
+ edge_face_connectivity = data_npz['pred_edge_face_connectivity']
+ else:
+ raise ValueError("Invalid data format")
+ faces_adj_pair = []
+ for edge_idx, face_idx1, face_idx2 in edge_face_connectivity:
+ faces_adj_pair.append([face_idx1, face_idx2])
+ return face_points, faces_adj_pair
+
+
+def load_and_build_graph(data_npz_file_list, gen_post_data_root=None, n_bit=4):
+ gen_graph_list = []
+ prefix_list = []
+ for data_npz_file in data_npz_file_list:
+ folder_name = os.path.basename(os.path.dirname(data_npz_file))
+ if gen_post_data_root:
+ step_file_list = load_data_with_prefix(os.path.join(gen_post_data_root, folder_name), ".step")
+ if len(step_file_list) == 0:
+ continue
+ if not check_step_valid_soild(step_file_list[0]):
+ continue
+ prefix_list.append(folder_name)
+ faces, faces_adj_pair = load_data_from_npz(data_npz_file)
+ graph = build_graph(faces, faces_adj_pair, n_bit)
+ gen_graph_list.append(graph)
+ return gen_graph_list, prefix_list
+
+
+load_and_build_graph_remote = ray.remote(load_and_build_graph)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--fake", type=str, required=True)
+ parser.add_argument("--fake_post", type=str, required=False)
+ parser.add_argument("--train_root", type=str, required=False)
+ parser.add_argument("--n_bit", type=int, default=4)
+ parser.add_argument("--use_ray", action='store_true')
+ parser.add_argument("--load_batch_size", type=int, default=400)
+ parser.add_argument("--compute_batch_size", type=int, default=200000)
+ parser.add_argument("--txt", type=str, default=None)
+ parser.add_argument("--num_cpus", type=int, default=16)
+ args = parser.parse_args()
+ gen_data_root = args.fake
+ gen_post_data_root = args.fake_post
+ train_data_root = args.train_root
+ is_use_ray = args.use_ray
+ n_bit = args.n_bit
+ load_batch_size = args.load_batch_size
+ compute_batch_size = args.compute_batch_size
+ folder_list_txt = args.txt
+ num_cpus = args.num_cpus
+
+ # Load all the generated data files
+ print("Loading generated data files...")
+ data_npz_file_list = load_data_with_prefix(gen_data_root, 'data.npz')
+ if is_use_ray:
+ ray.init(num_cpus=num_cpus)
+ futures = []
+ gen_graph_list = []
+ gen_prefix_list = []
+ for i in tqdm(range(0, len(data_npz_file_list), load_batch_size)):
+ batch_data_npz_file_list = data_npz_file_list[i: i + load_batch_size]
+ futures.append(load_and_build_graph_remote.remote(batch_data_npz_file_list, gen_post_data_root, n_bit))
+ for future in tqdm(futures):
+ result = ray.get(future)
+ gen_graph_list_batch, gen_prefix_list_batch = result
+ gen_graph_list.extend(gen_graph_list_batch)
+ gen_prefix_list.extend(gen_prefix_list_batch)
+ ray.shutdown()
+ else:
+ gen_graph_list, gen_prefix_list = load_and_build_graph(data_npz_file_list, gen_post_data_root, n_bit)
+ print(f"Loaded {len(gen_graph_list)} generated data files")
+
+ print("Loading training data files...")
+ data_npz_file_list = load_data_with_prefix(train_data_root, 'data.npz', folder_list_txt=folder_list_txt)
+ load_batch_size = load_batch_size * 5
+ if is_use_ray:
+ ray.init(num_cpus=num_cpus)
+ futures = []
+ train_graph_list = []
+ train_prefix_list = []
+ for i in tqdm(range(0, len(data_npz_file_list), load_batch_size)):
+ batch_data_npz_file_list = data_npz_file_list[i: i + load_batch_size]
+ futures.append(load_and_build_graph_remote.remote(batch_data_npz_file_list, None, n_bit))
+ for future in tqdm(futures):
+ result = ray.get(future)
+ train_graph_list_batch, train_prefix_list_batch = result
+ train_graph_list.extend(train_graph_list_batch)
+ train_prefix_list.extend(train_prefix_list_batch)
+ ray.shutdown()
+ else:
+ train_graph_list, train_prefix_list = load_and_build_graph(data_npz_file_list, None, n_bit)
+ print(f"Loaded {len(train_graph_list)} training data files")
+
+ print("Computing Unique ratio...")
+ unique_ratio, deduplicate_matrix = compute_gen_unique(gen_graph_list, is_use_ray, compute_batch_size)
+ print(f"Unique ratio: {unique_ratio}")
+
+ deduplicate_components_txt = gen_data_root + f"_deduplicate_components_{n_bit}bit.txt"
+ fp = open(deduplicate_components_txt, "w")
+ print(f"Unique ratio: {unique_ratio}", file=fp)
+ deduplicate_components = find_connected_components(deduplicate_matrix)
+ for component in deduplicate_components:
+ if len(component) > 1:
+ component = [gen_prefix_list[idx] for idx in component]
+ print(f"Component: {component}", file=fp)
+ print(f"Deduplicate components are saved to {deduplicate_components_txt}")
+
+ print("Computing Novel ratio...")
+    novel_ratio, _ = compute_gen_novel(gen_graph_list, train_graph_list, is_use_ray, compute_batch_size)
+ print(f"Novel ratio: {novel_ratio}")
+ print("Done")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/eval/check_valid.py b/eval/check_valid.py
new file mode 100644
index 0000000000000000000000000000000000000000..91398ea244297d7c0411bcf755aa5d9f7024d956
--- /dev/null
+++ b/eval/check_valid.py
@@ -0,0 +1,159 @@
+import random
+import shutil
+import sys
+
+from OCC.Core.BRepBuilderAPI import BRepBuilderAPI_MakeSolid
+from OCC.Core.BRepCheck import BRepCheck_Analyzer
+from OCC.Core.IGESControl import IGESControl_Reader
+from OCC.Core.Interface import Interface_Static
+from OCC.Core.STEPControl import STEPControl_Reader, STEPControl_Writer, STEPControl_AsIs
+from OCC.Core.StepData import StepData_StepModel
+from OCC.Core.TopAbs import TopAbs_SOLID, TopAbs_COMPOUND, TopAbs_SHELL, TopAbs_FACE, TopAbs_EDGE
+from OCC.Extend.DataExchange import read_step_file
+from OCC.Core.ShapeFix import ShapeFix_ShapeTolerance
+import os
+import argparse
+import glob
+from tqdm import tqdm
+from matplotlib import pyplot as plt
+import numpy as np
+from diffusion.utils import get_primitives
+
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+Interface_Static.SetIVal("read.precision.mode", 1)
+Interface_Static.SetRVal("read.precision.val", 1e-1)
+# Interface_Static.SetIVal("read.stdsameparameter.mode", 1)
+# Interface_Static.SetIVal("read.surfacecurve.mode", 3)
+#
+# Interface_Static.SetCVal("write.step.schema", "DIS")
+Interface_Static.SetIVal("write.precision.mode", 2)
+Interface_Static.SetRVal("write.precision.val", 1e-1)
+
+
+# Interface_Static.SetIVal("write.surfacecurve.mode", 1)
+
+
+def check_step_valid_soild(step_file, precision=1e-1, return_shape=False):
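+    # A STEP file counts as valid iff it parses, yields a single TopAbs_SOLID,
+    # and passes BRepCheck_Analyzer after relaxing its tolerance to `precision`.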
+ try:
+ shape = read_step_file(str(step_file), as_compound=False, verbosity=False)
+    except Exception:
+ if return_shape:
+ return False, None
+ else:
+ return False
+ if shape.ShapeType() != TopAbs_SOLID:
+ if return_shape:
+ return False, shape
+ else:
+ return False
+ shape_tol_setter = ShapeFix_ShapeTolerance()
+ shape_tol_setter.SetTolerance(shape, precision)
+ analyzer = BRepCheck_Analyzer(shape)
+ is_valid = analyzer.IsValid()
+ if return_shape:
+ return is_valid, shape
+ return is_valid
+
+
+def load_data_with_prefix(root_folder, prefix, folder_list_txt=None):
+ data_files = []
+ folder_list = []
+ if folder_list_txt is not None:
+ with open(folder_list_txt, "r") as f:
+ folder_list = f.read().splitlines()
+ # Walk through the directory tree starting from the root folder
+ for root, dirs, files in os.walk(root_folder):
+ if folder_list_txt is not None and os.path.basename(root) not in folder_list:
+ continue
+ is_found = False
+ for filename in files:
+ # Check if the file ends with the specified prefix
+ if filename.endswith(prefix):
+ file_path = os.path.join(root, filename)
+ is_found = True
+ data_files.append(file_path)
+ if not is_found:
+ print(f"No {prefix} file found in {root}")
+
+ return data_files
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--data_root", type=str, required=True)
+ parser.add_argument("--prefix", type=str, required=False, default="")
+ parser.add_argument("--only_success", action="store_true", default=False)
+ args = parser.parse_args()
+ data_root = args.data_root
+ only_success = args.only_success
+ folders = [f for f in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, f))]
+
+ if args.prefix:
+ step_file_list = load_data_with_prefix(os.path.join(data_root, args.prefix), ".step")
+ assert len(step_file_list) > 0
+ print(f"Checking CAD solids in {args.prefix}...")
+        isvalid = check_step_valid_soild(step_file_list[0])
+ print("Valid" if isvalid else "Invalid")
+ exit(0)
+
+ step_file_list = load_data_with_prefix(data_root, ".step")
+
+ print(f"Total sample features: {len(folders)}")
+ print(f"Total CAD solids: {len(step_file_list)}")
+
+ print("Start checking CAD solids...")
+
+ exception_folders = []
+ exception_out_root = data_root + "_exception"
+ if os.path.exists(exception_out_root):
+ shutil.rmtree(exception_out_root)
+ os.makedirs(exception_out_root, exist_ok=False)
+
+ # Load cad data
+ valid_count = 0
+ pbar = tqdm(step_file_list)
+ num_faces = []
+ num_edges = []
+ for step_file in pbar:
+ is_valid, shape = check_step_valid_soild(step_file, return_shape=True)
+ if os.path.exists(os.path.join(os.path.dirname(step_file), "success.txt")) and not is_valid:
+ folder_name = os.path.basename(os.path.dirname(step_file))
+ exception_folders.append(folder_name)
+ shutil.copytree(os.path.dirname(step_file), os.path.join(exception_out_root, folder_name))
+
+ if is_valid:
+ if only_success and not os.path.exists(os.path.join(os.path.dirname(step_file), "success.txt")):
+ continue
+ valid_count += 1
+ num_faces.append(len(get_primitives(shape, TopAbs_FACE)))
+ num_edges.append(len(get_primitives(shape, TopAbs_EDGE)) // 2)
+ pbar.set_postfix({"valid_count": valid_count})
+ # else:
+ # print(f"Invalid CAD solid: {step_file}")
+
+ fig, ax = plt.subplots(1, 2, layout="constrained")
+ ax[0].set_title("Num. faces")
+ ax[1].set_title("Num. edges")
+ hist_f, bin_f = np.histogram(num_faces, bins=5, range=(0, 30))
+ hist_e, bin_e = np.histogram(num_edges, bins=5, range=(0, 50))
+ # Normalize
+ hist_f = hist_f / np.sum(hist_f)
+ hist_e = hist_e / np.sum(hist_e)
+ ax[0].plot(bin_f[:-1], hist_f, "-")
+ ax[1].plot(bin_e[:-1], hist_e, "-")
+ ax[0].set_aspect(1. / ax[0].get_data_ratio())
+ ax[1].set_aspect(1. / ax[1].get_data_ratio())
+ plt.savefig(data_root + "_num_faces_edges.png", dpi=600)
+
+ print(f"Number of valid CAD solids: {valid_count}")
+ print(f"Valid rate: {valid_count / len(folders) * 100:.2f}%")
+
+ if len(exception_folders) > 0:
+ with open(os.path.join(exception_out_root, "exception_folders.txt"), "w") as f:
+ for folder in exception_folders:
+ f.write(folder + "\n")
+ print(f"Exception folders are saved to {exception_out_root}")
+ if len(exception_folders) == 0:
+ shutil.rmtree(exception_out_root)
+ print("No exception folders found.")
diff --git a/eval/eval_brepgen.py b/eval/eval_brepgen.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c98248d02faf799b701bbd17c15c2bbbb26972d
--- /dev/null
+++ b/eval/eval_brepgen.py
@@ -0,0 +1,409 @@
+import torch
+import argparse
+import os
+import numpy as np
+from lightning_fabric import seed_everything
+from tqdm import tqdm
+import random
+import warnings
+from scipy.stats import entropy
+from sklearn.neighbors import NearestNeighbors
+from plyfile import PlyData
+from pathlib import Path
+import multiprocessing
+from chamfer_distance import ChamferDistance
+from eval.eval_pc_set import *
+
+N_POINTS = 2000
+
+
+def find_files(folder, extension):
+ return sorted([Path(os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith(extension)])
+
+
+def read_ply(path):
+ with open(path, 'rb') as f:
+ plydata = PlyData.read(f)
+ x = np.array(plydata['vertex']['x'])
+ y = np.array(plydata['vertex']['y'])
+ z = np.array(plydata['vertex']['z'])
+ vertex = np.stack([x, y, z], axis=1)
+ return vertex
+
+
+def distChamfer(a, b):
+ x, y = a, b
+ bs, num_points, points_dim = x.size()
+ xx = torch.bmm(x, x.transpose(2, 1))
+ yy = torch.bmm(y, y.transpose(2, 1))
+ zz = torch.bmm(x, y.transpose(2, 1))
+ diag_ind = torch.arange(0, num_points).to(a).long()
+ rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
+ ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
+ P = (rx.transpose(2, 1) + ry - 2 * zz)
+ return P.min(1)[0], P.min(2)[0]
+
+
+def _pairwise_CD(sample_pcs, ref_pcs, batch_size):
+ N_sample = sample_pcs.shape[0]
+ N_ref = ref_pcs.shape[0]
+ all_cd = []
+ iterator = range(N_sample)
+ matched_gt = []
+ pbar = tqdm(iterator)
+ chamfer_dist = ChamferDistance()
+
+ for sample_b_start in pbar:
+ sample_batch = sample_pcs[sample_b_start]
+
+ cd_lst = []
+ for ref_b_start in range(0, N_ref, batch_size):
+ ref_b_end = min(N_ref, ref_b_start + batch_size)
+ ref_batch = ref_pcs[ref_b_start:ref_b_end]
+
+ batch_size_ref = ref_batch.size(0)
+ sample_batch_exp = sample_batch.view(1, -1, 3).expand(batch_size_ref, -1, -1)
+ sample_batch_exp = sample_batch_exp.contiguous()
+
+ dl, dr, idx1, idx2 = chamfer_dist(sample_batch_exp, ref_batch)
+ cd_lst.append((dl.mean(dim=1) + dr.mean(dim=1)).view(1, -1))
+
+ cd_lst = torch.cat(cd_lst, dim=1)
+ all_cd.append(cd_lst)
+
+ hit = np.argmin(cd_lst.detach().cpu().numpy()[0])
+ matched_gt.append(hit)
+ pbar.set_postfix({"cov": len(np.unique(matched_gt)) * 1.0 / N_ref})
+
+ all_cd = torch.cat(all_cd, dim=0) # N_sample, N_ref
+
+ return all_cd
+
+
+def compute_cov_mmd(sample_pcs, ref_pcs, batch_size):
+ all_dist = _pairwise_CD(sample_pcs, ref_pcs, batch_size)
+ N_sample, N_ref = all_dist.size(0), all_dist.size(1)
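+    # MMD-CD: mean over references of the Chamfer distance to their closest sample.
+    # COV-CD: fraction of references that are the closest match of at least one sample.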
+ min_val_fromsmp, min_idx = torch.min(all_dist, dim=1)
+ min_val, _ = torch.min(all_dist, dim=0)
+ mmd = min_val.mean()
+ cov = float(min_idx.unique().view(-1).size(0)) / float(N_ref)
+ cov = torch.tensor(cov).to(all_dist)
+
+ return {
+ 'MMD-CD': mmd.item(),
+ 'COV-CD': cov.item(),
+ }, min_idx.cpu().numpy()
+
+def jsd_between_point_cloud_sets(sample_pcs, ref_pcs, in_unit_sphere, resolution=28):
+    '''Computes the JSD between two sets of point-clouds, as introduced in the paper
+    "Learning Representations And Generative Models For 3D Point Clouds".
+    Args:
+        sample_pcs: (np.ndarray S1xR1x3) S1 point-clouds, each of R1 points.
+        ref_pcs: (np.ndarray S2xR2x3) S2 point-clouds, each of R2 points.
+        in_unit_sphere: (bool) whether the point-clouds are expected to lie in the unit sphere.
+        resolution: (int) grid-resolution. Affects granularity of measurements.
+    '''
+ sample_grid_var = entropy_of_occupancy_grid(sample_pcs, resolution, in_unit_sphere)[1]
+ ref_grid_var = entropy_of_occupancy_grid(ref_pcs, resolution, in_unit_sphere)[1]
+ return jensen_shannon_divergence(sample_grid_var, ref_grid_var)
+
+
+def entropy_of_occupancy_grid(pclouds, grid_resolution, in_sphere=False):
+ '''Given a collection of point-clouds, estimate the entropy of the random variables
+ corresponding to occupancy-grid activation patterns.
+ Inputs:
+ pclouds: (numpy array) #point-clouds x points per point-cloud x 3
+ grid_resolution (int) size of occupancy grid that will be used.
+ '''
+ epsilon = 10e-4
+ bound = 1 + epsilon
+ if abs(np.max(pclouds)) > bound or abs(np.min(pclouds)) > bound:
+ print(abs(np.max(pclouds)), abs(np.min(pclouds)))
+ warnings.warn('Point-clouds are not in unit cube.')
+
+ if in_sphere and np.max(np.sqrt(np.sum(pclouds ** 2, axis=2))) > bound:
+ warnings.warn('Point-clouds are not in unit sphere.')
+
+ grid_coordinates, _ = unit_cube_grid_point_cloud(grid_resolution, in_sphere)
+ grid_coordinates = grid_coordinates.reshape(-1, 3)
+ grid_counters = np.zeros(len(grid_coordinates))
+ grid_bernoulli_rvars = np.zeros(len(grid_coordinates))
+ nn = NearestNeighbors(n_neighbors=1).fit(grid_coordinates)
+
+ for pc in pclouds:
+ _, indices = nn.kneighbors(pc)
+ indices = np.squeeze(indices)
+ for i in indices:
+ grid_counters[i] += 1
+ indices = np.unique(indices)
+ for i in indices:
+ grid_bernoulli_rvars[i] += 1
+
+ acc_entropy = 0.0
+ n = float(len(pclouds))
+ for g in grid_bernoulli_rvars:
+ p = 0.0
+ if g > 0:
+ p = float(g) / n
+ acc_entropy += entropy([p, 1.0 - p])
+
+ return acc_entropy / len(grid_counters), grid_counters
+
+
+def unit_cube_grid_point_cloud(resolution, clip_sphere=False):
+ '''Returns the center coordinates of each cell of a 3D grid with resolution^3 cells,
+ that is placed in the unit-cube.
+    If clip_sphere is True it drops the "corner" cells that lie outside the unit-sphere.
+ '''
+ grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)
+ spacing = 1.0 / float(resolution - 1) * 2
+ for i in range(resolution):
+ for j in range(resolution):
+ for k in range(resolution):
+                grid[i, j, k, 0] = i * spacing - 1.0
+                grid[i, j, k, 1] = j * spacing - 1.0
+                grid[i, j, k, 2] = k * spacing - 1.0
+
+ if clip_sphere:
+ grid = grid.reshape(-1, 3)
+ grid = grid[np.linalg.norm(grid, axis=1) <= 0.5]
+
+ return grid, spacing
+
+
+def jensen_shannon_divergence(P, Q):
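+    # JSD(P, Q) = H((P + Q) / 2) - (H(P) + H(Q)) / 2, with base-2 entropy H.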
+ if np.any(P < 0) or np.any(Q < 0):
+ raise ValueError('Negative values.')
+ if len(P) != len(Q):
+ raise ValueError('Non equal size.')
+
+ P_ = P / np.sum(P) # Ensure probabilities.
+ Q_ = Q / np.sum(Q)
+
+ e1 = entropy(P_, base=2)
+ e2 = entropy(Q_, base=2)
+ e_sum = entropy((P_ + Q_) / 2.0, base=2)
+ res = e_sum - ((e1 + e2) / 2.0)
+
+ res2 = _jsdiv(P_, Q_)
+
+ if not np.allclose(res, res2, atol=10e-5, rtol=0):
+ warnings.warn('Numerical values of two JSD methods don\'t agree.')
+
+ return res
+
+
+def _jsdiv(P, Q):
+ '''another way of computing JSD'''
+
+ def _kldiv(A, B):
+ a = A.copy()
+ b = B.copy()
+ idx = np.logical_and(a > 0, b > 0)
+ a = a[idx]
+ b = b[idx]
+ return np.sum([v for v in a * np.log2(a / b)])
+
+ P_ = P / np.sum(P)
+ Q_ = Q / np.sum(Q)
+
+ M = 0.5 * (P_ + Q_)
+
+ return 0.5 * (_kldiv(P_, M) + _kldiv(Q_, M))
+
+
+def downsample_pc(points, n):
+ sample_idx = random.sample(list(range(points.shape[0])), n)
+ return points[sample_idx]
+
+
+def normalize_pc(points):
+ # normalize
+ mean = np.mean(points, axis=0)
+ points = (points - mean)
+ # fit to unit cube
+ scale = np.max(np.abs(points))
+ points = points / scale
+ return points
+
+
+def align_pc(points):
+ # 1. Center the point cloud
+ centroid = np.mean(points, axis=0)
+ centered_points = points - centroid
+
+ # 2. Calculate the three edge lengths of bbox
+ min_coords = np.min(centered_points, axis=0)
+ max_coords = np.max(centered_points, axis=0)
+ dimensions = max_coords - min_coords
+
+ # 3. Sort axes by dimension length to get axis order
+ axis_order = np.argsort(dimensions)[::-1] # sort from longest to shortest
+
+ # 4. Create permutation matrix (align longest edge to x, shortest to y)
+ perm_matrix = np.zeros((3, 3))
+ perm_matrix[0, axis_order[0]] = 1 # longest edge -> x
+ perm_matrix[1, axis_order[2]] = 1 # shortest edge -> y
+ perm_matrix[2, axis_order[1]] = 1 # medium edge -> z
+
+ # 5. Apply transformation
+ aligned_points = np.dot(centered_points, perm_matrix.T)
+
+ # 6. Ensure same centroid faces direction
+ if np.mean(aligned_points[:, 2]) < 0:
+ aligned_points[:, 2] *= -1
+
+ return aligned_points
+
+
+def collect_pc(cad_folder):
+ pc_path = find_files(os.path.join(cad_folder, 'pcd'), 'final_pcd.ply')
+ if len(pc_path) == 0:
+ return []
+ pc_path = pc_path[-1] # final pcd
+ pc = read_ply(pc_path)
+ if pc.shape[0] > N_POINTS:
+ pc = downsample_pc(pc, N_POINTS)
+ pc = normalize_pc(pc)
+ return pc
+
+
+def collect_pc2(cad_folder):
+ pc = read_ply(cad_folder)
+ if pc.shape[0] > N_POINTS:
+ pc = downsample_pc(pc, N_POINTS)
+ pc = normalize_pc(pc)
+ pc = align_pc(pc)
+ return pc
+
+
+theta_x = np.radians(90) # Rotation angle around X-axis
+theta_y = np.radians(90) # Rotation angle around Y-axis
+theta_z = np.radians(180) # Rotation angle around Z-axis
+
+# Create individual rotation matrices
+Rx = np.array([[1, 0, 0],
+ [0, np.cos(theta_x), -np.sin(theta_x)],
+ [0, np.sin(theta_x), np.cos(theta_x)]])
+
+Ry = np.array([[np.cos(theta_y), 0, np.sin(theta_y)],
+ [0, 1, 0],
+ [-np.sin(theta_y), 0, np.cos(theta_y)]])
+
+Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0],
+ [np.sin(theta_z), np.cos(theta_z), 0],
+ [0, 0, 1]])
+
+rotation_matrix = np.dot(np.dot(Rz, Ry), Rx)
+
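+# Sanity check (sketch): the composed rotation should be orthonormal, so it
+# preserves distances when applied to a point cloud:
+#   assert np.allclose(rotation_matrix @ rotation_matrix.T, np.eye(3))
+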
+
+def collect_pc3(cad_folder):
+ pc = read_ply(cad_folder)
+ if pc.shape[0] > N_POINTS:
+ pc = downsample_pc(pc, N_POINTS)
+ pc = normalize_pc(pc)
+ rotated_point_cloud = np.dot(pc, rotation_matrix.T).astype(np.float32) # Transpose the rotation matrix to apply it correctly
+ return rotated_point_cloud
+
+
+def load_data_with_prefix(root_folder, prefix):
+ data_files = []
+
+ # Walk through the directory tree starting from the root folder
+ for root, dirs, files in os.walk(root_folder):
+ for filename in files:
+            # Keep files whose names end with the given suffix (the parameter is named 'prefix')
+ if filename.endswith(prefix):
+ file_path = os.path.join(root, filename)
+ data_files.append(file_path)
+
+ data_files.sort()
+ return data_files
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--fake", type=str)
+ parser.add_argument("--real", type=str)
+ parser.add_argument("--n_test", type=int, default=1000)
+ parser.add_argument("--multi", type=float, default=3)
+ parser.add_argument("--times", type=int, default=10)
+ parser.add_argument("--batch_size", type=int, default=64)
+ args = parser.parse_args()
+
+ seed_everything(0)
+ print("n_test: {}, multiplier: {}, repeat times: {}".format(args.n_test, args.multi, args.times))
+
+ args.output = args.fake + '_results.txt'
+
+ # Load reference pcd
+ num_cpus = multiprocessing.cpu_count()
+ ref_pcs = []
+ gt_shape_paths = load_data_with_prefix(args.real, '.ply')
+ load_iter = multiprocessing.Pool(num_cpus).imap(collect_pc2, gt_shape_paths)
+ for pc in tqdm(load_iter, total=len(gt_shape_paths)):
+ if len(pc) > 0:
+ ref_pcs.append(pc)
+ ref_pcs = np.stack(ref_pcs, axis=0)
+ print("real point clouds: {}".format(ref_pcs.shape))
+
+ # Load fake pcd
+ sample_pcs = []
+ shape_paths = load_data_with_prefix(args.fake, '.ply')
+ load_iter = multiprocessing.Pool(num_cpus).imap(collect_pc2, shape_paths)
+ for pc in tqdm(load_iter, total=len(shape_paths)):
+ if len(pc) > 0:
+ sample_pcs.append(pc)
+ sample_pcs = np.stack(sample_pcs, axis=0)
+
+ print("fake point clouds: {}".format(sample_pcs.shape))
+
+ # Testing
+ cov_on_gt = []
+ fp = open(args.output, "w")
+ result_list = []
+ for i in range(args.times):
+ print("iteration {}...".format(i))
+ select_idx1 = random.sample(list(range(len(sample_pcs))), int(args.multi * args.n_test))
+ rand_sample_pcs = sample_pcs[select_idx1]
+
+ select_idx2 = random.sample(list(range(len(ref_pcs))), args.n_test)
+ rand_ref_pcs = ref_pcs[select_idx2]
+
+ jsd = jsd_between_point_cloud_sets(rand_sample_pcs, rand_ref_pcs, in_unit_sphere=False)
+ with torch.no_grad():
+ rand_sample_pcs = torch.tensor(rand_sample_pcs).cuda().float()
+ rand_ref_pcs = torch.tensor(rand_ref_pcs).cuda().float()
+ result, idx = compute_cov_mmd(rand_sample_pcs, rand_ref_pcs, batch_size=args.batch_size)
+ result.update({"JSD": jsd})
+
+ cov_on_gt.extend(list(np.array(select_idx2)[np.unique(idx)]))
+
+ print(result)
+ print(result, file=fp)
+ result_list.append(result)
+
+ avg_result = {}
+ for k in result_list[0].keys():
+ avg_result.update({"avg-" + k: np.mean([x[k] for x in result_list])})
+ print("average result:")
+ print(avg_result)
+ print(avg_result, file=fp)
+ fp.close()
+
+ cov_on_gt = list(set(cov_on_gt))
+ cov_on_gt = [gt_shape_paths[i] for i in cov_on_gt]
+ np.save(args.fake + '_cov_on_gt.npy', cov_on_gt)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/eval/eval_complexity.py b/eval/eval_complexity.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7b82991c4357b289bd666b99a5155bac27473b3
--- /dev/null
+++ b/eval/eval_complexity.py
@@ -0,0 +1,194 @@
+import glob
+import ray
+import numpy as np
+import argparse
+
+from OCC.Core.BRepAdaptor import BRepAdaptor_Surface
+from OCC.Core.BRepGProp import brepgprop
+from OCC.Core.BRepLProp import BRepLProp_SLProps
+from OCC.Core.GProp import GProp_GProps
+from lightning_fabric import seed_everything
+
+from eval.eval_condition import *
+
+import networkx as nx
+from OCC.Core.STEPControl import STEPControl_Reader
+from OCC.Core.TopExp import TopExp_Explorer
+from OCC.Core.TopAbs import TopAbs_VERTEX, TopAbs_EDGE
+from OCC.Core.BRep import BRep_Tool
+from OCC.Core.gp import gp_Pnt
+
+
+def remove_outliers_zscore(data, threshold=3, max_value=50):
+ if len(data) == 0 or sum(data) == 0:
+ return data
+ mean = np.mean(data)
+ std_dev = np.std(data)
+ return [x for x in data if abs((x - mean) / (std_dev + 1e-8)) <= threshold and x < max_value]
+
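+# Example (sketch): 100.0 survives the 3-sigma test here (the outlier inflates
+# the std), but it is still dropped by the max_value cap:
+#   remove_outliers_zscore([1.0, 1.1, 0.9, 100.0])  ->  [1.0, 1.1, 0.9]
+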
+
+def extract_edges_and_vertices(shape):
+ explorer_edges = TopExp_Explorer(shape, TopAbs_EDGE)
+ explorer_vertices = TopExp_Explorer(shape, TopAbs_VERTEX)
+
+ vertex_map = {}
+ edges = []
+
+ while explorer_edges.More():
+ edge = explorer_edges.Current()
+
+ vertices_on_edge = []
+ vertex_explorer = TopExp_Explorer(edge, TopAbs_VERTEX)
+ while vertex_explorer.More():
+ vertex = vertex_explorer.Current()
+ point = BRep_Tool.Pnt(vertex)
+ coord = (round(point.X(), 6), round(point.Y(), 6), round(point.Z(), 6))
+
+ if coord not in vertex_map:
+ vertex_map[coord] = len(vertex_map)
+
+ vertices_on_edge.append(vertex_map[coord])
+ vertex_explorer.Next()
+
+ if len(vertices_on_edge) == 2:
+ edges.append(tuple(vertices_on_edge))
+
+ explorer_edges.Next()
+
+ return vertex_map, edges
+
+
+def create_nx_graph(vertex_map, edges):
+ graph = nx.Graph()
+
+ for coord, node_id in vertex_map.items():
+ graph.add_node(node_id, coord=coord)
+
+ for edge in edges:
+ graph.add_edge(edge[0], edge[1])
+
+ return graph
+
+
+def calculate_cyclomatic_complexity(graph):
+ num_nodes = graph.number_of_nodes() # N
+ num_edges = graph.number_of_edges() # E
+ if graph.is_directed():
+ num_components = nx.number_strongly_connected_components(graph)
+ else:
+ num_components = nx.number_connected_components(graph)
+ # M = E - N + 2P
+ cyclomatic_complexity = num_edges - num_nodes + 2 * num_components
+ return cyclomatic_complexity
+
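+# Worked example (sketch): a triangle has E=3, N=3, P=1, so M = 3 - 3 + 2*1 = 2:
+#   assert calculate_cyclomatic_complexity(nx.cycle_graph(3)) == 2
+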
+
+def eval_complexity_one(step_file_path):
+ isvalid, shape = check_step_valid_soild(step_file_path, return_shape=True)
+ if not isvalid:
+ return None
+
+ vertex_map, edges = extract_edges_and_vertices(shape)
+ graph = create_nx_graph(vertex_map, edges)
+ cyclomatic_complexity = calculate_cyclomatic_complexity(graph)
+
+ face_list = get_primitives(shape, TopAbs_FACE)
+ num_face = len(face_list)
+    num_edge = len(edges)
+    num_vertex = len(vertex_map)  # vertex_map holds the unique vertices
+
+ sample_point_curvature = []
+ num_samples = 256
+ for face in face_list:
+ surf_adaptor = BRepAdaptor_Surface(face)
+ u_min, u_max, v_min, v_max = (surf_adaptor.FirstUParameter(), surf_adaptor.LastUParameter(), surf_adaptor.FirstVParameter(),
+ surf_adaptor.LastVParameter())
+
+ u_samples = np.linspace(u_min, u_max, int(np.sqrt(num_samples)))
+ v_samples = np.linspace(v_min, v_max, int(np.sqrt(num_samples)))
+
+ face_sample_point_curvature = []
+ for u in u_samples:
+ for v in v_samples:
+ props = BRepLProp_SLProps(surf_adaptor, u, v, 2, 1e-8)
+ if props.IsCurvatureDefined():
+ mean_curvature = props.MeanCurvature()
+ face_sample_point_curvature.append(abs(mean_curvature))
+ face_sample_point_curvature = remove_outliers_zscore(face_sample_point_curvature)
+ if len(face_sample_point_curvature) == 0:
+ continue
+ sample_point_curvature.append(np.median(face_sample_point_curvature))
+
+ mean_curvature = np.mean(sample_point_curvature) if len(sample_point_curvature) > 0 else np.nan
+
+    if num_face == 0 or np.isnan(mean_curvature):
+ return None
+
+ return {
+ 'num_face' : int(num_face),
+ 'num_edge' : int(num_edge),
+ 'num_vertex' : int(num_vertex),
+ 'cyclomatic_complexity': cyclomatic_complexity,
+ 'mean_curvature' : mean_curvature,
+ }
+
+
+eval_complexity_one_remote = ray.remote(eval_complexity_one)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Evaluate Brep Complexity')
+ parser.add_argument('--eval_root', type=str)
+ parser.add_argument('--only_valid', action='store_true')
+ args = parser.parse_args()
+
+    # Set the random seed
+ seed_everything(0)
+ ray.init(ignore_reinit_error=True, local_mode=False)
+
+ all_folders = os.listdir(args.eval_root)
+ is_valid_list = []
+ futures = []
+ for folder in tqdm(all_folders):
+ step_path_list = glob.glob(os.path.join(args.eval_root, folder, '*.step'))
+ if len(step_path_list) == 0:
+ is_valid_list.append(False)
+ futures.append(None)
+ continue
+ is_valid_list.append(check_step_valid_soild(step_path_list[0], return_shape=False))
+ futures.append(eval_complexity_one_remote.remote(step_path_list[0]))
+
+ assert len(is_valid_list) == len(futures) == len(all_folders)
+ all_result = {}
+ for i, future in enumerate(tqdm(futures)):
+ if future is None:
+ continue
+ result = ray.get(future)
+ if args.only_valid and not is_valid_list[i]:
+ continue
+ all_result[all_folders[i]] = result
+
+ num_face_list = []
+ num_edge_list = []
+ num_vertex_list = []
+ cyclomatic_complexity_list = []
+ mean_curvature_list = []
+ exception_folder = []
+
+ for folder, result in tqdm(all_result.items()):
+ if result is None:
+ continue
+ result = dict(result)
+ num_face_list.append(result['num_face'])
+ num_edge_list.append(result['num_edge'])
+ num_vertex_list.append(result['num_vertex'])
+ cyclomatic_complexity_list.append(result['cyclomatic_complexity'])
+ mean_curvature_list.append(result['mean_curvature'])
+ exception_folder.append(folder)
+
+ print(f'Num Face: {np.mean(num_face_list)}')
+ print(f'Num Edge: {np.mean(num_edge_list)}')
+ print(f'Num Vertex: {np.mean(num_vertex_list)}')
+ print(f'Cyclomatic Complexity: {np.mean(cyclomatic_complexity_list)}')
+ print(f'Mean Curvature: {np.mean(mean_curvature_list)}')
+ print(f"{np.mean(num_face_list)} {np.mean(num_edge_list)} {np.mean(num_vertex_list)} "
+ f"{np.mean(cyclomatic_complexity_list)} {np.mean(mean_curvature_list)}")
+ ray.shutdown()
diff --git a/eval/eval_cond.sh b/eval/eval_cond.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c1d11247beaa183cecd4ac97b828d1c12d08265c
--- /dev/null
+++ b/eval/eval_cond.sh
@@ -0,0 +1,13 @@
+if [ -z "$TYPE" ]; then
+  echo "Error: 'TYPE' variable is not set."
+  exit 1
+fi
+
+# Eval
+python -m eval.eval_condition \
+ --eval_root ./outputs/${TYPE}_post \
+ --gt_root ./data/organized_data/ \
+ --list ./data/data_index/deduplicated_deepcad_testing_7_30.txt \
+ --use_ray \
+ --from_scratch \
+ --num_cpus 24
diff --git a/eval/eval_condition.py b/eval/eval_condition.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4639311f1b060efa7356d07b2276a397e8979d6
--- /dev/null
+++ b/eval/eval_condition.py
@@ -0,0 +1,479 @@
+import time, os, random, traceback, sys
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import torch
+import numpy as np
+
+from OCC.Core.BRepAdaptor import BRepAdaptor_Curve
+from tqdm import tqdm
+import trimesh
+import argparse
+
+# import pandas as pd
+from chamferdist import ChamferDistance
+
+from OCC.Core.STEPControl import STEPControl_Reader
+from OCC.Core.TopExp import TopExp_Explorer
+from OCC.Core.TopAbs import TopAbs_VERTEX, TopAbs_EDGE, TopAbs_FACE
+from OCC.Core.BRep import BRep_Tool
+from OCC.Core.gp import gp_Pnt
+from OCC.Core.IFSelect import IFSelect_RetDone
+from OCC.Extend.DataExchange import read_step_file, write_step_file, write_stl_file
+from OCC.Core.BRepCheck import BRepCheck_Analyzer
+
+import ray
+import shutil
+
+from OCC.Core.TopoDS import TopoDS_Solid, TopoDS_Shell
+from OCC.Core.TopAbs import TopAbs_COMPOUND, TopAbs_SHELL, TopAbs_SOLID
+
+from diffusion.utils import get_primitives, get_triangulations, get_points_along_edge, get_curve_length
+from eval.check_valid import check_step_valid_soild
+
+
+def is_vertex_close(p1, p2, tol=1e-3):
+ return np.linalg.norm(np.array(p1) - np.array(p2)) < tol
+
+
+def compute_statistics(eval_root, v_only_valid, listfile):
+ all_folders = [folder for folder in os.listdir(eval_root) if os.path.isdir(os.path.join(eval_root, folder))]
+ if listfile != '':
+ valid_names = [item.strip() for item in open(listfile, 'r').readlines()]
+ all_folders = list(set(all_folders) & set(valid_names))
+ all_folders.sort()
+ exception_folders = []
+ results = {
+ "prefix": []
+ }
+ for folder_name in tqdm(all_folders):
+ if not os.path.exists(os.path.join(eval_root, folder_name, 'eval.npz')):
+ exception_folders.append(folder_name)
+ continue
+
+ item = np.load(os.path.join(eval_root, folder_name, 'eval.npz'), allow_pickle=True)['results'].item()
+ if item['num_recon_face'] == 1:
+ exception_folders.append(folder_name)
+ if v_only_valid:
+ continue
+
+ if v_only_valid and not os.path.exists(os.path.join(eval_root, folder_name, 'success.txt')):
+ continue
+
+ results["prefix"].append(folder_name)
+ for key in item:
+ if key not in results:
+ results[key] = []
+ results[key].append(item[key])
+
+ if len(exception_folders) != 0:
+ print(f"Found exception folders: {exception_folders}")
+
+ for key in results:
+ results[key] = np.array(results[key])
+
+ results_str = ""
+ results_str += "Number\n"
+ results_str += f"Vertices: {np.mean(results['num_recon_vertex'])}/{np.mean(results['num_gt_vertex'])}\n"
+ results_str += f"Edge: {np.mean(results['num_recon_edge'])}/{np.mean(results['num_gt_edge'])}\n"
+ results_str += f"Face: {np.mean(results['num_recon_face'])}/{np.mean(results['num_gt_face'])}\n"
+
+ results_str += "Chamfer\n"
+ results_str += f"Vertices: {np.mean(results['vertex_cd'])}\n"
+ results_str += f"Edge: {np.mean(results['edge_cd'])}\n"
+ results_str += f"Face: {np.mean(results['face_cd'])}\n"
+
+ results_str += "Detection\n"
+ results_str += f"Vertices: {np.mean(results['vertex_fscore'])}\n"
+ results_str += f"Edge: {np.mean(results['edge_fscore'])}\n"
+ results_str += f"Face: {np.mean(results['face_fscore'])}\n"
+
+ results_str += "Topology\n"
+ results_str += f"FE: {np.mean(results['fe_fscore'])}\n"
+ results_str += f"EV: {np.mean(results['ev_fscore'])}\n"
+
+ results_str += "Accuracy\n"
+ results_str += f"Vertices: {np.mean(results['vertex_acc_cd'])}\n"
+ results_str += f"Edge: {np.mean(results['edge_acc_cd'])}\n"
+ results_str += f"Face: {np.mean(results['face_acc_cd'])}\n"
+ results_str += f"FE: {np.mean(results['fe_pre'])}\n"
+ results_str += f"EV: {np.mean(results['ev_pre'])}\n"
+
+ results_str += "Completeness\n"
+ results_str += f"Vertices: {np.mean(results['vertex_com_cd'])}\n"
+ results_str += f"Edge: {np.mean(results['edge_com_cd'])}\n"
+ results_str += f"Face: {np.mean(results['face_com_cd'])}\n"
+ results_str += f"FE: {np.mean(results['fe_rec'])}\n"
+ results_str += f"EV: {np.mean(results['ev_rec'])}\n"
+ print(results_str)
+ print("{:.4f} {:.4f} {:.4f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(
+ np.mean(results['vertex_cd']), np.mean(results['edge_cd']), np.mean(results['face_cd']),
+ np.mean(results['vertex_fscore']), np.mean(results['edge_fscore']), np.mean(results['face_fscore']),
+ np.mean(results['fe_fscore']), np.mean(results['ev_fscore']),
+ ))
+ print(
+ "{:.0f}/{:.0f} {:.0f}/{:.0f} {:.0f}/{:.0f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}".format(
+ np.mean(results['num_recon_vertex']), np.mean(results['num_gt_vertex']),
+ np.mean(results['num_recon_edge']), np.mean(results['num_gt_edge']),
+ np.mean(results['num_recon_face']), np.mean(results['num_gt_face']),
+ np.mean(results['vertex_acc_cd']), np.mean(results['edge_acc_cd']), np.mean(results['face_acc_cd']),
+ np.mean(results['vertex_com_cd']), np.mean(results['edge_com_cd']), np.mean(results['face_com_cd']),
+ np.mean(results['vertex_pre']), np.mean(results['edge_pre']), np.mean(results['face_pre']),
+ np.mean(results['fe_pre']), np.mean(results['ev_pre']),
+ np.mean(results['vertex_rec']), np.mean(results['edge_rec']), np.mean(results['face_rec']),
+ np.mean(results['fe_rec']), np.mean(results['ev_rec'])
+ ))
+ # print(f"{len(all_folders)-len(exception_folders)}/{len(all_folders)} are valid")
+ print(f"{results['face_cd'].shape[0]}/{len(all_folders)} are valid")
+
+ def draw():
+ face_chamfer = results['face_cd']
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6))
+ ax.hist(face_chamfer, bins=50, range=(0, 0.05), density=True, alpha=0.5, color='b', label='Face')
+ ax.set_title('Face Chamfer Distance')
+ ax.set_xlabel('Chamfer Distance')
+ ax.set_ylabel('Density')
+ ax.legend()
+ plt.savefig(str(eval_root) + "_face_chamfer.png", dpi=600)
+ # plt.show()
+
+ draw()
+
+
+def get_data(v_shape, v_num_per_m=100):
+ faces, face_points, edges, edge_points, vertices, vertex_points = [], [], [], [], [], []
+ for face in get_primitives(v_shape, TopAbs_FACE, v_remove_half=True):
+ try:
+ v, f = get_triangulations(face, 0.1, 0.1)
+ if len(f) == 0:
+ print("Ignore 0 face")
+ continue
+ except:
+ print("Ignore 1 face")
+ continue
+ mesh_item = trimesh.Trimesh(vertices=v, faces=f)
+ area = mesh_item.area
+ num_samples = min(max(int(v_num_per_m * v_num_per_m * area), 5), 10000)
+ pc_item, id_face = trimesh.sample.sample_surface(mesh_item, num_samples)
+ normals = mesh_item.face_normals[id_face]
+ faces.append(face)
+ face_points.append(np.concatenate((pc_item, normals), axis=1))
+ for edge in get_primitives(v_shape, TopAbs_EDGE, v_remove_half=True):
+ length = get_curve_length(edge)
+ num_samples = min(max(int(v_num_per_m * length), 5), 10000)
+ v = get_points_along_edge(edge, num_samples)
+ edges.append(edge)
+ edge_points.append(v)
+ for vertex in get_primitives(v_shape, TopAbs_VERTEX, v_remove_half=True):
+ vertices.append(vertex)
+ vertex_points.append(np.asarray([BRep_Tool.Pnt(vertex).Coord()]))
+ vertex_points = np.stack(vertex_points, axis=0)
+ return faces, face_points, edges, edge_points, vertices, vertex_points
+
+
+def get_chamfer(v_recon_points, v_gt_points):
+ device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+ chamfer_distance = ChamferDistance()
+ recon_fp = torch.from_numpy(np.concatenate(v_recon_points, axis=0)).float().to(device)[:, :3]
+ gt_fp = torch.from_numpy(np.concatenate(v_gt_points, axis=0)).float().to(device)[:, :3]
+ fp_acc_cd = chamfer_distance(recon_fp.unsqueeze(0), gt_fp.unsqueeze(0),
+ bidirectional=False, point_reduction='mean').cpu().item()
+ fp_com_cd = chamfer_distance(gt_fp.unsqueeze(0), recon_fp.unsqueeze(0),
+ bidirectional=False, point_reduction='mean').cpu().item()
+ fp_cd = fp_acc_cd + fp_com_cd
+ return fp_acc_cd, fp_com_cd, fp_cd
+
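+# Note: the first term is accuracy (recon -> GT), the second completeness
+# (GT -> recon); their sum is the symmetric chamfer distance reported as *_cd.
+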
+
+def get_match_ids(v_recon_points, v_gt_points):
+ from scipy.optimize import linear_sum_assignment
+
+ cost = np.zeros([len(v_recon_points), len(v_gt_points)]) # recon to gt
+ for i in range(cost.shape[0]):
+ for j in range(cost.shape[1]):
+ _, _, cost[i][j] = get_chamfer(
+ v_recon_points[i][..., :3][None, ..., :3],
+ v_gt_points[j][..., :3][None, ..., :3]
+ )
+
+ recon_indices, recon_to_gt = linear_sum_assignment(cost)
+
+ result_recon2gt = -1 * np.ones(len(v_recon_points), dtype=np.int32)
+ result_gt2recon = -1 * np.ones(len(v_gt_points), dtype=np.int32)
+
+ result_recon2gt[recon_indices] = recon_to_gt
+ result_gt2recon[recon_to_gt] = recon_indices
+ return result_recon2gt, result_gt2recon, cost
+
+
+def get_detection(id_recon_gt, id_gt_recon, cost_matrix, v_threshold=0.1):
+ true_positive = 0
+ for i in range(len(id_recon_gt)):
+ if id_recon_gt[i] != -1 and cost_matrix[i, id_recon_gt[i]] < v_threshold:
+ true_positive += 1
+ precision = true_positive / (len(id_recon_gt) + 1e-6)
+ recall = true_positive / (len(id_gt_recon) + 1e-6)
+ return 2 * precision * recall / (precision + recall + 1e-6), precision, recall
+
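+# Note: a matched pair counts as a true positive only when its chamfer cost is
+# below v_threshold; the returned triple is (F1, precision, recall).
+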
+
+def get_topology(faces, edges, vertices):
+ recon_face_edge, recon_edge_vertex = {}, {}
+ for i_face, face in enumerate(faces):
+ face_edge = []
+ for edge in get_primitives(face, TopAbs_EDGE):
+ face_edge.append(edges.index(edge) if edge in edges else edges.index(edge.Reversed()))
+ recon_face_edge[i_face] = list(set(face_edge))
+
+ for i_edge, edge in enumerate(edges):
+ edge_vertex = []
+ for vertex in get_primitives(edge, TopAbs_VERTEX):
+ edge_vertex.append(vertices.index(vertex) if vertex in vertices else vertices.index(vertex.Reversed()))
+ recon_edge_vertex[i_edge] = list(set(edge_vertex))
+ return recon_face_edge, recon_edge_vertex
+
+
+def get_topo_detection(recon_face_edge, gt_face_edge, id_recon_gt_face, id_recon_gt_edge):
+ positive = 0
+ for i_recon_face, edges in recon_face_edge.items():
+ i_gt_face = id_recon_gt_face[i_recon_face]
+ if i_gt_face == -1:
+ continue
+ for i_edge in edges:
+ if id_recon_gt_edge[i_edge] in gt_face_edge[i_gt_face]:
+ positive += 1
+ precision = positive / (sum([len(edges) for edges in recon_face_edge.values()]) + 1e-6)
+ recall = positive / (sum([len(edges) for edges in gt_face_edge.values()]) + 1e-6)
+ return 2 * precision * recall / (precision + recall + 1e-6), precision, recall
+
+def eval_one_with_try(eval_root, gt_root, folder_name, is_point2cad=False, v_num_per_m=100):
+ try:
+ eval_one(eval_root, gt_root, folder_name, is_point2cad, v_num_per_m)
+    except:
+        traceback.print_exc()  # log the failure instead of silently swallowing it
+
+def eval_one(eval_root, gt_root, folder_name, is_point2cad=False, v_num_per_m=100):
+ if os.path.exists(eval_root / folder_name / 'error.txt'):
+ os.remove(eval_root / folder_name / 'error.txt')
+ if os.path.exists(eval_root / folder_name / 'eval.npz'):
+ os.remove(eval_root / folder_name / 'eval.npz')
+
+ # At least have fall_back_mesh
+ step_name = "recon_brep.step"
+
+ if is_point2cad:
+ if not (eval_root / folder_name / "clipped/mesh_transformed.ply").exists():
+ print(f"Error: {folder_name} does not have mesh_transformed")
+ return
+ mesh = trimesh.load(eval_root / folder_name / "clipped/mesh_transformed.ply")
+ color = np.stack((
+ [item[1] for item in mesh.metadata['_ply_raw']['face']['data']],
+ [item[2] for item in mesh.metadata['_ply_raw']['face']['data']],
+ [item[3] for item in mesh.metadata['_ply_raw']['face']['data']],
+ ), axis=1)
+ color_map = [list(map(lambda item:int(item),item.strip().split(" "))) for item in open("src/brepnet/eval/point2cad_color.txt").readlines()]
+ index = np.asarray([color_map.index(item.tolist()) for item in color])
+ recon_face_points = [None]*(index.max()+1)
+ for i in range(index.max() + 1):
+ item_faces = mesh.faces[index == i]
+ item_mesh = trimesh.Trimesh(vertices=mesh.vertices, faces=item_faces)
+ num_samples = min(max(int(item_mesh.area * v_num_per_m * v_num_per_m), 5), 10000)
+ pc_item, id_face = trimesh.sample.sample_surface(item_mesh, num_samples)
+ normals = item_mesh.face_normals[id_face]
+ recon_face_points[i] = np.concatenate((pc_item, normals), axis=1)
+
+ if not (eval_root / folder_name / "clipped/curve_points.xyzc").exists():
+ print(f"Error: {folder_name} does not have curve_points")
+ return
+ curve_points = np.asarray([list(map(lambda item: float(item),item.strip().split(" "))) for item in open(eval_root / folder_name / "clipped/curve_points.xyzc").readlines()])
+ num_curves = int(curve_points.max(axis=0)[3]) + 1
+ recon_edge_points = [None]*num_curves
+ for i in range(num_curves):
+ item_points = curve_points[curve_points[:,3] == i][:,:3]
+ recon_edge_points[i] = item_points
+
+ if (eval_root / folder_name / "clipped/remove_duplicates_corners.ply").exists():
+ pc = trimesh.load(eval_root / folder_name / "clipped/remove_duplicates_corners.ply")
+ recon_vertex_points = pc.vertices[:,None]
+ else:
+ recon_vertex_points = np.asarray((0,0,0), dtype=np.float32)[None,None]
+
+ recon_face_edge = {}
+ recon_edge_vertex = {}
+ EV_mode = False
+ for items in open(eval_root / folder_name / 'topo/topo_fix.txt', 'r').readlines():
+ items = items.strip().split(" ")
+ if items[0] == "EV":
+ EV_mode = True
+ continue
+ if len(items) == 1:
+ continue
+ if not EV_mode:
+ recon_face_edge[int(items[0])] = list(map(lambda item: int(item), items[1:]))
+ else:
+ recon_edge_vertex[int(items[0])] = list(map(lambda item: int(item), items[1:]))
+ else:
+ try:
+ # Face chamfer distance
+ if (eval_root / folder_name / step_name).exists():
+ valid, recon_shape = check_step_valid_soild(eval_root / folder_name / step_name, return_shape=True)
+ else:
+ print(f"Error: {folder_name} does not have {step_name}")
+                raise FileNotFoundError(f"{folder_name} is missing {step_name}")
+ if recon_shape is None:
+ print(f"Error: {folder_name} 's {step_name} is not valid")
+                raise ValueError(f"{step_name} is not a valid solid")
+
+ # Get data
+ recon_faces, recon_face_points, recon_edges, recon_edge_points, recon_vertices, recon_vertex_points = get_data(
+ recon_shape, v_num_per_m)
+
+ # Topology
+ recon_face_edge, recon_edge_vertex = get_topology(recon_faces, recon_edges, recon_vertices)
+ except:
+ recon_face_points = [np.zeros((1, 6), dtype=np.float32)]
+ recon_edge_points = [np.zeros((1, 6), dtype=np.float32)]
+ recon_vertex_points = [np.zeros((1, 3), dtype=np.float32)]
+ recon_face_edge = {}
+ recon_edge_vertex = {}
+
+ # GT
+ _, gt_shape = check_step_valid_soild(gt_root / folder_name / "normalized_shape.step", return_shape=True)
+ gt_faces, gt_face_points, gt_edges, gt_edge_points, gt_vertices, gt_vertex_points = get_data(gt_shape, v_num_per_m)
+ gt_face_edge, gt_edge_vertex = get_topology(gt_faces, gt_edges, gt_vertices)
+
+ # Chamfer
+ face_acc_cd, face_com_cd, face_cd = get_chamfer(recon_face_points, gt_face_points)
+ edge_acc_cd, edge_com_cd, edge_cd = get_chamfer(recon_edge_points, gt_edge_points)
+ vertex_acc_cd, vertex_com_cd, vertex_cd = get_chamfer(recon_vertex_points, gt_vertex_points)
+
+ # Detection
+ id_recon_gt_face, id_gt_recon_face, cost_face = get_match_ids(recon_face_points, gt_face_points)
+ id_recon_gt_edge, id_gt_recon_edge, cost_edge = get_match_ids(recon_edge_points, gt_edge_points)
+ id_recon_gt_vertex, id_gt_recon_vertex, cost_vertices = get_match_ids(recon_vertex_points, gt_vertex_points)
+
+ face_fscore, face_pre, face_rec = get_detection(id_recon_gt_face, id_gt_recon_face, cost_face)
+ edge_fscore, edge_pre, edge_rec = get_detection(id_recon_gt_edge, id_gt_recon_edge, cost_edge)
+ vertex_fscore, vertex_pre, vertex_rec = get_detection(id_recon_gt_vertex, id_gt_recon_vertex, cost_vertices)
+
+ fe_fscore, fe_pre, fe_rec = get_topo_detection(recon_face_edge, gt_face_edge, id_recon_gt_face, id_recon_gt_edge)
+ ev_fscore, ev_pre, ev_rec = get_topo_detection(recon_edge_vertex, gt_edge_vertex, id_recon_gt_edge,
+ id_recon_gt_vertex)
+
+ results = {
+ "face_cd": face_cd,
+ "edge_cd": edge_cd,
+ "vertex_cd": vertex_cd,
+
+ "face_fscore": face_fscore,
+ "edge_fscore": edge_fscore,
+ "vertex_fscore": vertex_fscore,
+ "fe_fscore": fe_fscore,
+ "ev_fscore": ev_fscore,
+
+ "face_acc_cd": face_acc_cd,
+ "edge_acc_cd": edge_acc_cd,
+ "vertex_acc_cd": vertex_acc_cd,
+
+ "face_com_cd": face_com_cd,
+ "edge_com_cd": edge_com_cd,
+ "vertex_com_cd": vertex_com_cd,
+
+ "fe_pre": fe_pre,
+ "ev_pre": ev_pre,
+ "fe_rec": fe_rec,
+ "ev_rec": ev_rec,
+
+ "vertex_pre": vertex_pre,
+ "edge_pre": edge_pre,
+ "face_pre": face_pre,
+
+ "vertex_rec": vertex_rec,
+ "edge_rec": edge_rec,
+ "face_rec": face_rec,
+
+ "num_recon_face": len(recon_face_points),
+ "num_gt_face": len(gt_face_points),
+ "num_recon_edge": len(recon_edge_points),
+ "num_gt_edge": len(gt_edge_points),
+ "num_recon_vertex": len(recon_vertex_points),
+ "num_gt_vertex": len(gt_vertex_points),
+ }
+    np.savez_compressed(eval_root / folder_name / 'eval.npz', results=results)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Evaluate The Generated Brep')
+ parser.add_argument('--eval_root', type=str, required=True)
+ parser.add_argument('--gt_root', type=str, required=True)
+ parser.add_argument('--use_ray', action='store_true')
+ parser.add_argument('--num_cpus', type=int, default=16)
+ parser.add_argument('--prefix', type=str, default='')
+ parser.add_argument('--list', type=str, default='')
+ parser.add_argument('--from_scratch', action='store_true')
+ parser.add_argument('--is_point2cad', action='store_true')
+ parser.add_argument('--only_valid', action='store_true')
+ args = parser.parse_args()
+ eval_root = Path(args.eval_root)
+ gt_root = Path(args.gt_root)
+ is_use_ray = args.use_ray
+ num_cpus = args.num_cpus
+ listfile = args.list
+ from_scratch = args.from_scratch
+ is_point2cad = args.is_point2cad
+ only_valid = args.only_valid
+
+ if not os.path.exists(eval_root):
+ raise ValueError(f"Data root path {eval_root} does not exist.")
+ if not os.path.exists(gt_root):
+ raise ValueError(f"Output root path {gt_root} does not exist.")
+
+ if args.prefix != '':
+ eval_one(eval_root, gt_root, args.prefix, is_point2cad)
+ exit()
+
+ all_folders = [folder for folder in os.listdir(eval_root) if os.path.isdir(eval_root / folder)]
+ ori_length = len(all_folders)
+ if listfile != '':
+ valid_names = [item.strip() for item in open(listfile, 'r').readlines()]
+ all_folders = list(set(all_folders) & set(valid_names))
+ all_folders.sort()
+ print(f"Total {len(all_folders)}/{ori_length} folders to evaluate")
+
+ if not from_scratch:
+ print("Filtering the folders that have eval.npz")
+ all_folders = [folder for folder in all_folders if not os.path.exists(eval_root / folder / 'eval.npz')]
+ print(f"Total {len(all_folders)} folders to compute after caching")
+
+ if not is_use_ray:
+ # random.shuffle(self.folder_names)
+ for i in tqdm(range(len(all_folders))):
+ eval_one(eval_root, gt_root, all_folders[i], is_point2cad)
+ else:
+ ray.init(
+ dashboard_host="0.0.0.0",
+ dashboard_port=8080,
+ num_cpus=num_cpus,
+ # local_mode=True
+ )
+ eval_one_remote = ray.remote(max_retries=0)(eval_one_with_try)
+ tasks = []
+ timeout_cancel_list = []
+ for i in range(len(all_folders)):
+ tasks.append(eval_one_remote.remote(eval_root, gt_root, all_folders[i], is_point2cad))
+ results = []
+ for i in tqdm(range(len(all_folders))):
+ try:
+ results.append(ray.get(tasks[i], timeout=60 * 3))
+ except ray.exceptions.GetTimeoutError:
+ results.append(None)
+ timeout_cancel_list.append(all_folders[i])
+ ray.cancel(tasks[i])
+ except:
+ results.append(None)
+ results = [item for item in results if item is not None]
+ print(f"Cancel for timeout: {timeout_cancel_list}")
+
+ print("Computing statistics...")
+ compute_statistics(eval_root, only_valid, listfile)
+ print("Done")
diff --git a/eval/eval_lfd.sh b/eval/eval_lfd.sh
new file mode 100644
index 0000000000000000000000000000000000000000..930fb0fec0d33eb86141dc0222faa31a82ce721a
--- /dev/null
+++ b/eval/eval_lfd.sh
@@ -0,0 +1,11 @@
+if [ -z "$TYPE" ]; then
+  echo "Error: 'TYPE' variable is not set."
+  exit 1
+fi
+
+cd ./eval/lfd/evaluation_scripts/compute_lfd_feat
+python -m compute_lfd_feat_multiprocess --gen_path ../../../../outputs/${TYPE}_post --save_path ../../../../outputs/${TYPE}_lfd_feat --prefix recon_brep.stl
+cd ..
+python -m compute_lfd --dataset_path ../../../data/data_lfd_feat --gen_path ../../../outputs/${TYPE}_lfd_feat --save_name ../../../outputs/${TYPE}_lfd.pkl --num_workers 8 --list ../../../data/data_index/deduplicated_deepcad_training_7_30.txt
+cd ../../..
+python -m eval.viz_lfd ./outputs/${TYPE}_lfd.pkl ./outputs/${TYPE}_lfd.png ./outputs/${TYPE}_post
\ No newline at end of file
diff --git a/eval/eval_pc_set.py b/eval/eval_pc_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..1df8d48941b32c9d3d146e04a2a8911626093034
--- /dev/null
+++ b/eval/eval_pc_set.py
@@ -0,0 +1,44 @@
+import numpy as np
+
+
+def evaluate_uniformity_nnd(points):
+ """
+ Evaluate point cloud uniformity using Nearest Neighbor Distance (NND)
+ Args:
+ points: numpy array of shape (N, 3)
+ Returns:
+ dict containing NND statistics
+ """
+    # 1. Compute each point's distance to its nearest neighbor
+ diff = points[:, None, :] - points[None, :, :] # (N, N, 3)
+ distances = np.sqrt(np.sum(diff * diff, axis=-1)) # (N, N)
+
+    # Set self-distances to infinity
+ np.fill_diagonal(distances, np.inf)
+
+    # Nearest-neighbor distance for each point
+ min_distances = np.min(distances, axis=1) # (N,)
+
+    # 2. Compute summary statistics
+ metrics = {
+ 'mean_nnd': np.mean(min_distances),
+ 'std_nnd' : np.std(min_distances),
+        'cv_nnd'  : np.std(min_distances) / np.mean(min_distances),  # coefficient of variation
+ 'min_nnd' : np.min(min_distances),
+ 'max_nnd' : np.max(min_distances),
+
+        # Clark-Evans R statistic: R = observed mean NND / expected mean NND.
+        # R near 1 suggests a random distribution, R < 1 clustering, R > 1 uniformity.
+ 'density' : len(points) / np.prod(np.max(points, axis=0) - np.min(points, axis=0)),
+ }
+
+    # Compute the Clark-Evans R statistic
+ expected_mean_dist = 0.5 / np.sqrt(metrics['density'])
+ metrics['clark_evans_r'] = metrics['mean_nnd'] / expected_mean_dist
+
+    # 3. Histogram data (can be used for visualization)
+ hist, bins = np.histogram(min_distances, bins='auto', density=True)
+ metrics['hist_values'] = hist
+ metrics['hist_bins'] = bins
+
+ return metrics
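+
+
+# Usage sketch: a quick smoke test on uniform random points. Note that the
+# expected-distance normalization above (0.5 / sqrt(density)) is the 2D
+# Clark-Evans form, so R is only a rough indicator for 3D clouds:
+#   metrics = evaluate_uniformity_nnd(np.random.rand(500, 3))
+#   print(metrics['mean_nnd'], metrics['clark_evans_r'])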
diff --git a/eval/eval_uncond.sh b/eval/eval_uncond.sh
new file mode 100644
index 0000000000000000000000000000000000000000..52aab0e713b74f9c008c1ece23ec42ad6933d24b
--- /dev/null
+++ b/eval/eval_uncond.sh
@@ -0,0 +1,15 @@
+# Eval
+python -m eval.sample_points --data_root ./outputs/unconditional_post --out_root ./outputs/unconditional_pcd --valid;
+python -m eval.eval_brepgen --real ./data/organized_data --fake ./outputs/unconditional_pcd;
+python -m eval.eval_complexity --eval_root ./outputs/unconditional_post --only_valid;
+python -m eval.eval_condition \
+ --eval_root ./outputs/unconditional_post \
+ --gt_root ./data/organized_data/ \
+ --list ./data/data_index/deduplicated_deepcad_testing_7_30.txt \
+ --num_cpus 24 \
+ --use_ray \
+ --from_scratch \
+ --only_valid
+
+# Validness
+python -m eval.check_valid --data_root ./outputs/unconditional_post
\ No newline at end of file
diff --git a/eval/eval_unique_novel.py b/eval/eval_unique_novel.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddabd693b0170afb99b85d6ee4e4ea3b64e0aad9
--- /dev/null
+++ b/eval/eval_unique_novel.py
@@ -0,0 +1,395 @@
+import multiprocessing
+
+import networkx as nx
+import numpy as np
+import argparse
+import os
+
+import trimesh
+from tqdm import tqdm
+import ray
+
+from check_valid import check_step_valid_soild, load_data_with_prefix
+from eval_brepgen import normalize_pc
+
+
+def real2bit(data, n_bits=8, min_range=-1, max_range=1):
+ """Convert vertices in [-1., 1.] to discrete values in [0, n_bits**2 - 1]."""
+ range_quantize = 2 ** n_bits - 1
+ data_quantize = (data - min_range) * range_quantize / (max_range - min_range)
+ data_quantize = np.clip(data_quantize, a_min=0, a_max=range_quantize) # clip values
+ return data_quantize.astype(int)
+
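+# Example: with n_bits=2 the range [-1, 1] maps onto {0, 1, 2, 3} (astype(int)
+# truncates, so 0.0 -> 1.5 -> 1):
+#   real2bit(np.array([-1.0, 0.0, 1.0]), n_bits=2)  ->  array([0, 1, 3])
+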
+
+def build_graph(faces, faces_adj, n_bit=4):
+    """Build a graph from a shape.
+
+    faces is an np.array of shape (n_faces, n_points, n_points, 3);
+    faces_adj is a list of (face_idx, face_idx) adjacency pairs, e.g. [[0, 1], [1, 2]].
+    """
+    if n_bit < 0:
+        faces_bits = faces
+    else:
+        faces_bits = real2bit(faces, n_bits=n_bit)
+ G = nx.Graph()
+ for face_idx, face_bit in enumerate(faces_bits):
+ G.add_node(face_idx, shape_geometry=face_bit)
+ for pair in faces_adj:
+ G.add_edge(pair[0], pair[1])
+ return G
+
+
+def is_graph_identical(graph1, graph2, atol=None):
+ """Check if two shapes are identical."""
+ # Check if the two graphs are isomorphic considering node attributes
+ if atol is None:
+ return nx.is_isomorphic(
+ graph1, graph2,
+ node_match=lambda n1, n2: np.array_equal(n1['shape_geometry'], n2['shape_geometry'])
+ )
+ else:
+ return nx.is_isomorphic(
+ graph1, graph2,
+ node_match=lambda n1, n2: np.allclose(n1['shape_geometry'], n2['shape_geometry'], atol=atol, rtol=0)
+ )
+
+
+def is_graph_identical_batch(graph_pair_list, atol=None):
+ is_identical_list = []
+ for graph1, graph2 in graph_pair_list:
+ is_identical = is_graph_identical(graph1, graph2, atol=atol)
+ is_identical_list.append(is_identical)
+ return is_identical_list
+
+
+is_graph_identical_remote = ray.remote(is_graph_identical_batch)
+
+
+def find_connected_components(matrix):
+ N = len(matrix)
+ visited = [False] * N
+ components = []
+
+ def dfs(idx, component):
+ stack = [idx]
+ while stack:
+ node = stack.pop()
+ if not visited[node]:
+ visited[node] = True
+ component.append(node)
+ for neighbor in range(N):
+ if matrix[node][neighbor] and not visited[neighbor]:
+ stack.append(neighbor)
+
+ for i in range(N):
+ if not visited[i]:
+ component = []
+ dfs(i, component)
+ components.append(component)
+
+ return components
+
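+# Example (sketch): a boolean adjacency matrix with two components {0, 1} and {2}:
+#   m = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=bool)
+#   find_connected_components(m)  ->  [[0, 1], [2]]
+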
+
+def compute_gen_unique(graph_list, is_use_ray=False, batch_size=100000, atol=None):
+ N = len(graph_list)
+ unique_graph_idx = list(range(N))
+ pair_0, pair_1 = np.triu_indices(N, k=1)
+ check_pairs = list(zip(pair_0, pair_1))
+ deduplicate_matrix = np.zeros((N, N), dtype=bool)
+
+ if not is_use_ray:
+ for idx1, idx2 in tqdm(check_pairs):
+ is_identical = is_graph_identical(graph_list[idx1], graph_list[idx2], atol=atol)
+ if is_identical:
+                if idx2 in unique_graph_idx:
+                    unique_graph_idx.remove(idx2)
+ deduplicate_matrix[idx1, idx2] = True
+ deduplicate_matrix[idx2, idx1] = True
+ else:
+ ray.init()
+ N_batch = len(check_pairs) // batch_size
+ futures = []
+ for i in tqdm(range(N_batch)):
+ batch_pairs = check_pairs[i * batch_size: (i + 1) * batch_size]
+ batch_graph_pair = [(graph_list[idx1], graph_list[idx2]) for idx1, idx2 in batch_pairs]
+ futures.append(is_graph_identical_remote.remote(batch_graph_pair, atol))
+ results = ray.get(futures)
+
+ for batch_idx in tqdm(range(N_batch)):
+ for idx, is_identical in enumerate(results[batch_idx]):
+ if not is_identical:
+ continue
+ idx1, idx2 = check_pairs[batch_idx * batch_size + idx]
+ deduplicate_matrix[idx1, idx2] = True
+ deduplicate_matrix[idx2, idx1] = True
+ if idx2 in unique_graph_idx:
+ unique_graph_idx.remove(idx2)
+ ray.shutdown()
+
+ unique = len(unique_graph_idx)
+ print(f"Unique: {unique}/{N}")
+ unique_ratio = unique / N
+
+ return unique_ratio, deduplicate_matrix
+
+
+def compute_gen_novel_bk(gen_graph_list, train_graph_list, is_use_ray=False, batch_size=100000):
+ M, N = len(gen_graph_list), len(train_graph_list)
+ deduplicate_matrix = np.zeros((M, N), dtype=bool)
+ pair_0, pair_1 = np.triu_indices_from(deduplicate_matrix, k=1)
+ check_pairs = list(zip(pair_0, pair_1))
+ non_novel_graph_idx = np.zeros(M, dtype=bool)
+
+ if not is_use_ray:
+ for idx1, idx2 in tqdm(check_pairs):
+ if non_novel_graph_idx[idx1]:
+ continue
+ is_identical = is_graph_identical(gen_graph_list[idx1], train_graph_list[idx2])
+ if is_identical:
+ non_novel_graph_idx[idx1] = True
+ deduplicate_matrix[idx1, idx2] = True
+ else:
+ ray.init()
+ N_batch = len(check_pairs) // batch_size
+ futures = []
+ for i in tqdm(range(N_batch)):
+ batch_pairs = check_pairs[i * batch_size: (i + 1) * batch_size]
+ batch_graph_pair = [(gen_graph_list[idx1], train_graph_list[idx2]) for idx1, idx2 in batch_pairs]
+ futures.append(is_graph_identical_remote.remote(batch_graph_pair))
+ results = ray.get(futures)
+
+ for batch_idx in tqdm(range(N_batch)):
+ for idx, is_identical in enumerate(results[batch_idx]):
+ if not is_identical:
+ continue
+ idx1, idx2 = check_pairs[batch_idx * batch_size + idx]
+ deduplicate_matrix[idx1, idx2] = True
+ non_novel_graph_idx[idx1] = True
+ ray.shutdown()
+
+ novel = M - np.sum(non_novel_graph_idx)
+ print(f"Novel: {novel}/{M}")
+ novel_ratio = novel / M
+ return novel_ratio, deduplicate_matrix
+
+
+def is_graph_identical_list(graph1, graph2_path_list):
+ """Check if two shapes are identical."""
+ # Check if the two graphs are isomorphic considering node attributes
+ graph2_list, graph2_prefix_list = load_and_build_graph(graph2_path_list)
+ for graph2 in graph2_list:
+ if nx.is_isomorphic(graph1, graph2,
+ node_match=lambda n1, n2: np.array_equal(n1['shape_geometry'], n2['shape_geometry'])):
+ return True
+ return False
+
+
+is_graph_identical_list_remote = ray.remote(is_graph_identical_list)
+
+
+def test_check():
+ sample = np.random.rand(3, 32, 32, 3)
+ face1 = sample[[0, 1, 2]]
+ face2 = sample[[0, 2, 1]]
+ faces_adj1 = [[0, 1]]
+ faces_adj2 = [[0, 2]]
+
+ graph1 = build_graph(face1, faces_adj1)
+ graph2 = build_graph(face2, faces_adj2)
+
+ is_identical = is_graph_identical(graph1, graph2)
+    # Check whether the graphs are equal
+ print("Graphs are equal" if is_identical else "Graphs are not equal")
+
+
+def load_data_from_npz(data_npz_file):
+ data_npz = np.load(data_npz_file, allow_pickle=True)
+ # Brepgen
+ if 'face_edge_adj' in data_npz:
+ faces = data_npz['pred_face']
+ face_edge_adj = data_npz['face_edge_adj']
+ faces_adj_pair = []
+ N = face_edge_adj.shape[0]
+ for face_idx1 in range(N):
+ for face_idx2 in range(face_idx1 + 1, N):
+ face_edges1 = face_edge_adj[face_idx1]
+ face_edges2 = face_edge_adj[face_idx2]
+ if sorted((face_idx1, face_idx2)) in faces_adj_pair:
+ continue
+ if len(set(face_edges1).intersection(set(face_edges2))) > 0:
+ faces_adj_pair.append(sorted((face_idx1, face_idx2)))
+ return faces, faces_adj_pair
+ # Ours
+ if 'sample_points_faces' in data_npz:
+ face_points = data_npz['sample_points_faces'] # Face sample points (num_faces*20*20*3)
+ edge_face_connectivity = data_npz['edge_face_connectivity'] # (num_intersection, (id_edge, id_face1, id_face2))
+ elif 'pred_face' in data_npz and 'pred_edge_face_connectivity' in data_npz:
+ face_points = data_npz['pred_face']
+ edge_face_connectivity = data_npz['pred_edge_face_connectivity']
+ else:
+ raise ValueError("Invalid data format")
+ faces_adj_pair = []
+ for edge_idx, face_idx1, face_idx2 in edge_face_connectivity:
+ faces_adj_pair.append([face_idx1, face_idx2])
+ if face_points.shape[-1] != 3:
+ face_points = face_points[..., :3]
+
+ src_shape = face_points.shape
+ face_points = normalize_pc(face_points.reshape(-1, 3)).reshape(src_shape)
+ return face_points, faces_adj_pair
+
+
+def load_and_build_graph(data_npz_file_list, gen_post_data_root=None, n_bit=4):
+ gen_graph_list = []
+ prefix_list = []
+ for data_npz_file in data_npz_file_list:
+ folder_name = os.path.basename(os.path.dirname(data_npz_file))
+ if gen_post_data_root:
+ step_file_list = load_data_with_prefix(os.path.join(gen_post_data_root, folder_name), ".step")
+ if len(step_file_list) == 0:
+ continue
+ if not check_step_valid_soild(step_file_list[0]):
+ continue
+ prefix_list.append(folder_name)
+ faces, faces_adj_pair = load_data_from_npz(data_npz_file)
+ graph = build_graph(faces, faces_adj_pair, n_bit)
+ gen_graph_list.append(graph)
+ return gen_graph_list, prefix_list
+
+
+load_and_build_graph_remote = ray.remote(load_and_build_graph)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--fake_root", type=str, required=True)
+ parser.add_argument("--fake_post", type=str, required=True)
+ parser.add_argument("--train_root", type=str, required=False)
+ parser.add_argument("--n_bit", type=int, required=False)
+ parser.add_argument("--atol", type=float, required=False)
+ parser.add_argument("--use_ray", action='store_true')
+ parser.add_argument("--load_batch_size", type=int, default=400)
+ parser.add_argument("--compute_batch_size", type=int, default=200000)
+ parser.add_argument("--txt", type=str, default=None)
+ parser.add_argument("--num_cpus", type=int, default=32)
+ parser.add_argument("--min_face", type=int, required=False)
+ parser.add_argument("--only_unique", action='store_true')
+ args = parser.parse_args()
+ gen_data_root = args.fake_root
+ gen_post_data_root = args.fake_post
+ train_data_root = args.train_root
+ is_use_ray = args.use_ray
+ n_bit = args.n_bit
+ atol = args.atol
+ load_batch_size = args.load_batch_size
+ compute_batch_size = args.compute_batch_size
+ folder_list_txt = args.txt
+ num_cpus = args.num_cpus
+
+ if not n_bit and not atol:
+ raise ValueError("Must set either n_bit or atol")
+ if n_bit and atol:
+ raise ValueError("Cannot set both n_bit and atol")
+
+ if not args.only_unique and not train_data_root:
+ raise ValueError("Must set train_data_root when not only_unique")
+
+ if n_bit:
+ atol = None
+ if atol:
+ n_bit = -1
+
+    ################################################## Unique #######################################################
+ # Load all the generated data files
+ print("Loading generated data files...")
+ gen_data_npz_file_list = load_data_with_prefix(gen_data_root, 'data.npz')
+ if is_use_ray:
+ ray.init()
+ futures = []
+ gen_graph_list = []
+ gen_prefix_list = []
+ for i in tqdm(range(0, len(gen_data_npz_file_list), load_batch_size)):
+ batch_gen_data_npz_file_list = gen_data_npz_file_list[i: i + load_batch_size]
+ futures.append(load_and_build_graph_remote.remote(batch_gen_data_npz_file_list, gen_post_data_root, n_bit))
+ for future in tqdm(futures):
+ result = ray.get(future)
+ gen_graph_list_batch, gen_prefix_list_batch = result
+ gen_graph_list.extend(gen_graph_list_batch)
+ gen_prefix_list.extend(gen_prefix_list_batch)
+ ray.shutdown()
+ else:
+ gen_graph_list, gen_prefix_list = load_and_build_graph(gen_data_npz_file_list, gen_post_data_root, n_bit)
+ print(f"Loaded {len(gen_graph_list)} generated data files")
+
+ if args.min_face:
+ graph_node_num = [len(graph.nodes) for graph in gen_graph_list]
+ gen_graph_list = [gen_graph_list[idx] for idx, num in enumerate(graph_node_num) if num >= args.min_face]
+ gen_prefix_list = [gen_prefix_list[idx] for idx, num in enumerate(graph_node_num) if num >= args.min_face]
+ print(f"Filtered sample that face_num < {args.min_face}, remain {len(gen_graph_list)}")
+
+ print("Computing Unique ratio...")
+ unique_ratio, deduplicate_matrix = compute_gen_unique(gen_graph_list, is_use_ray, compute_batch_size, atol=atol)
+ print(f"Unique ratio: {unique_ratio}")
+
+ if n_bit == -1:
+ unique_txt = gen_data_root + f"_unique_atol_{atol}_results.txt"
+ else:
+ unique_txt = gen_data_root + f"_unique_{n_bit}bit_results.txt"
+ fp = open(unique_txt, "w")
+ print(f"Unique ratio: {unique_ratio}", file=fp)
+ deduplicate_components = find_connected_components(deduplicate_matrix)
+ for component in deduplicate_components:
+ if len(component) > 1:
+ component = [gen_prefix_list[idx] for idx in component]
+ print(f"Component: {component}", file=fp)
+ print(f"Deduplicate components are saved to {unique_txt}")
+ fp.close()
+
+ if args.only_unique:
+ exit(0)
+
+    # To accelerate, first run find_nerest.py to find the nearest item in the training data for each fake sample
+ ################################################### Novel ########################################################
+ print("Computing Novel ratio...")
+ print("Loading training data files...")
+ # data_npz_file_list = load_data_with_prefix(train_data_root, 'data.npz', folder_list_txt=folder_list_txt)
+ # load_batch_size = load_batch_size * 5
+
+ is_identical = np.zeros(len(gen_graph_list), dtype=bool)
+ if is_use_ray:
+ ray.init()
+        futures = []
+        future_graph_idx = []  # remember which graph each future belongs to, since some are skipped
+        for gen_graph_idx, gen_graph in enumerate(tqdm(gen_graph_list)):
+            nearest_txt = os.path.join(gen_post_data_root, gen_prefix_list[gen_graph_idx], "nearest.txt")
+            if not os.path.exists(nearest_txt):
+                continue
+            with open(nearest_txt, "r") as f:
+                lines = f.readlines()
+            train_folders = [os.path.join(train_data_root, line.strip().split(" ")[0], 'data.npz') for line in lines[2:]]
+            futures.append(is_graph_identical_list_remote.remote(gen_graph, train_folders))
+            future_graph_idx.append(gen_graph_idx)
+        results = ray.get(futures)
+        for gen_graph_idx, result in zip(future_graph_idx, results):
+            is_identical[gen_graph_idx] = result
+ ray.shutdown()
+ else:
+ pbar = tqdm(gen_graph_list)
+ for gen_graph_idx, gen_graph in enumerate(pbar):
+ nearest_txt = os.path.join(gen_post_data_root, gen_prefix_list[gen_graph_idx], "nearest.txt")
+ if not os.path.exists(nearest_txt):
+ continue
+            with open(nearest_txt, "r") as f:
+ lines = f.readlines()
+ train_folders = [os.path.join(train_data_root, line.strip().split(" ")[0], 'data.npz') for line in lines[2:]]
+ is_identical[gen_graph_idx] = is_graph_identical_list(gen_graph, train_folders)
+ pbar.set_postfix({"novel_count": np.sum(~is_identical)})
+
+ identical_folder = np.array(gen_prefix_list)[is_identical]
+ print(f"Novel ratio: {np.sum(~is_identical) / len(gen_graph_list)}")
+ novel_txt = gen_data_root + f"_novel_{n_bit}bit_results.txt"
+ with open(novel_txt, "w") as f:
+ f.write(f"Novel ratio: {np.sum(~is_identical) / len(gen_graph_list)}\n")
+ for folder in identical_folder:
+ f.write(folder + "\n")
+ print("Done")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/eval/eval_validity.py b/eval/eval_validity.py
new file mode 100644
index 0000000000000000000000000000000000000000..84a67ade9cd43c5a058a983333538d08dce94cb6
--- /dev/null
+++ b/eval/eval_validity.py
@@ -0,0 +1,81 @@
+import numpy as np
+import argparse
+
+from lightning_fabric import seed_everything
+
+from eval_condition import *
+from OCC.Core.GCPnts import GCPnts_AbscissaPoint
+from OCC.Core.GeomAdaptor import GeomAdaptor_Curve
+
+
+def get_fluxEE(vertices: np.ndarray, facets: np.ndarray) -> float:
+ points = vertices[facets]
+ a = points[:, 1] - points[:, 0]
+ b = points[:, 2] - points[:, 0]
+ normals = np.cross(a, b)
+    norms = np.linalg.norm(normals, axis=1)  # per-face normal magnitudes
+ assert np.all(norms != 0)
+ normals /= norms[:, None]
+ d_S = 0.5 * norms
+ fluxEE = np.sum(np.sum(normals, axis=1) * d_S)
+ return abs(fluxEE)
+
+
+def get_NormalC(v_recon_points: np.ndarray, v_gt_points: np.ndarray):
+ # ACC
+ acc_l1norm = np.sum(np.abs(v_gt_points[:, None, :3] - v_recon_points[:, :3]), axis=2)
+ min_dist_index = np.argmin(acc_l1norm, axis=0)
+ acc = np.mean(np.sum(v_recon_points[:, 3:] * v_gt_points[min_dist_index][:, 3:], axis=1))
+
+ # Comp
+ comp_l1norm = np.sum(np.abs(v_recon_points[:, None, :3] - v_gt_points[:, :3]), axis=2)
+ min_dist_index = np.argmin(comp_l1norm, axis=0)
+ comp = np.mean(np.sum(v_gt_points[:, 3:] * v_recon_points[min_dist_index][:, 3:], axis=1))
+
+ return acc, comp, (acc + comp) / 2.0
+
+
+def get_danglingEdgeLength(shape):
+ no_directions = True
+ edges = get_primitives(shape, TopAbs_EDGE, no_directions)
+ faces = get_primitives(shape, TopAbs_FACE, no_directions)
+ connection = {edge: set() for edge in edges}
+
+ def EdgeBelongsToFace(edge, face):
+ edgeOnFace = get_primitives(face, TopAbs_EDGE, True)
+ for _edge in edgeOnFace:
+ if _edge.IsSame(edge):
+ return True
+ return False
+
+ # Get edge-face connections
+ for edge in edges:
+ for face in faces:
+ if EdgeBelongsToFace(edge, face):
+ connection[edge].add(face)
+
+ # Get dangling edge length
+ danglingEdgeLength = 0.0
+ for edge, faces in connection.items():
+ if len(faces) < 2:
+ curve, _, _ = BRep_Tool.Curve(edge)
+ if len(faces) == 1 and (BRep_Tool.Surface(list(faces)[0]).IsUPeriodic() or BRep_Tool.Surface(list(faces)[0]).IsVPeriodic()):
+ continue
+ else:
+ danglingEdgeLength += GCPnts_AbscissaPoint.Length(GeomAdaptor_Curve(curve))
+
+ return danglingEdgeLength
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='Evaluate The Generated Brep')
+ parser.add_argument('--eval_root', type=str)
+ parser.add_argument('--gt_root', type=str)
+ parser.add_argument('--use_ray', action='store_true')
+ parser.add_argument('--num_cpus', type=int, default=16)
+ parser.add_argument('--prefix', type=str, default='')
+ parser.add_argument('--list', type=str, default='')
+ parser.add_argument('--from_scratch', action='store_true')
+ args = parser.parse_args()
+
+ seed_everything(0)
diff --git a/eval/lfd/evaluation_scripts/README.md b/eval/lfd/evaluation_scripts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8f39949adb44802bcd110f647f3a6191bfd2a8c7
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/README.md
@@ -0,0 +1,75 @@
+## Scripts for Evaluating GET3D
+
+#### Compute Light Field Distance
+
+We thank the authors for releasing the source code of
+LFD ([official repo](https://github.com/Sunwinds/ShapeDescriptor)) and
+its [Python extension](https://github.com/kacperkan/light-field-distance).
+
+- Step 0: Download all the files
+  from the [official repo](https://github.com/Sunwinds/ShapeDescriptor/tree/master/LightField/3DRetrieval_v1.8/3DRetrieval_v1.8/Executable)
+  and save them into `evaluation_scripts/load_data`.
+- Step 1: Compile the files for light field distance
+
+```bash
+cd evaluation_scripts/load_data
+bash do_all.sh
+cd ../..
+git clone https://github.com/kacperkan/light-field-distance
+cd light-field-distance
+bash compile.sh
+python setup.py install
+cd ..
+```
+
+- Step 2: To compute LFD on a server, we need to set up a dummy screen
+
+```bash
+apt-get install -y freeglut3 libglu1-mesa xserver-xorg-video-dummy
+X -config evaluation_scripts/compute_lfd_feat/dummy-1920x1080.conf
+```
+
+- Step 3: On a separate console, `export DISPLAY=:0`
+
+- Step 4: First, generate the Light Field feature for each object by running
+
+```bash
+ python compute_lfd_feat_multiprocess.py --gen_path PATH_TO_THE_MODEL_PREDICTION --save_path PATH_FOR_LFD_OUTPUT_FOR_PRED
+```
+
+- Step 5: Do the same for the ground truth data
+
+```bash
+ python compute_lfd_feat_multiprocess.py --gen_path PATH_TO_GT_MODEL --save_path PATH_FOR_LFD_OUTPUT_FOR_GT
+```
+
+- Step 6: Compute the metric: LFD
+
+```bash
+python compute_lfd.py --split_path PATH_TO_TEST_SPLIT --dataset_path PATH_FOR_LFD_OUTPUT_FOR_GT --gen_path PATH_FOR_LFD_OUTPUT_FOR_PRED --save_name results/our/lfd.pkl
+```
+
+### Compute Chamfer Distance
+
+- Step 1: Download original shapenet obj files from Shapenet Webpage
+- Step 2: Running scripts to compute the chamfer distance
+
+```bash
+python compute_cd.py --dataset_path PATH_TO_GT_OBJS --gen_path PATH_TO_THE_MODEL_PREDICTION --split_path PATH_TO_TEST_SPLIT --save_name results/our/cd.pkl
+```
+
+(Optional) For ShapeNet cars, since the GT dataset contains internal structures, we only
+sample points from the outer surface of the object for both our prediction and the
+ground truth. To achieve this:
+
+```bash
+python sample_surface.py --n_points 5000 --n_proc 2 --shape_root PATH_TO_OBJS --save_root PATH_TO_THE_SAMPLE_POINTS
+```
+
+### Compute Cov and MMD score:
+
+After computing the Chamfer distance and LFD, compute the Coverage and MMD scores:
+
+```bash
+python compute_cov_mmd.py
+```
diff --git a/eval/lfd/evaluation_scripts/compute_lfd.py b/eval/lfd/evaluation_scripts/compute_lfd.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a9945abadb198cced5760aa01bd5baeab5deb11
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/compute_lfd.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import random
+import numpy as np
+import ray
+import torch
+import os
+from tqdm import tqdm
+from load_data.interface import LoadData
+
+
+def read_all_data(folder_list, load_data, add_model_str=True, add_ori_name=False):
+ all_data = []
+
+ for f in folder_list:
+ if add_model_str:
+ result = load_data.run(os.path.join(f, 'model', 'mesh'))
+ elif add_ori_name:
+ result = load_data.run(os.path.join(f, f.split('/')[-1], 'mesh'))
+ else:
+ result = load_data.run(os.path.join(f, 'mesh'))
+
+ all_data.append(result)
+ q8_table = all_data[0][0]
+ align_10 = all_data[0][1]
+ dest_ArtCoeff = [r[2][np.newaxis, :] for r in all_data]
+ dest_FdCoeff_q8 = [r[3][np.newaxis, :] for r in all_data]
+ dest_CirCoeff_q8 = [r[4][np.newaxis, :] for r in all_data]
+ dest_EccCoeff_q8 = [r[5][np.newaxis, :] for r in all_data]
+ SRC_ANGLE = 10
+ ANGLE = 10
+ CAMNUM = 10
+ ART_COEF = 35
+ FD_COEF = 10
+ n_shape = len(all_data)
+ dest_ArtCoeff = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_ArtCoeff, axis=0))).int().cuda().reshape(n_shape, SRC_ANGLE, CAMNUM, ART_COEF)
+ dest_FdCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_FdCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE, CAMNUM, FD_COEF)
+ dest_CirCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_CirCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE, CAMNUM)
+ dest_EccCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_EccCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE, CAMNUM)
+ q8_table = torch.from_numpy(np.ascontiguousarray(q8_table)).int().cuda().reshape(256, 256)
+    align_10 = torch.from_numpy(np.ascontiguousarray(align_10)).int().cuda().reshape(60, 20)
+ return q8_table.contiguous(), align_10.contiguous(), dest_ArtCoeff.contiguous(), \
+ dest_FdCoeff_q8.contiguous(), dest_CirCoeff_q8.contiguous(), dest_EccCoeff_q8.contiguous()
+
+def compute_lfd_all(src_folder_list, tgt_folder_list, log):
+ load_data = LoadData()
+
+ add_ori_name = False
+ add_model_str = False
+ src_folder_list.sort()
+ tgt_folder_list.sort()
+
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8 = read_all_data(src_folder_list, load_data, add_model_str=False)
+ q8_table, align_10, tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8 = read_all_data(tgt_folder_list, load_data, add_model_str=add_model_str, add_ori_name=add_ori_name) ###
+
+ from lfd_all_compute.lfd import LFD
+ lfd = LFD()
+ lfd_matrix = lfd.forward(
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8, log)
+    # Row/column minima give the two directed MMD statistics; the full
+    # matrix is returned so the caller can aggregate across workers.
+    mmd = lfd_matrix.float().min(dim=0)[0].mean()
+    mmd_swp = lfd_matrix.float().min(dim=1)[0].mean()
+    return lfd_matrix.data.cpu().numpy()
+
+
+
+if __name__ == '__main__':
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--save_name", type=str, required=True, help="path to the save resules shapenet dataset")
+ parser.add_argument("--dataset_path", type=str, required=True, help="path to the preprocessed shapenet dataset")
+ parser.add_argument("--gen_path", type=str, required=True, help="path to the generated models")
+ parser.add_argument("--num_workers", type=int, default=1, help="number of workers to run in parallel")
+ parser.add_argument("--list", type=str, default=None, help="list file in the training set")
+ args = parser.parse_args()
+    save_path = os.path.dirname(args.save_name)
+    if save_path:
+        os.makedirs(save_path, exist_ok=True)
+ num_workers = args.num_workers
+ listfile = args.list
+ ray.init(
+ num_cpus=os.cpu_count(),
+ num_gpus=num_workers,
+ )
+ print(f"dataset_path: {args.dataset_path}")
+ print(f"gen_path: {args.gen_path}")
+ assert os.path.exists(args.dataset_path) and os.path.exists(args.gen_path)
+
+    tgt_folder_list = sorted(os.listdir(args.dataset_path))
+    if listfile is not None:
+        valid_folders = [item.strip() for item in open(listfile, 'r').readlines()]
+        tgt_folder_list = sorted(set(valid_folders) & set(tgt_folder_list))
+    tgt_folder_list = [os.path.join(args.dataset_path, f) for f in tgt_folder_list]
+
+ src_folder_list = os.listdir(args.gen_path)
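+    # Evaluate a random subset of at most 3000 generated shapes.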
+ random.shuffle(src_folder_list)
+ src_folder_list = sorted(src_folder_list[:3000])
+ src_folder_list = [os.path.join(args.gen_path, f) for f in src_folder_list]
+
+ compute_lfd_all_remote = ray.remote(num_gpus=1, num_cpus=os.cpu_count() // num_workers)(compute_lfd_all)
+
+ print("Check data")
+ print(f"len of src_folder_list: {len(src_folder_list)}")
+ print(f"len of tgt_folder_list: {len(tgt_folder_list)}")
+ # print(src_folder_list[0])
+ # print(tgt_folder_list[0])
+
+ results = []
+ for i in range(num_workers):
+ i_start = i * len(src_folder_list) // num_workers
+ i_end = (i + 1) * len(src_folder_list) // num_workers
+ # print(i, i_start, i_end)
+ results.append(compute_lfd_all_remote.remote(
+ src_folder_list[i_start:i_end],
+ tgt_folder_list,
+ i==0))
+
+ lfd_matrix = ray.get(results)
+ lfd_matrix = np.concatenate(lfd_matrix, axis=0)
+ import pickle
+ save_name = args.save_name
+    nearest_name = [os.path.basename(tgt_folder_list[idx]) for idx in lfd_matrix.argmin(axis=1)]
+    src_folder_list = [os.path.basename(f) for f in src_folder_list]
+ pickle.dump([src_folder_list, nearest_name, lfd_matrix], open(save_name, 'wb'))
+ print(f"pkl is saved to {save_name}")
diff --git a/eval/lfd/evaluation_scripts/compute_lfd_check_data.py b/eval/lfd/evaluation_scripts/compute_lfd_check_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..21d0897ecd9b72162f17601a8ca1bd0a4ad59765
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/compute_lfd_check_data.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import random
+import shutil
+
+import numpy as np
+import ray
+import torch
+import os
+from tqdm import tqdm
+from load_data.interface import LoadData
+import pickle
+from multiprocessing import Pool, cpu_count
+
+def read_all_data(folder_list, load_data, add_model_str=True, add_ori_name=False):
+ all_data = []
+
+ for f in folder_list:
+ if add_model_str:
+ result = load_data.run(os.path.join(f, 'model', 'mesh'))
+ elif add_ori_name:
+ result = load_data.run(os.path.join(f, f.split('/')[-1], 'mesh'))
+ else:
+ result = load_data.run(os.path.join(f, 'mesh'))
+
+ all_data.append(result)
+ q8_table = all_data[0][0]
+ align_10 = all_data[0][1]
+ dest_ArtCoeff = [r[2][np.newaxis, :] for r in all_data]
+ dest_FdCoeff_q8 = [r[3][np.newaxis, :] for r in all_data]
+ dest_CirCoeff_q8 = [r[4][np.newaxis, :] for r in all_data]
+ dest_EccCoeff_q8 = [r[5][np.newaxis, :] for r in all_data]
+ SRC_ANGLE = 10
+ ANGLE = 10
+ CAMNUM = 10
+ ART_COEF = 35
+ FD_COEF = 10
+ n_shape = len(all_data)
+ dest_ArtCoeff = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_ArtCoeff, axis=0))).int().cuda().reshape(n_shape, SRC_ANGLE,
+ CAMNUM, ART_COEF)
+ dest_FdCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_FdCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE,
+ CAMNUM, FD_COEF)
+ dest_CirCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_CirCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE,
+ CAMNUM)
+ dest_EccCoeff_q8 = torch.from_numpy(np.ascontiguousarray(np.concatenate(dest_EccCoeff_q8, axis=0))).int().cuda().reshape(n_shape, ANGLE,
+ CAMNUM)
+ q8_table = torch.from_numpy(np.ascontiguousarray(q8_table)).int().cuda().reshape(256, 256)
+ align_10 = torch.from_numpy(np.ascontiguousarray(align_10)).int().cuda().reshape(60, 20) ##
+ return q8_table.contiguous(), align_10.contiguous(), dest_ArtCoeff.contiguous(), \
+ dest_FdCoeff_q8.contiguous(), dest_CirCoeff_q8.contiguous(), dest_EccCoeff_q8.contiguous()
+
+
+def compute_lfd_all(src_folder_list, tgt_folder_list, log):
+ load_data = LoadData()
+
+ add_ori_name = False
+ add_model_str = False
+ src_folder_list.sort()
+ tgt_folder_list.sort()
+
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8 = read_all_data(src_folder_list, load_data,
+ add_model_str=False)
+ q8_table, align_10, tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8 = read_all_data(tgt_folder_list, load_data,
+ add_model_str=add_model_str,
+ add_ori_name=add_ori_name) ###
+
+ from lfd_all_compute.lfd import LFD
+ lfd = LFD()
+ lfd_matrix = lfd.forward(
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8, log)
+    # Row/column minima give the two directed MMD statistics; the full
+    # matrix is returned so the caller can aggregate across workers.
+    mmd = lfd_matrix.float().min(dim=0)[0].mean()
+    mmd_swp = lfd_matrix.float().min(dim=1)[0].mean()
+    return lfd_matrix.data.cpu().numpy()
+
+def get_file_size_kb(mesh_path):
+ return int(os.path.getsize(mesh_path) / 1024)
+
+
+if __name__ == '__main__':
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--mesh_path", type=str, required=True, help="path to the mesh folder")
+ parser.add_argument("--lfd_feat", type=str, required=True, help="path to the preprocessed shapenet dataset")
+ parser.add_argument("--save_root", type=str, required=True, help="path to the save resules shapenet dataset")
+ parser.add_argument("--num_workers", type=int, default=1, help="number of workers to run in parallel")
+ parser.add_argument("--list", type=str, default=None, help="list file in the training set")
+ args = parser.parse_args()
+ num_workers = args.num_workers
+ listfile = args.list
+
+ mesh_folder_path = args.mesh_path
+ lfd_feat_path = args.lfd_feat
+ save_root = args.save_root
+ os.makedirs(save_root, exist_ok=True)
+
+
+ print(f"mesh_path: {mesh_folder_path}")
+ print(f"lfd_feat_path: {lfd_feat_path}")
+
+ all_folders = os.listdir(mesh_folder_path)
+ all_folders.sort()
+ print("Get mesh_size")
+ mesh_folder_list = []
+ mesh_path_list = []
+ # mesh_size_list = []
+ for mesh_folder in tqdm(all_folders):
+ mesh_path = os.path.join(mesh_folder_path, mesh_folder, "mesh.stl")
+ mesh_folder_list.append(mesh_folder)
+ mesh_path_list.append(mesh_path)
+ # mesh_size_list.append(int(os.path.getsize(mesh_path) / 1024))
+
+ with Pool(processes=cpu_count()) as pool:
+ mesh_size_list = list(tqdm(pool.imap(get_file_size_kb, mesh_path_list), total=len(mesh_path_list)))
+
+ # sort according to the size of the mesh file
+ assert len(mesh_size_list) == len(mesh_folder_list)
+ # mesh_folder_list = [x for _, x in sorted(zip(mesh_size_list, mesh_folder_list))]
+ # mesh_size_list = sorted(mesh_size_list)
+ mesh_size_list = np.array(mesh_size_list)
+ print(f"Max size: {mesh_size_list.max()}")
+ print(f"Min size: {mesh_size_list.min()}")
+ print(f"Total {mesh_size_list.shape} mesh_folder to process")
+
+ tgt_folder_list = mesh_folder_list
+
+    if listfile is not None:
+        valid_folders = [item.strip() for item in open(listfile, 'r').readlines()]
+        tgt_folder_list = sorted(set(valid_folders) & set(tgt_folder_list))
+    tgt_folder_list = [os.path.join(lfd_feat_path, f) for f in tgt_folder_list]
+
+ src_folder_list = tgt_folder_list
+
+ start_from_size_end = 0
+ print(f"Start from size_end: {start_from_size_end}")
+ print((mesh_size_list>start_from_size_end).sum()/mesh_size_list.shape[0])
+
+ ray.init(
+ num_cpus=os.cpu_count(),
+ num_gpus=num_workers,
+ )
+
+ compute_lfd_all_remote = ray.remote(num_gpus=1, num_cpus=os.cpu_count() // num_workers)(compute_lfd_all)
+
+ print("Check data")
+ print(f"len of src_folder_list: {len(src_folder_list)}")
+ print(f"len of tgt_folder_list: {len(tgt_folder_list)}")
+ print(src_folder_list[0])
+ print(tgt_folder_list[0])
+
+ batch_size = 1
+ offset = 2
+
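+    # Process meshes in small file-size buckets ([size_start, size_end] KB) so
+    # each Ray batch holds shapes of similar cost; since src == tgt here, each
+    # bucket is compared against itself to validate the precomputed features.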
+ for size_start in tqdm(range(mesh_size_list.min(), mesh_size_list.max(), batch_size)):
+ size_end = size_start + offset
+ print(f"size_start: {size_start}, size_end: {size_end}, max_size: {mesh_size_list.max()}")
+ if size_end <= start_from_size_end:
+ continue
+ # get the folder list for the current batch
+        hit_idx = np.where((mesh_size_list >= size_start) & (mesh_size_list <= size_end))[0]
+        print(f"len of hit folders: {len(hit_idx)}")
+        if len(hit_idx) == 0:
+            continue
+        local_num_workers = min(num_workers, len(hit_idx))
+        local_tgt_folder_list = [tgt_folder_list[i] for i in hit_idx]
+ local_src_folder_list = local_tgt_folder_list
+ results = []
+ for i in range(local_num_workers):
+ local_i_start = i * len(local_src_folder_list) // local_num_workers
+ local_i_end = (i + 1) * len(local_src_folder_list) // local_num_workers
+ results.append(compute_lfd_all_remote.remote(
+ local_src_folder_list[local_i_start:local_i_end],
+ local_tgt_folder_list,
+ i == 0))
+ lfd_matrix = ray.get(results)
+ lfd_matrix = np.concatenate(lfd_matrix, axis=0)
+
+ save_name = os.path.join(save_root, f"lfd_{size_start:07d}kb_{size_end:07d}kb.pkl")
+ pickle.dump([local_tgt_folder_list, lfd_matrix], open(save_name, 'wb'))
+ print(f"pkl is saved to {save_name}\n\n")
diff --git a/eval/lfd/evaluation_scripts/compute_lfd_feat/compute_lfd_feat_multiprocess.py b/eval/lfd/evaluation_scripts/compute_lfd_feat/compute_lfd_feat_multiprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..02dd89e3004f3f2dbdef169729593073ad71add7
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/compute_lfd_feat/compute_lfd_feat_multiprocess.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import argparse
+import glob
+
+import numpy as np
+import torch
+import os
+import random
+from tqdm import tqdm
+from pathlib import Path
+from multiprocessing import Pool
+import trimesh
+
+
+def seed_everything(seed):
+ if seed < 0:
+ return
+ torch.manual_seed(seed)
+ np.random.seed(seed)
+ random.seed(seed)
+
+
+def load_mesh_v(mesh_name, normalized_scale=0.9):
+    # trimesh handles OBJ, STL and PLY uniformly; a Scene is flattened to a
+    # single mesh before extracting vertices and faces.
+    if mesh_name.lower().endswith(('.obj', '.stl', '.ply')):
+        mesh = trimesh.load_mesh(mesh_name, force='mesh')
+        if isinstance(mesh, trimesh.Scene):
+            # we lose texture information here
+            mesh = trimesh.util.concatenate(
+                tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
+                      for g in mesh.geometry.values()))
+        vertices = np.asarray(mesh.vertices)
+        mesh_f1 = np.asarray(mesh.faces)
+    else:
+        raise NotImplementedError(f"Unsupported mesh format: {mesh_name}")
+
+ if vertices.shape[0] == 0:
+ return None, None
+
+ scale = (vertices.max(axis=0) - vertices.min(axis=0)).max()
+ mesh_v1 = vertices / (scale+1e-6) * normalized_scale
+ return mesh_v1, mesh_f1
+
+
+from lfd_me import MeshEncoder
+from functools import partial
+
+
+def align_mesh_feature(mesh_name, align_feature_sample_folder):
+    # One output folder per model, named after the mesh's parent directory.
+    mesh_folder = os.path.basename(os.path.dirname(mesh_name))
+    mesh_folder = os.path.join(align_feature_sample_folder, mesh_folder)
+
+    if not os.path.exists(mesh_folder):
+        os.makedirs(mesh_folder)
+    # Skip meshes whose features were already extracted in a previous run.
+    if os.path.exists(os.path.join(mesh_folder, 'mesh_q4_v1.8.art')) and os.path.getsize(
+            os.path.join(mesh_folder, 'mesh_q4_v1.8.art')) > 1000:
+        temp_path = Path(mesh_folder) / "mesh.obj"
+        return temp_path.with_suffix("").as_posix()
+
+ mesh_v, mesh_f = load_mesh_v(mesh_name, normalized_scale=1.0)
+ if mesh_v is None:
+ return None # No face here
+
+    mesh = MeshEncoder(mesh_v, mesh_f, folder=mesh_folder, file_name='mesh')
+ mesh.align_mesh()
+ return mesh.get_path()
+
+
+def compute_lfd_feature(sample_pcs, n_process, save_path):
+ align_feature_sample_folder = save_path
+ os.makedirs(align_feature_sample_folder, exist_ok=True)
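+    # Process the first model serially as a smoke test before spawning workers.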
+ print('==> one model')
+ align_mesh_feature(sample_pcs[0], align_feature_sample_folder)
+ N_process = n_process
+ path_list = []
+    if n_process == 0:
+        # Serial fallback: Pool(0) is invalid, so run in-process and return.
+        for i in tqdm(range(len(sample_pcs))):
+            align_mesh_feature(sample_pcs[i], align_feature_sample_folder)
+        return
+ print('==> multi process')
+ pool = Pool(N_process)
+ for x in tqdm(
+ pool.imap_unordered(partial(align_mesh_feature, align_feature_sample_folder=align_feature_sample_folder), sample_pcs),
+ total=len(sample_pcs)):
+ path_list.append(x)
+ pool.close()
+ pool.join()
+
+
+def load_data_with_prefix(root_folder, prefix, folder_list_txt=None):
+ data_files = []
+ folder_list = []
+ if folder_list_txt is not None:
+ with open(folder_list_txt, "r") as f:
+ folder_list = f.read().splitlines()
+ # Walk through the directory tree starting from the root folder
+ for root, dirs, files in os.walk(root_folder):
+ if folder_list_txt is not None and os.path.basename(root) not in folder_list:
+ continue
+ for filename in files:
+            # Match files whose names end with the given pattern (the "prefix" argument is matched as a suffix)
+ if filename.endswith(prefix):
+ file_path = os.path.join(root, filename)
+ data_files.append(file_path)
+
+ return data_files
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--gen_path", type=str, required=True, help="path to the generated models")
+ parser.add_argument("--save_path", type=str, required=True, help="path to save the generated features for each model")
+ parser.add_argument("--n_models", type=int, default=-1, help="Number of models used for evaluation")
+ parser.add_argument("--n_process", type=int, default=-1, help="Number of process used for evaluation")
+ parser.add_argument("--prefix", type=str, required=False, default="mesh.ply")
+
+ args = parser.parse_args()
+ if args.n_process == -1:
+ num_cpus = min(64, os.cpu_count())
+ else:
+ num_cpus = args.n_process
+ models = []
+ all_folders = os.listdir(args.gen_path)
+ for folder in tqdm(all_folders):
+ if not os.path.isdir(os.path.join(args.gen_path, folder)):
+ continue
+ files = glob.glob(os.path.join(args.gen_path, folder, args.prefix))
+ if len(files) == 0:
+ continue
+ models.append(os.path.abspath(files[0]))
+ models.sort()
+ print(f"Loading {len(models)} models")
+    compute_lfd_feature(models, num_cpus, os.path.abspath(args.save_path))
diff --git a/eval/lfd/evaluation_scripts/compute_lfd_feat/dummy-1920x1080.conf b/eval/lfd/evaluation_scripts/compute_lfd_feat/dummy-1920x1080.conf
new file mode 100644
index 0000000000000000000000000000000000000000..75ce5412efdbfe0066e7ff56d4ca5d4275f3740e
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/compute_lfd_feat/dummy-1920x1080.conf
@@ -0,0 +1,25 @@
+Section "Monitor"
+ Identifier "Monitor0"
+ HorizSync 28.0-80.0
+ VertRefresh 48.0-75.0
+ # https://arachnoid.com/modelines/
+ # 1920x1080 @ 60.00 Hz (GTF) hsync: 67.08 kHz; pclk: 172.80 MHz
+ Modeline "1920x1080_60.00" 172.80 1920 2040 2248 2576 1080 1081 1084 1118 -HSync +Vsync
+EndSection
+
+Section "Device"
+ Identifier "Card0"
+ Driver "dummy"
+ VideoRam 256000
+EndSection
+
+Section "Screen"
+ DefaultDepth 24
+ Identifier "Screen0"
+ Device "Card0"
+ Monitor "Monitor0"
+ SubSection "Display"
+ Depth 24
+ Modes "1920x1080_60.00"
+ EndSubSection
+EndSection
\ No newline at end of file
diff --git a/eval/lfd/evaluation_scripts/compute_lfd_feat/lfd_me.py b/eval/lfd/evaluation_scripts/compute_lfd_feat/lfd_me.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e01944724bf4098ddb63d804abf5de923e4131a
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/compute_lfd_feat/lfd_me.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+'''
+Modified from https://github.com/kacperkan/light-field-distance
+'''
+import argparse
+import sys
+import os
+import shutil
+import subprocess
+import tempfile
+import uuid
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+import trimesh
+
+SIMILARITY_TAG = b"SIMILARITY:"
+CURRENT_DIR = Path(__file__).parent.parent.parent / 'light-field-distance/lfd/Executable'
+
+GENERATED_FILES_NAMES = [
+ "all_q4_v1.8.art",
+ "all_q8_v1.8.art",
+ "all_q8_v1.8.cir",
+ "all_q8_v1.8.ecc",
+ "all_q8_v1.8.fd",
+]
+
+OUTPUT_NAME_TEMPLATES = [
+ "{}_q4_v1.8.art",
+ "{}_q8_v1.8.art",
+ "{}_q8_v1.8.cir",
+ "{}_q8_v1.8.ecc",
+ "{}_q8_v1.8.fd",
+]
+
+
+class MeshEncoder:
+ """Class holding an object and preprocessing it using an external cmd."""
+
+ def __init__(self, vertices: np.ndarray, triangles: np.ndarray, folder=None, file_name=None):
+ """Instantiate the class.
+
+ It instantiates an empty, temporary folder that will hold any
+ intermediate data necessary to calculate Light Field Distance.
+
+ Args:
+ vertices: np.ndarray of vertices consisting of 3 coordinates each.
+            triangles: np.ndarray where each entry is a vector with 3 elements.
+                Each element corresponds to a vertex index; together they define a triangle.
+ """
+ self.mesh = trimesh.Trimesh(vertices=vertices, faces=triangles)
+ if folder is None:
+ folder = tempfile.mkdtemp()
+ if file_name is None:
+ file_name = uuid.uuid4()
+ self.temp_dir_path = Path(folder)
+ self.file_name = file_name
+ self.temp_path = self.temp_dir_path / "{}.obj".format(self.file_name)
+ self.mesh.export(self.temp_path.as_posix())
+
+ def get_path(self) -> str:
+ """Get path of the object.
+
+ Commands require that an object is represented without any extension.
+
+ Returns:
+ Path to the temporary object created in the file system that
+ holds the Wavefront OBJ data of the object.
+ """
+ return self.temp_path.with_suffix("").as_posix()
+
+ def align_mesh(self):
+ """Create data of a 3D mesh to calculate Light Field Distance.
+
+ It runs an external command that create intermediate files and moves
+ these files to created temporary folder.
+
+ Returns:
+ None
+ """
+        run_dir = self.temp_dir_path
+        # The 3DAlignment executable expects its lookup tables and the ten
+        # 12_*.obj camera meshes to sit in its working directory.
+        copy_file = ['3DAlignment', 'align10.txt', 'q8_table',
+                     '12_0.obj', '12_1.obj', '12_2.obj', '12_3.obj', '12_4.obj',
+                     '12_5.obj', '12_6.obj', '12_7.obj', '12_8.obj', '12_9.obj']
+        for f in copy_file:
+            shutil.copy(os.path.join(CURRENT_DIR, f), os.path.join(run_dir, f))
+ env = os.environ.copy()
+ env["DISPLAY"] = ":0"
+ process = subprocess.Popen(
+ ['./3DAlignment', self.temp_path.with_suffix("").as_posix()],
+ cwd=run_dir,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env
+ )
+
+ output, err = process.communicate()
+
+ if len(err) > 0:
+ print(err)
+ sys.exit(1)
+
+ for file, out_file in zip(
+ GENERATED_FILES_NAMES, OUTPUT_NAME_TEMPLATES
+ ):
+ shutil.move(
+ os.path.join(run_dir, file),
+ (
+ self.temp_dir_path / out_file.format(self.file_name)
+ ).as_posix(),
+ )
+        for f in copy_file:
+            os.remove(os.path.join(run_dir, f))
+
+        os.remove(self.temp_path.as_posix())
diff --git a/eval/lfd/evaluation_scripts/lfd_all_compute/lfd.py b/eval/lfd/evaluation_scripts/lfd_all_compute/lfd.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2ebf7d19e4a1f9037dee4d186f201eb1d947eb8
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/lfd_all_compute/lfd.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import torch
+from tqdm import tqdm
+
+
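+# Light Field Distance between two descriptor sets: quantized coefficient
+# differences are looked up in q8_table and summed over the ART and Fourier
+# descriptors plus per-view circularity and eccentricity, then minimized over
+# the 60 precomputed camera alignments in align_10.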
+def calculate_lfd_distance(
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8):
+ with torch.no_grad():
+ src_ArtCoeff = src_ArtCoeff.unsqueeze(dim=1).unsqueeze(dim=1).expand(-1, 10, 10, -1, -1, -1)
+ tgt_ArtCoeff = tgt_ArtCoeff.unsqueeze(dim=3).unsqueeze(dim=3).expand(-1, -1, -1, 10, 10, -1)
+ art_distance = q8_table[src_ArtCoeff.reshape(-1).long(), tgt_ArtCoeff.reshape(-1).long()]
+ art_distance = art_distance.reshape(
+ src_ArtCoeff.shape[0], src_ArtCoeff.shape[1], src_ArtCoeff.shape[2],
+ src_ArtCoeff.shape[3],
+ src_ArtCoeff.shape[4], src_ArtCoeff.shape[5])
+ art_distance = torch.sum(art_distance, dim=-1)
+
+ src_FdCoeff_q8 = src_FdCoeff_q8.unsqueeze(dim=1).unsqueeze(dim=1).expand(-1, 10, 10, -1, -1, -1)
+ tgt_FdCoeff_q8 = tgt_FdCoeff_q8.unsqueeze(dim=3).unsqueeze(dim=3).expand(-1, -1, -1, 10, 10, -1)
+ fd_distance = q8_table[src_FdCoeff_q8.reshape(-1).long(), tgt_FdCoeff_q8.reshape(-1).long()]
+ fd_distance = fd_distance.reshape(
+ src_FdCoeff_q8.shape[0], src_FdCoeff_q8.shape[1], src_FdCoeff_q8.shape[2],
+ src_FdCoeff_q8.shape[3], src_FdCoeff_q8.shape[4], src_FdCoeff_q8.shape[5])
+ fd_distance = torch.sum(fd_distance, dim=-1) * 2.0
+
+ src_CirCoeff_q8 = src_CirCoeff_q8.unsqueeze(dim=1).unsqueeze(dim=1).expand(-1, 10, 10, -1, -1)
+ tgt_CirCoeff_q8 = tgt_CirCoeff_q8.unsqueeze(dim=3).unsqueeze(dim=3).expand(-1, -1, -1, 10, 10)
+ cir_distance = q8_table[src_CirCoeff_q8.reshape(-1).long(), tgt_CirCoeff_q8.reshape(-1).long()]
+ cir_distance = cir_distance.reshape(
+ src_CirCoeff_q8.shape[0], src_CirCoeff_q8.shape[1],
+ src_CirCoeff_q8.shape[2],
+ src_CirCoeff_q8.shape[3], src_CirCoeff_q8.shape[4])
+ cir_distance = cir_distance * 2.0
+ src_EccCoeff_q8 = src_EccCoeff_q8.unsqueeze(dim=1).unsqueeze(dim=1).expand(-1, 10, 10, -1, -1)
+ tgt_EccCoeff_q8 = tgt_EccCoeff_q8.unsqueeze(dim=3).unsqueeze(dim=3).expand(-1, -1, -1, 10, 10)
+ ecc_distance = q8_table[src_EccCoeff_q8.reshape(-1).long(), tgt_EccCoeff_q8.reshape(-1).long()]
+ ecc_distance = ecc_distance.reshape(
+ src_EccCoeff_q8.shape[0], src_EccCoeff_q8.shape[1],
+ src_EccCoeff_q8.shape[2], src_EccCoeff_q8.shape[3],
+ src_EccCoeff_q8.shape[4])
+ cost = art_distance + fd_distance + cir_distance + ecc_distance
+        # find the closest matching alignment
+ # cost shape: batch_size x src_camera x src_angle x dst_camera x dst_angle
+ cost = cost.permute(0, 1, 3, 2, 4).long()
+ align_n = align_10[:, :10].reshape(-1)
+ cost_bxsrc_cxdst_cxsrc_axdst_a = cost
+ align_err = torch.gather(
+ input=cost_bxsrc_cxdst_cxsrc_axdst_a,
+ index=align_n.reshape(1, 1, 1, 60 * 10, 1).expand(
+ cost.shape[0], cost.shape[1],
+ cost.shape[2], 60 * 10, 10).long(),
+ dim=3)
+ align_err = align_err.reshape(cost.shape[0], cost.shape[1], cost.shape[2], 60, 10, 10)
+ sum_diag = 0
+ for i in range(10):
+ sum_diag += align_err[:, :, :, :, i, i]
+ sum_diag = sum_diag.reshape(cost.shape[0], -1)
+ dist = torch.min(sum_diag, dim=-1)[0]
+ return dist
+
+
+class LightFieldDistanceFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(
+ ctx, q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8, log):
+ n = src_ArtCoeff.shape[0]
+ m = tgt_ArtCoeff.shape[0]
+        # Compute the full n x m distance matrix, chunking the target set to
+        # bound peak GPU memory.
+        print(f"src_size: {n}")
+        print(f"tgt_size: {m}")
+ all_dist = []
+ with torch.no_grad():
+ for i in tqdm(range(n), mininterval=60, disable=not log):
+ start_idx = 0
+ n_all_run = tgt_ArtCoeff.shape[0]
+ n_each_run = 1000
+ one_run_d = []
+ while start_idx < n_all_run:
+ end_idx = min(n_all_run, start_idx + n_each_run)
+ run_length = end_idx - start_idx
+ d = calculate_lfd_distance(
+ q8_table, align_10,
+ src_ArtCoeff[i:i + 1].expand(run_length, -1, -1, -1),
+ src_FdCoeff_q8[i:i + 1].expand(run_length, -1, -1, -1),
+ src_CirCoeff_q8[i:i + 1].expand(run_length, -1, -1),
+ src_EccCoeff_q8[i:i + 1].expand(run_length, -1, -1),
+ tgt_ArtCoeff[start_idx:end_idx],
+ tgt_FdCoeff_q8[start_idx:end_idx],
+ tgt_CirCoeff_q8[start_idx:end_idx],
+ tgt_EccCoeff_q8[start_idx:end_idx])
+ start_idx = end_idx
+ one_run_d.append(d)
+ d = torch.cat(one_run_d, dim=0)
+ all_dist.append(d.unsqueeze(dim=0))
+ dist = torch.cat(all_dist, dim=0)
+
+ return dist
+
+    @staticmethod
+    def backward(ctx, graddist):
+        # LFD is a non-differentiable score; no gradient is defined.
+        raise NotImplementedError
+
+
+class LFD(torch.nn.Module):
+ def forward(
+ self, q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8, log):
+ return LightFieldDistanceFunction.apply(
+ q8_table, align_10, src_ArtCoeff, src_FdCoeff_q8, src_CirCoeff_q8, src_EccCoeff_q8,
+ tgt_ArtCoeff, tgt_FdCoeff_q8, tgt_CirCoeff_q8, tgt_EccCoeff_q8, log)
diff --git a/eval/lfd/evaluation_scripts/load_data/12_0.OBJ b/eval/lfd/evaluation_scripts/load_data/12_0.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..6d5a49d4aa6237c56f8042aa845fd356811b2a79
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_0.OBJ
@@ -0,0 +1,33 @@
+v 0.52573 0.38197 0.85065
+v -0.20081 0.61803 0.85065
+v -0.64984 0.00000 0.85065
+v -0.20081 -0.61803 0.85065
+v 0.52573 -0.38197 0.85065
+v 0.85065 -0.61803 0.20081
+v 1.0515 0.00000 -0.20081
+v 0.85065 0.61803 0.20081
+v 0.32492 1.00000 -0.20081
+v -0.32492 1.00000 0.20081
+v -0.85065 0.61803 -0.20081
+v -1.0515 0.00000 0.20081
+v -0.85065 -0.61803 -0.20081
+v -0.32492 -1.00000 0.20081
+v 0.32492 -1.00000 -0.20081
+v 0.64984 0.00000 -0.85065
+v 0.20081 0.61803 -0.85065
+v -0.52573 0.38197 -0.85065
+v -0.52573 -0.38197 -0.85065
+v 0.20081 -0.61803 -0.85065
+
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
\ No newline at end of file
diff --git a/eval/lfd/evaluation_scripts/load_data/12_1.OBJ b/eval/lfd/evaluation_scripts/load_data/12_1.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..3502fdc95f2df7c133b20b3e1b039964a2db6f71
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_1.OBJ
@@ -0,0 +1,32 @@
+v 0.701802 0.689752 0.421444
+v 0.036426 0.714903 0.795914
+v -0.109796 0.014936 1.064716
+v 0.465216 -0.442823 0.856372
+v 0.966813 -0.025774 0.458810
+v 0.994836 -0.304541 -0.251889
+v 0.747181 0.238692 -0.728507
+v 0.566046 0.853184 -0.312347
+v -0.183238 0.979358 -0.391383
+v -0.510564 0.893894 0.293558
+v -0.994836 0.304541 0.251889
+v -0.747181 -0.238692 0.728507
+v -0.566046 -0.853184 0.312347
+v 0.183238 -0.979358 0.391383
+v 0.510564 -0.893894 -0.293558
+v 0.109796 -0.014936 -1.064716
+v -0.465216 0.442823 -0.856372
+v -0.966813 0.025774 -0.458810
+v -0.701802 -0.689752 -0.421444
+v -0.036426 -0.714903 -0.795914
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_2.OBJ b/eval/lfd/evaluation_scripts/load_data/12_2.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..6ce6671aa38e3d2478e49e40df11a28b1886a779
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_2.OBJ
@@ -0,0 +1,32 @@
+v -0.265671 0.364304 0.970876
+v -0.889128 0.225538 0.551793
+v -0.875770 -0.509765 0.345058
+v -0.244050 -0.825444 0.636375
+v 0.133016 -0.285250 1.023151
+v 0.807144 -0.176679 0.680583
+v 0.825125 0.539979 0.416615
+v 0.162066 0.874304 0.596002
+v -0.197046 1.050748 -0.054765
+v -0.846719 0.649789 -0.082092
+v -0.807144 0.176679 -0.680583
+v -0.825125 -0.539979 -0.416615
+v -0.162066 -0.874304 -0.596002
+v 0.197046 -1.050748 0.054765
+v 0.846719 -0.649789 0.082092
+v 0.875770 0.509765 -0.345058
+v 0.244050 0.825444 -0.636375
+v -0.133016 0.285250 -1.023151
+v 0.265671 -0.364304 -0.970876
+v 0.889128 -0.225538 -0.551793
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_3.OBJ b/eval/lfd/evaluation_scripts/load_data/12_3.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..b820aa0872f1d9ca51f9b67dc456518330773053
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_3.OBJ
@@ -0,0 +1,32 @@
+v 0.079042 0.242783 1.039571
+v -0.349675 0.756841 0.671425
+v -0.935122 0.387767 0.347966
+v -0.868230 -0.354398 0.516207
+v -0.241448 -0.444012 0.943639
+v 0.249304 -0.881208 0.554266
+v 0.873115 -0.464646 0.409574
+v 0.767860 0.230031 0.709484
+v 0.764860 0.736223 0.137338
+v 0.074186 1.061810 0.113812
+v -0.249304 0.881208 -0.554266
+v -0.873115 0.464646 -0.409574
+v -0.767860 -0.230031 -0.709484
+v -0.764860 -0.736223 -0.137338
+v -0.074186 -1.061810 -0.113812
+v 0.935122 -0.387767 -0.347966
+v 0.868230 0.354398 -0.516207
+v 0.241448 0.444012 -0.943639
+v -0.079042 -0.242783 -1.039571
+v 0.349675 -0.756841 -0.671425
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_4.OBJ b/eval/lfd/evaluation_scripts/load_data/12_4.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..bb145376d4d94302cfda2abee8c43bbbd287fca5
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_4.OBJ
@@ -0,0 +1,32 @@
+v 0.041829 0.375558 1.001551
+v -0.620967 0.559182 0.669034
+v -0.965001 -0.088384 0.454818
+v -0.514823 -0.672232 0.654945
+v 0.107431 -0.385510 0.992844
+v 0.713218 -0.565332 0.563570
+v 1.022046 0.084586 0.306992
+v 0.607073 0.666081 0.577659
+v 0.293612 1.029275 -0.016836
+v -0.465359 0.963207 0.039633
+v -0.713218 0.565332 -0.563570
+v -1.022046 -0.084586 -0.306992
+v -0.607073 -0.666081 -0.577659
+v -0.293612 -1.029275 0.016836
+v 0.465359 -0.963207 -0.039633
+v 0.965001 0.088384 -0.454818
+v 0.514823 0.672232 -0.654945
+v -0.107431 0.385510 -0.992844
+v -0.041829 -0.375558 -1.001551
+v 0.620967 -0.559182 -0.669034
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_5.OBJ b/eval/lfd/evaluation_scripts/load_data/12_5.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..3608f00d6f11799de17f767bab6d2fd9f5fd72cd
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_5.OBJ
@@ -0,0 +1,32 @@
+v 0.433241 -0.134526 0.969589
+v -0.274930 0.146734 1.024096
+v -0.750567 -0.343284 0.681691
+v -0.336349 -0.927398 0.415564
+v 0.395281 -0.798387 0.593489
+v 0.786987 -0.723306 -0.058062
+v 1.067072 -0.013062 -0.084644
+v 0.848406 0.350827 0.550471
+v 0.396820 0.932064 0.345960
+v -0.297441 0.805930 0.638673
+v -0.786987 0.723306 0.058062
+v -1.067072 0.013062 0.084644
+v -0.848406 -0.350827 -0.550471
+v -0.396820 -0.932064 -0.345960
+v 0.297441 -0.805930 -0.638673
+v 0.750567 0.343284 -0.681691
+v 0.336349 0.927398 -0.415564
+v -0.395281 0.798387 -0.593489
+v -0.433241 0.134526 -0.969589
+v 0.274930 -0.146734 -1.024096
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_6.OBJ b/eval/lfd/evaluation_scripts/load_data/12_6.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..4eac5cb4186f8056c00d0f533aace8b162c415db
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_6.OBJ
@@ -0,0 +1,32 @@
+v 0.620089 0.136630 0.861812
+v 0.051350 0.646547 0.851605
+v -0.609293 0.263137 0.839892
+v -0.448850 -0.483749 0.842859
+v 0.310944 -0.561943 0.856407
+v 0.524062 -0.909406 0.210311
+v 0.964951 -0.425608 -0.183589
+v 1.024262 0.220890 0.219057
+v 0.705320 0.782898 -0.188391
+v 0.104025 1.045970 0.202542
+v -0.524062 0.909406 -0.210311
+v -0.964951 0.425608 0.183589
+v -1.024262 -0.220890 -0.219057
+v -0.705320 -0.782898 0.188391
+v -0.104025 -1.045970 -0.202542
+v 0.609293 -0.263137 -0.839892
+v 0.448850 0.483749 -0.842859
+v -0.310944 0.561943 -0.856407
+v -0.620089 -0.136630 -0.861812
+v -0.051350 -0.646547 -0.851605
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_7.OBJ b/eval/lfd/evaluation_scripts/load_data/12_7.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..299e8c391a0895363dde50588e421675a529448a
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_7.OBJ
@@ -0,0 +1,32 @@
+v 0.264391 -0.021073 1.037088
+v -0.493469 -0.057552 0.948192
+v -0.651797 -0.670655 0.520845
+v 0.008218 -1.013098 0.345625
+v 0.574455 -0.611642 0.664675
+v 1.011906 -0.333486 0.103555
+v 0.972243 0.428990 0.129183
+v 0.510220 0.622060 0.706122
+v -0.095718 0.983069 0.412688
+v -0.716033 0.563047 0.562292
+v -1.011906 0.333486 -0.103555
+v -0.972243 -0.428990 -0.129183
+v -0.510220 -0.622060 -0.706122
+v 0.095718 -0.983069 -0.412688
+v 0.716033 -0.563047 -0.562292
+v 0.651797 0.670655 -0.520845
+v -0.008218 1.013098 -0.345625
+v -0.574455 0.611642 -0.664675
+v -0.264391 0.021073 -1.037088
+v 0.493469 0.057552 -0.948192
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_8.OBJ b/eval/lfd/evaluation_scripts/load_data/12_8.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..dbab2218483d0411a660623ec379dcd9e806b07f
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_8.OBJ
@@ -0,0 +1,32 @@
+v 0.254019 0.843500 0.608178
+v -0.492012 0.840739 0.443818
+v -0.787907 0.184936 0.700642
+v -0.224741 -0.217614 1.023732
+v 0.419205 0.189391 0.966590
+v 0.908089 -0.202376 0.529442
+v 1.045091 0.209604 -0.099130
+v 0.640819 0.855976 -0.050472
+v 0.133837 0.860940 -0.621906
+v -0.566293 0.851521 -0.316421
+v -0.908089 0.202376 -0.529442
+v -1.045091 -0.209604 0.099130
+v -0.640819 -0.855976 0.050472
+v -0.133837 -0.860940 0.621906
+v 0.566293 -0.851521 0.316421
+v 0.787907 -0.184936 -0.700642
+v 0.224741 0.217614 -1.023732
+v -0.419205 -0.189391 -0.966590
+v -0.254019 -0.843500 -0.608178
+v 0.492012 -0.840739 -0.443818
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/12_9.OBJ b/eval/lfd/evaluation_scripts/load_data/12_9.OBJ
new file mode 100644
index 0000000000000000000000000000000000000000..8a44ae75151a7f33860666e20a1ff637671ba595
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/12_9.OBJ
@@ -0,0 +1,32 @@
+v 0.325105 0.556220 0.854883
+v -0.433545 0.466744 0.860279
+v -0.582969 -0.282387 0.852234
+v 0.083341 -0.655902 0.841866
+v 0.644563 -0.137625 0.843503
+v 1.032835 -0.208037 0.189380
+v 0.953383 0.442292 -0.203507
+v 0.515949 0.914609 0.207793
+v -0.124761 1.046644 -0.186731
+v -0.711583 0.769847 0.216524
+v -1.032835 0.208037 -0.189380
+v -0.953383 -0.442292 0.203507
+v -0.515949 -0.914609 -0.207793
+v 0.124761 -1.046644 0.186731
+v 0.711583 -0.769847 -0.216524
+v 0.582969 0.282387 -0.852234
+v -0.083341 0.655902 -0.841866
+v -0.644563 0.137625 -0.843503
+v -0.325105 -0.556220 -0.854883
+v 0.433545 -0.466744 -0.860279
+f 1 2 3 4 5
+f 1 5 6 7 8
+f 1 8 9 10 2
+f 2 10 11 12 3
+f 3 12 13 14 4
+f 14 15 6 5 4
+f 8 7 16 17 9
+f 10 9 17 18 11
+f 12 11 18 19 13
+f 14 13 19 20 15
+f 6 15 20 16 7
+f 17 16 20 19 18
diff --git a/eval/lfd/evaluation_scripts/load_data/__init__.py b/eval/lfd/evaluation_scripts/load_data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66a94c737c98087a954fcc1bbe77028b2851a5af
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
diff --git a/eval/lfd/evaluation_scripts/load_data/align10.txt b/eval/lfd/evaluation_scripts/load_data/align10.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f472306b5e1cb5673dbbae952564242d907e9780
Binary files /dev/null and b/eval/lfd/evaluation_scripts/load_data/align10.txt differ
diff --git a/eval/lfd/evaluation_scripts/load_data/do_all.sh b/eval/lfd/evaluation_scripts/load_data/do_all.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c70faf189cf2ed315836b12fa602ba93ea50b255
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/do_all.sh
@@ -0,0 +1,3 @@
+# conda install libgcc
+g++ -Wall -fPIC -O2 -c run.cpp -std=c++11 -fpermissive
+g++ -shared -o run.so run.o
\ No newline at end of file
diff --git a/eval/lfd/evaluation_scripts/load_data/interface.py b/eval/lfd/evaluation_scripts/load_data/interface.py
new file mode 100644
index 0000000000000000000000000000000000000000..a30149a3388f95f8a5b4a7b9f2fdc3d7d765ea2c
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/interface.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+# and proprietary rights in and to this software, related documentation
+# and any modifications thereto. Any use, reproduction, disclosure or
+# distribution of this software and related documentation without an express
+# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+import os
+import ctypes as c
+import numpy as np
+
+dir_path = os.path.dirname(os.path.realpath(__file__))
+
+
+class LoadData:
+ def __init__(self):
+ self.lib = c.cdll.LoadLibrary(os.path.join(dir_path, 'run.so'))
+ self.lib.run.argtypes = [c.POINTER(c.c_ubyte), c.POINTER(c.c_ubyte), c.c_char_p, c.POINTER(c.c_ubyte),
+ c.POINTER(c.c_ubyte), c.POINTER(c.c_ubyte), c.POINTER(c.c_ubyte)]
+
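+    # run() fills six output buffers via the C library: a 256x256 quantized
+    # difference table, a 60x20 camera-alignment table, and four per-shape
+    # descriptor arrays (10 angles x 10 cameras; 35 ART and 10 FD coefficients,
+    # plus one circularity and one eccentricity value per view).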
+ def run(self, file_name, normalize=False):
+ q8_table = np.zeros(256 * 256, dtype=np.ubyte)
+ align10 = np.zeros(60 * 20, dtype=np.ubyte)
+ dest_ArtCoeff = np.zeros(10 * 10 * 35, dtype=np.ubyte)
+ dest_FdCoeff_q8 = np.zeros(10 * 10 * 10, dtype=np.ubyte)
+ dest_CirCoeff_q8 = np.zeros(10 * 10, dtype=np.ubyte)
+ dest_EccCoeff_q8 = np.zeros(10 * 10, dtype=np.ubyte)
+
+ q8_table_p = np.ascontiguousarray(q8_table).ctypes.data_as(c.POINTER(c.c_ubyte))
+ align10_p = np.ascontiguousarray(align10).ctypes.data_as(c.POINTER(c.c_ubyte))
+ dest_ArtCoeff_p = np.ascontiguousarray(dest_ArtCoeff).ctypes.data_as(c.POINTER(c.c_ubyte))
+ dest_FdCoeff_q8_p = np.ascontiguousarray(dest_FdCoeff_q8).ctypes.data_as(c.POINTER(c.c_ubyte))
+ dest_CirCoeff_q8_p = np.ascontiguousarray(dest_CirCoeff_q8).ctypes.data_as(c.POINTER(c.c_ubyte))
+ dest_EccCoeff_q8_p = np.ascontiguousarray(dest_EccCoeff_q8).ctypes.data_as(c.POINTER(c.c_ubyte))
+ file_name_p = file_name.encode('utf-8')
+ self.lib.run(q8_table_p, align10_p, file_name_p, dest_ArtCoeff_p, dest_FdCoeff_q8_p, dest_CirCoeff_q8_p, dest_EccCoeff_q8_p)
+ return q8_table, align10, dest_ArtCoeff, dest_FdCoeff_q8, dest_CirCoeff_q8, dest_EccCoeff_q8
diff --git a/eval/lfd/evaluation_scripts/load_data/q8_table b/eval/lfd/evaluation_scripts/load_data/q8_table
new file mode 100644
index 0000000000000000000000000000000000000000..59fc29a809fc883b188808140a3163ccda4c9daf
Binary files /dev/null and b/eval/lfd/evaluation_scripts/load_data/q8_table differ
diff --git a/eval/lfd/evaluation_scripts/load_data/run.cpp b/eval/lfd/evaluation_scripts/load_data/run.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..256429cb04f2490c4088bbd47d078a04641eb5ec
--- /dev/null
+++ b/eval/lfd/evaluation_scripts/load_data/run.cpp
@@ -0,0 +1,92 @@
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
+// and proprietary rights in and to this software, related documentation
+// and any modifications thereto. Any use, reproduction, disclosure or
+// distribution of this software and related documentation without an express
+// license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
+
+
+// NOTE: restored standard C headers; the original include list was garbled,
+// so these are a best guess at what the loader needs.
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <memory.h>