def hyperlink(name, link):
    # Render an anchor tag that opens in a new tab and follows the page theme.
    return (
        f'<a target="_blank" href="{link}" '
        'style="color: var(--link-text-color); '
        'text-decoration: underline; text-decoration-style: dotted;">'
        f'{name}</a>'
    )
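
# Illustrative use (this URL appears later in this file; the call itself is
# just an example):
#   hyperlink("GPTEval3D", "https://github.com/3DTopia/GPTEval3D")
# returns an <a> tag that opens in a new tab, colored via the page's
# --link-text-color CSS variable and underlined with a dotted line.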

def get_title_md():
    md = '''
# 🏆 Leaderboard for 3D Generative Models
'''
    return md

def get_intro_md():
    md = '''
This leaderboard provides a centralized platform for evaluating and tracking the performance of 3D generation models.
'''
    return md

def get_model_intro_md():
    md = '''
This leaderboard spans a diverse set of state-of-the-art 3D generation models, covering different conditioning settings such as image, text, or combinations thereof.
'''
    return md

def get_model_description_md(model_config, cols=10, except_models=()):
    # Group model hyperlinks by task, skipping any model in except_models.
    model_list = {}
    for cfg in model_config.values():
        task = cfg.task
        model_name = cfg.model_name
        model_link = cfg.page_link if cfg.page_link else cfg.code_link
        if model_name not in except_models:
            model_list.setdefault(task, set()).add(hyperlink(model_name, model_link))

    model_descriptions = ""
    for task, models in model_list.items():
        model_descriptions += f"\n**{len(models)} {task} Generative Models**\n"
        # Lay the models out as an HTML table with `cols` cells per row.
        model_descriptions += '<table style="width:100%; text-align:left; border:none; border-collapse: collapse;">\n'
        for i, model in enumerate(models):
            if i % cols == 0:
                model_descriptions += ' <tr>\n'
            model_descriptions += f'  <td>{model}</td>\n'
            if (i + 1) % cols == 0:
                model_descriptions += ' </tr>\n'
        if len(models) % cols != 0:
            # Pad the last row with empty cells so every row has `cols` columns.
            num_pad = cols - len(models) % cols
            model_descriptions += '  <td></td>\n' * num_pad
            model_descriptions += ' </tr>\n'
        model_descriptions += '</table>\n'
    return model_descriptions.strip()
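
# Sketch of the config interface get_model_description_md assumes: each entry
# must expose .task, .model_name, .page_link, and .code_link attributes. The
# SimpleNamespace stand-in below is hypothetical, for quick local testing only:
#
#   from types import SimpleNamespace
#   demo_config = {
#       "demo": SimpleNamespace(task="Image-to-3D", model_name="DemoModel",
#                               page_link="https://example.com", code_link=""),
#   }
#   print(get_model_description_md(demo_config, cols=2))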

def get_object_dimension_intro_md():
    md = '''
Each model is evaluated under consistent, standardized settings and assessed along **multiple evaluation dimensions** to provide a detailed view of its strengths and limitations:
'''
    return md

def get_object_dimension_description_md():
    md = '''
- **Geometry Plausibility** assesses the structural integrity and physical feasibility of the generated shape.
- **Geometry Details** reflects the fidelity of fine-scale structures, such as sharp edges and part boundaries.
- **Texture Quality** evaluates the visual fidelity of surface textures in terms of resolution, realism, and aesthetic consistency.
- **Geometry-Texture Coherency** assesses the alignment between texture and shape: whether textures follow the contours, part boundaries, and material semantics of the geometry.
- **Prompt-3D Alignment** evaluates the semantic and/or identity consistency between the input prompt and the generated 3D asset.
'''
    return md

def get_leaderboard_intro_md():
    md = '''
This leaderboard integrates results from three complementary benchmarks that span different aspects of 3D synthesis:
- [Hi3DEval]()
- [3DGenBench](https://zyh482.github.io/3DGen-Bench/)
- [GPTEval3D](https://github.com/3DTopia/GPTEval3D)
'''
    return md


def get_hi3deval_intro_md(num_model=None):
    md = '''
This leaderboard is evaluated using **Hi3DEval**, a straightforward scoring benchmark that does **not rely on pairwise comparisons**.

Specifically, each dimension is assigned an absolute score within clearly defined value ranges:

- Geometry Plausibility: range [0, 9]
- Geometry Details: range [0, 4]
- Texture Quality: range [0, 4]
- Geometry-Texture Coherency: range [0, 1]
- Prompt-3D Alignment: range [0, 4]

The **Overall Score** is computed as the **SUM** of the scores across all five dimensions.

Hi3DEval supports unified evaluation for both **Text-to-3D** and **Image-to-3D** generation tasks. You can also freely select **"Task"** to explore performance under different input modalities.
'''
    return md
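
# The ranges and SUM rule below restate the Hi3DEval description above in code.
# compute_overall_score is a hypothetical helper (not used elsewhere in this
# file), sketching how an Overall Score aggregates the per-dimension scores.
HI3DEVAL_SCORE_RANGES = {
    "Geometry Plausibility": (0, 9),
    "Geometry Details": (0, 4),
    "Texture Quality": (0, 4),
    "Geometry-Texture Coherency": (0, 1),
    "Prompt-3D Alignment": (0, 4),
}

def compute_overall_score(scores):
    """Sum the five Hi3DEval dimension scores, defensively clamping each
    to its documented range. `scores` maps dimension name -> score."""
    total = 0.0
    for dim, (low, high) in HI3DEVAL_SCORE_RANGES.items():
        total += min(max(scores.get(dim, 0.0), low), high)
    return total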

def get_citation_md(name):
    citations = {
        "hi3deval": '''
```bibtex
@article
```
''',
        "3dgen-bench": '''
```bibtex
@article{zhang20253dgen,
  title={3DGen-Bench: Comprehensive Benchmark Suite for 3D Generative Models},
  author={Zhang, Yuhan and Zhang, Mengchen and Wu, Tong and Wang, Tengfei and Wetzstein, Gordon and Lin, Dahua and Liu, Ziwei},
  journal={arXiv preprint arXiv:2503.21745},
  year={2025}
}
```
''',
        "gpteval3d": '''
```bibtex
@inproceedings{wu2024gpt,
  title={GPT-4V(ision) is a Human-Aligned Evaluator for Text-to-3D Generation},
  author={Wu, Tong and Yang, Guandao and Li, Zhibing and Zhang, Kai and Liu, Ziwei and Guibas, Leonidas and Lin, Dahua and Wetzstein, Gordon},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={22227--22238},
  year={2024}
}
```
'''
    }
    md = f"Reference:\n{citations[name.lower()]}"
    return md
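
if __name__ == "__main__":
    # Smoke test (illustrative only): print a few of the generated markdown
    # blocks. The leaderboard app presumably wires these strings into its UI.
    print(get_title_md())
    print(get_intro_md())
    print(get_hi3deval_intro_md())
    print(get_citation_md("gpteval3d"))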