File size: 8,434 Bytes
4ae2b3e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
# tests/test_dataset_integrity.py
import os
import pytest
import glob

def get_all_dataset_paths():
    """Collect the paths of all third-level dataset directories.

    Scans three benchmark roots (``srsd``, ``srbench1.0``, ``llm-srbench``),
    each containing known category sub-directories; every sub-directory of a
    category is treated as one dataset. Roots or categories that do not exist
    on disk are skipped silently. Paths are relative to the current working
    directory, matching how the tests in this file are run.

    Returns:
        list[str]: relative paths of every dataset directory found.
    """
    # Benchmark root -> its expected category sub-directories. Keeping this
    # as data removes the three previously duplicated scan loops.
    benchmark_layout = {
        'srsd': [
            'srsd-feynman_easy',
            'srsd-feynman_medium',
            'srsd-feynman_hard',
            'srsd-feynman_easy_dummy',
            'srsd-feynman_medium_dummy',
            'srsd-feynman_hard_dummy',
        ],
        'srbench1.0': ['feynman', 'strogatz', 'blackbox'],
        'llm-srbench': [
            'chem_react', 'phys_osc', 'bio_pop_growth', 'matsci', 'lsrtransform',
        ],
    }

    dataset_paths = []
    for base, categories in benchmark_layout.items():
        for category in categories:
            category_path = os.path.join(base, category)
            # isdir (not exists): a stray plain file at this path would
            # otherwise make os.listdir raise NotADirectoryError.
            if not os.path.isdir(category_path):
                continue
            for entry in os.listdir(category_path):
                dataset_path = os.path.join(category_path, entry)
                if os.path.isdir(dataset_path):
                    dataset_paths.append(dataset_path)

    return dataset_paths

def test_dataset_files_exist():
    """Check that every dataset directory contains all required files.

    Walks every path returned by ``get_all_dataset_paths``, verifies the
    required data/metadata files are present, prints a detailed report, and
    raises ``AssertionError`` if any dataset is incomplete.
    """
    dataset_paths = get_all_dataset_paths()

    print(f"\n{'='*80}")
    print(f"数据集完整性检查报告")
    print(f"{'='*80}")
    print(f"找到 {len(dataset_paths)} 个数据集目录")

    # Files every dataset must provide.
    required_files = ['formula.py', 'train.csv', 'id_test.csv', 'ood_test.csv', 'valid.csv', 'metadata.yaml']

    # Overall counters.
    total_datasets = len(dataset_paths)
    complete_datasets = 0
    incomplete_datasets = 0

    # Per-benchmark statistics, keyed by the top-level benchmark name.
    category_stats = {}

    # One entry per dataset recording its missing/present files.
    detailed_results = []

    for dataset_path in dataset_paths:
        dataset_name = os.path.basename(dataset_path)
        category = os.path.basename(os.path.dirname(dataset_path))
        main_category = os.path.basename(os.path.dirname(os.path.dirname(dataset_path)))

        # Lazily initialise the per-benchmark bucket.
        if main_category not in category_stats:
            category_stats[main_category] = {'total': 0, 'complete': 0, 'missing_files': {}}
        category_stats[main_category]['total'] += 1

        missing_files = []
        present_files = []

        # srbench1.0/blackbox datasets are excluded from the formula.py
        # requirement (black-box problems have no ground-truth formula).
        files_to_check = required_files
        if main_category == 'srbench1.0' and category == 'blackbox':
            files_to_check = [f for f in required_files if f != 'formula.py']

        for required_file in files_to_check:
            file_path = os.path.join(dataset_path, required_file)
            if os.path.exists(file_path):
                present_files.append(required_file)
            else:
                missing_files.append(required_file)

        is_complete = len(missing_files) == 0

        if is_complete:
            complete_datasets += 1
            category_stats[main_category]['complete'] += 1
        else:
            incomplete_datasets += 1

            # Count how often each file is missing within this benchmark.
            for missing_file in missing_files:
                if missing_file not in category_stats[main_category]['missing_files']:
                    category_stats[main_category]['missing_files'][missing_file] = 0
                category_stats[main_category]['missing_files'][missing_file] += 1

        detailed_results.append({
            'path': f"{main_category}/{category}/{dataset_name}",
            'is_complete': is_complete,
            'missing_files': missing_files,
            'present_files': present_files
        })

    # Overall summary.
    print(f"\n总体统计:")
    print(f"  总数据集数量: {total_datasets}")
    print(f"  完整数据集: {complete_datasets}")
    print(f"  不完整数据集: {incomplete_datasets}")
    # Guard the ratio: with zero datasets the original raised ZeroDivisionError.
    if total_datasets > 0:
        print(f"  完整性比例: {(complete_datasets/total_datasets*100):.1f}%")

    # Per-benchmark summary.
    print(f"\n按类别统计:")
    for category, stats in category_stats.items():
        if stats['total'] > 0:
            completeness = (stats['complete'] / stats['total']) * 100
            print(f"  {category}:")
            print(f"    总数: {stats['total']}")
            print(f"    完整: {stats['complete']}")
            print(f"    完整性: {completeness:.1f}%")

            if stats['missing_files']:
                print(f"    缺失文件统计:")
                for file, count in sorted(stats['missing_files'].items()):
                    print(f"      {file}: {count} 个数据集缺失")

    # Detailed listing.
    print(f"\n详细检查结果:")
    print(f"{'='*80}")

    # Incomplete datasets first, each with its missing files.
    incomplete_results = [r for r in detailed_results if not r['is_complete']]
    if incomplete_results:
        print(f"\n不完整的数据集 ({len(incomplete_results)} 个):")
        for result in incomplete_results:
            print(f"  ❌ {result['path']}")
            print(f"     缺失文件: {', '.join(result['missing_files'])}")
            print(f"     存在文件: {', '.join(result['present_files'])}")
            print()

    # Then complete datasets, truncated to keep the report readable.
    complete_results = [r for r in detailed_results if r['is_complete']]
    if complete_results:
        print(f"\n完整的数据集 ({len(complete_results)} 个):")
        for result in complete_results[:10]:  # only show the first 10
            print(f"  ✅ {result['path']}")

        if len(complete_results) > 10:
            print(f"  ... 还有 {len(complete_results) - 10} 个完整数据集")

    # Fail the test if anything is missing.  raise AssertionError instead of
    # `assert False`, which is silently stripped under `python -O`.
    if incomplete_datasets > 0:
        print(f"\n⚠️  发现 {incomplete_datasets} 个不完整的数据集")
        raise AssertionError(f"发现 {incomplete_datasets} 个不完整的数据集")
    else:
        print(f"\n🎉 所有数据集完整性检查通过!")



def test_dataset_statistics():
    """Print per-benchmark dataset completeness statistics.

    A dataset counts as complete when it contains every required file
    (srbench1.0/blackbox datasets are exempt from ``formula.py``).

    Returns:
        dict: ``{benchmark_name: {'total': int, 'complete': int}}``.
    """
    dataset_paths = get_all_dataset_paths()

    stats = {
        'srsd': {'total': 0, 'complete': 0},
        'srbench1.0': {'total': 0, 'complete': 0},
        'llm-srbench': {'total': 0, 'complete': 0}
    }

    # Files every dataset must provide; hoisted out of the loop (invariant).
    required_files = ['formula.py', 'train.csv', 'valid.csv', 'id_test.csv', 'ood_test.csv', 'metadata.yaml']

    for dataset_path in dataset_paths:
        main_category = os.path.basename(os.path.dirname(os.path.dirname(dataset_path)))
        category = os.path.basename(os.path.dirname(dataset_path))

        if main_category in stats:
            stats[main_category]['total'] += 1

            # srbench1.0/blackbox datasets are exempt from formula.py only.
            # (The old comment wrongly claimed metadata.yaml was skipped,
            # which contradicted the code and test_dataset_files_exist.)
            files_to_check = required_files
            if main_category == 'srbench1.0' and category == 'blackbox':
                files_to_check = [f for f in required_files if f != 'formula.py']

            is_complete = all(os.path.exists(os.path.join(dataset_path, f)) for f in files_to_check)

            if is_complete:
                stats[main_category]['complete'] += 1

    print("\n数据集完整性统计:")
    for category, data in stats.items():
        if data['total'] > 0:
            completeness = (data['complete'] / data['total']) * 100
            print(f"{category}: {data['complete']}/{data['total']} ({completeness:.1f}%)")

    return stats