# sim-datasets/tests/test_dataset_integrity.py
# Author: ziwenhahaha — "[init] init datasets" (commit 4ae2b3e)
# tests/test_dataset_integrity.py
import os
import pytest
import glob
def get_all_dataset_paths():
    """Collect every third-level dataset directory across all benchmark suites.

    Scans three relative base directories (``srsd``, ``srbench1.0``,
    ``llm-srbench``), each holding category subdirectories that in turn
    contain one directory per dataset. Base or category directories that
    do not exist are skipped silently, so the function degrades to an
    empty list when run outside the repository root.

    Returns:
        list[str]: relative paths such as
        ``'srsd/srsd-feynman_easy/<dataset>'``, in deterministic
        (sorted-per-category) order.
    """
    # Suite layout in one place instead of three copy-pasted loops.
    suites = {
        'srsd': [
            'srsd-feynman_easy',
            'srsd-feynman_medium',
            'srsd-feynman_hard',
            'srsd-feynman_easy_dummy',
            'srsd-feynman_medium_dummy',
            'srsd-feynman_hard_dummy',
        ],
        'srbench1.0': ['feynman', 'strogatz', 'blackbox'],
        'llm-srbench': ['chem_react', 'phys_osc', 'bio_pop_growth',
                        'matsci', 'lsrtransform'],
    }
    dataset_paths = []
    for base, categories in suites.items():
        for category in categories:
            category_path = os.path.join(base, category)
            # isdir (not exists): a stray plain file at this path would
            # otherwise crash os.listdir below.
            if not os.path.isdir(category_path):
                continue
            # Sort for a stable order regardless of filesystem.
            for entry in sorted(os.listdir(category_path)):
                dataset_path = os.path.join(category_path, entry)
                if os.path.isdir(dataset_path):
                    dataset_paths.append(dataset_path)
    return dataset_paths
def test_dataset_files_exist():
    """Verify that every discovered dataset directory contains all required files.

    Prints an integrity report (overall statistics, per-suite statistics,
    and a per-dataset breakdown) and fails the test if any dataset is
    missing a required file. ``srbench1.0/blackbox`` datasets are exempt
    from ``formula.py`` since black-box problems ship no ground-truth
    formula.

    Bug fix vs. original: guard the completeness-percentage print so an
    empty dataset list (e.g. wrong working directory) no longer raises
    ZeroDivisionError.
    """
    dataset_paths = get_all_dataset_paths()
    print(f"\n{'='*80}")
    print(f"数据集完整性检查报告")
    print(f"{'='*80}")
    print(f"找到 {len(dataset_paths)} 个数据集目录")
    # Files every dataset must provide.
    required_files = ['formula.py', 'train.csv', 'id_test.csv', 'ood_test.csv', 'valid.csv', 'metadata.yaml']
    total_datasets = len(dataset_paths)
    complete_datasets = 0
    incomplete_datasets = 0
    category_stats = {}    # per top-level suite: totals and missing-file tallies
    detailed_results = []  # one record per dataset for the detailed report
    for dataset_path in dataset_paths:
        dataset_name = os.path.basename(dataset_path)
        category = os.path.basename(os.path.dirname(dataset_path))
        main_category = os.path.basename(os.path.dirname(os.path.dirname(dataset_path)))
        if main_category not in category_stats:
            category_stats[main_category] = {'total': 0, 'complete': 0, 'missing_files': {}}
        category_stats[main_category]['total'] += 1
        # blackbox datasets carry no ground-truth formula: skip formula.py.
        files_to_check = required_files
        if main_category == 'srbench1.0' and category == 'blackbox':
            files_to_check = [f for f in required_files if f != 'formula.py']
        missing_files = []
        present_files = []
        for required_file in files_to_check:
            file_path = os.path.join(dataset_path, required_file)
            if os.path.exists(file_path):
                present_files.append(required_file)
            else:
                missing_files.append(required_file)
        is_complete = not missing_files
        if is_complete:
            complete_datasets += 1
            category_stats[main_category]['complete'] += 1
        else:
            incomplete_datasets += 1
            # Tally which specific files are missing, per suite.
            counts = category_stats[main_category]['missing_files']
            for missing_file in missing_files:
                counts[missing_file] = counts.get(missing_file, 0) + 1
        detailed_results.append({
            'path': f"{main_category}/{category}/{dataset_name}",
            'is_complete': is_complete,
            'missing_files': missing_files,
            'present_files': present_files
        })
    # Overall statistics.
    print(f"\n总体统计:")
    print(f" 总数据集数量: {total_datasets}")
    print(f" 完整数据集: {complete_datasets}")
    print(f" 不完整数据集: {incomplete_datasets}")
    # Guard: avoid ZeroDivisionError when no dataset directories were found.
    if total_datasets > 0:
        print(f" 完整性比例: {(complete_datasets/total_datasets*100):.1f}%")
    # Per-suite statistics.
    print(f"\n按类别统计:")
    for category, stats in category_stats.items():
        if stats['total'] > 0:
            completeness = (stats['complete'] / stats['total']) * 100
            print(f" {category}:")
            print(f" 总数: {stats['total']}")
            print(f" 完整: {stats['complete']}")
            print(f" 完整性: {completeness:.1f}%")
            if stats['missing_files']:
                print(f" 缺失文件统计:")
                for file, count in sorted(stats['missing_files'].items()):
                    print(f" {file}: {count} 个数据集缺失")
    # Detailed per-dataset results: incomplete datasets first.
    print(f"\n详细检查结果:")
    print(f"{'='*80}")
    incomplete_results = [r for r in detailed_results if not r['is_complete']]
    if incomplete_results:
        print(f"\n不完整的数据集 ({len(incomplete_results)} 个):")
        for result in incomplete_results:
            print(f" ❌ {result['path']}")
            print(f" 缺失文件: {', '.join(result['missing_files'])}")
            print(f" 存在文件: {', '.join(result['present_files'])}")
            print()
    complete_results = [r for r in detailed_results if r['is_complete']]
    if complete_results:
        print(f"\n完整的数据集 ({len(complete_results)} 个):")
        for result in complete_results[:10]:  # cap output at 10 entries
            print(f" ✅ {result['path']}")
        if len(complete_results) > 10:
            print(f" ... 还有 {len(complete_results) - 10} 个完整数据集")
    # Fail the test if anything was incomplete.
    if incomplete_datasets > 0:
        print(f"\n⚠️ 发现 {incomplete_datasets} 个不完整的数据集")
        assert False, f"发现 {incomplete_datasets} 个不完整的数据集"
    else:
        print(f"\n🎉 所有数据集完整性检查通过!")
def test_dataset_statistics():
    """Print per-suite dataset completeness statistics.

    Counts, for each of the three benchmark suites, how many datasets
    exist and how many contain every required file, then prints a short
    summary line per suite.

    Returns:
        dict: ``{suite: {'total': int, 'complete': int}}`` for
        ``srsd``, ``srbench1.0`` and ``llm-srbench``.
    """
    dataset_paths = get_all_dataset_paths()
    stats = {
        'srsd': {'total': 0, 'complete': 0},
        'srbench1.0': {'total': 0, 'complete': 0},
        'llm-srbench': {'total': 0, 'complete': 0}
    }
    # Invariant across iterations — hoisted out of the loop.
    required_files = ['formula.py', 'train.csv', 'valid.csv', 'id_test.csv', 'ood_test.csv', 'metadata.yaml']
    for dataset_path in dataset_paths:
        main_category = os.path.basename(os.path.dirname(os.path.dirname(dataset_path)))
        category = os.path.basename(os.path.dirname(dataset_path))
        # Guard clause: ignore paths outside the three known suites
        # (also prevents a KeyError on the 'complete' increment below).
        if main_category not in stats:
            continue
        stats[main_category]['total'] += 1
        # srbench1.0/blackbox datasets have no ground-truth formula, so
        # formula.py is not required there. (The original comment wrongly
        # claimed metadata.yaml was the exempt file.)
        files_to_check = required_files
        if main_category == 'srbench1.0' and category == 'blackbox':
            files_to_check = [f for f in required_files if f != 'formula.py']
        if all(os.path.exists(os.path.join(dataset_path, f)) for f in files_to_check):
            stats[main_category]['complete'] += 1
    print("\n数据集完整性统计:")
    for category, data in stats.items():
        if data['total'] > 0:
            completeness = (data['complete'] / data['total']) * 100
            print(f"{category}: {data['complete']}/{data['total']} ({completeness:.1f}%)")
    return stats