| |
|
|
| import argparse |
| import json |
| import os |
| import sys |
| from pathlib import Path |
|
|
| |
| sys.path.append(os.path.join(os.path.dirname(__file__), '..')) |
|
|
| from src.llm_generation.vllm_client import VLLMClient |
| from src.llm_generation.cleaning_generator import DataCleaningGenerator |
|
|
def scan_dataset(root_dir):
    """Scan a ``data/original/${category}/${sequence}`` directory tree.

    Args:
        root_dir: Path to the dataset root (e.g. ``data/original``).

    Returns:
        A list of dicts, one per sequence directory, each with keys
        ``category``, ``sequence_name`` and ``sequence_abs_path``
        (absolute path as a string). Returns an empty list when
        *root_dir* does not exist.
    """
    entries = []
    base_path = Path(root_dir).resolve()

    if not base_path.exists():
        print(f"Error: Path {root_dir} does not exist.")
        return []

    print(f"Scanning directory: {base_path} ...")

    # Sort both levels: iterdir() order is filesystem-dependent, and an
    # unsorted scan makes the cleaning report non-reproducible across runs.
    categories = sorted(d for d in base_path.iterdir() if d.is_dir())

    for cat_dir in categories:
        category_name = cat_dir.name

        for seq_dir in sorted(s for s in cat_dir.iterdir() if s.is_dir()):
            entries.append({
                "category": category_name,
                "sequence_name": seq_dir.name,
                "sequence_abs_path": str(seq_dir),
            })

    print(f"Found {len(entries)} sequences across {len(categories)} categories.")
    return entries
|
|
def save_jsonl(data, path):
    """Write *data* (an iterable of JSON-serializable items) to *path* as JSON Lines.

    Parent directories are created as needed. Fix: only call ``makedirs``
    when the path actually has a directory component — ``os.makedirs('')``
    raises ``FileNotFoundError``, which broke bare filenames such as the
    default ``cleaning_report.jsonl``.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        for item in data:
            # ensure_ascii=False keeps CJK text human-readable in the report.
            f.write(json.dumps(item, ensure_ascii=False) + '\n')
|
|
def main():
    """CLI entry point: scan sequences, run LLM-based cleaning, write a JSONL report."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--data_root", type=str, required=True, help="Path to 'data/original'")
    cli.add_argument("--output_file", type=str, default="cleaning_report.jsonl")
    cli.add_argument("--model", type=str, required=True, help="Local model path")
    cli.add_argument("--tp_size", type=int, default=1)
    opts = cli.parse_args()

    # Discover every category/sequence pair under the dataset root.
    found = scan_dataset(opts.data_root)
    if not found:
        print("No entries found. Exiting.")
        return

    # Bring up the local vLLM inference backend.
    backend = VLLMClient(
        model_path=opts.model,
        tensor_parallel_size=opts.tp_size,
        gpu_memory_utilization=0.9,
    )

    cleaner = DataCleaningGenerator(
        client=backend,
        image_root=None,
        model_name=opts.model,
        sample_k=4,
    )

    report = cleaner.process_batch(found)
    save_jsonl(report, opts.output_file)

    # Sequences that yielded no images are excluded from the match statistics.
    usable = [row for row in report if row.get('status') != 'error_no_images']
    suspect = [row for row in usable if row.get('is_match') is False]

    print(f"\n清洗完成!")
    print(f"总扫描数: {len(report)}")
    print(f"有效处理数: {len(usable)}")
    print(f"疑似不匹配: {len(suspect)}")
    print(f"详细报告已保存至: {opts.output_file}")

    if suspect:
        print("\n示例不匹配项:")
        print(json.dumps(suspect[0], indent=2, ensure_ascii=False))


if __name__ == "__main__":
    main()
|
|