| import sys
|
| import subprocess
|
| import os
|
| import re
|
| from pathlib import Path
|
|
|
|
|
def install_and_import(package_name):
    """Ensure *package_name* is importable, pip-installing it on demand.

    Args:
        package_name: Importable module name. It is also passed to pip as
            the distribution name, so the two must match (true for
            "pandas" and "pyarrow" as used below).

    Raises:
        subprocess.CalledProcessError: If the pip install command fails.
        ImportError: If the module is still not importable after a
            successful install (e.g. pip name != module name).
    """
    try:
        __import__(package_name)
    except ImportError:
        print(f"📦 正在自动安装依赖 '{package_name}'...")
        subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])
        # Re-import so a pip-name/module-name mismatch fails loudly here
        # instead of at the later top-level `import pandas as pd`.
        __import__(package_name)
|
|
|
# Bootstrap third-party dependencies before they are imported below.
install_and_import("pandas")

install_and_import("pyarrow")  # parquet engine used by read_parquet / to_parquet


import pandas as pd
|
|
|
|
|
|
|
# Set to 1 to also dump each matching row's embedded image bytes to
# OUTPUT_DIR; any other value skips image export entirely.
EXPORT_IMAGE_FILES = 1

# Directory scanned (non-recursively) for input *.parquet files.
TARGET_DIR = "./"
# Destination directory for exported image files (created on demand).
OUTPUT_DIR = "./exported_images"
# Merged outputs; OUTPUT_PARQUET is excluded from the input scan so
# re-running the script does not re-ingest its own result.
OUTPUT_PARQUET = "all_camera_data_merged.parquet"
OUTPUT_CSV = "all_camera_data_merged.csv"

# Columns copied into the merged output (only those present are kept).
TARGET_COLUMNS = ['prompt', 'url', 'image']

# Pulls the camera model out of a stringified EXIF mapping,
# e.g. "'Image Model': 'NIKON D750'" -> capture group "NIKON D750".
CAMERA_PATTERN = r"['\"]Image Model['\"]\s*:\s*['\"]([^'\"]+)['\"]"
|
|
|
|
|
def get_filename_from_url(url):
    """Derive a unique ID (suitable as a filename stem) from an image URL.

    Prefers the Unsplash-style "photo-<id>" token when present; otherwise
    falls back to the last path segment with any query string stripped.
    Returns None for missing or non-string input.
    """
    if pd.isna(url) or not isinstance(url, str):
        return None

    photo_token = re.search(r"photo-([a-zA-Z0-9-]+)", url)
    if photo_token is not None:
        return photo_token.group(0)

    last_segment = url.rsplit('/', 1)[-1]
    return last_segment.split('?', 1)[0]
|
|
|
def _export_images(filtered_df, fallback_idx):
    """Write each row's embedded image bytes to OUTPUT_DIR.

    Args:
        filtered_df: Rows with a detected camera model. Expected to hold an
            'image' column (raw bytes, or a {'bytes': ...} dict — the layout
            parquet image datasets commonly use) and a 'url' column.
        fallback_idx: Next integer to use when a row has no URL-derived name.

    Returns:
        The updated fallback index. Keeping one running counter guarantees
        unique fallback filenames across ALL input files — the previous
        ``total_count + idx`` scheme used pandas row labels, which can
        repeat and silently overwrite earlier exports.
    """
    for _, row in filtered_df.iterrows():
        img_data = row['image']
        raw_bytes = img_data['bytes'] if isinstance(img_data, dict) else img_data
        if not isinstance(raw_bytes, bytes):
            continue

        img_id = get_filename_from_url(row['url'])
        if not img_id:
            img_id = f"unknown_{fallback_idx}"
            fallback_idx += 1

        # Sniff the container from the JPEG magic number; everything else
        # is written as .png (same heuristic as before).
        ext = ".jpg" if raw_bytes.startswith(b'\xff\xd8') else ".png"
        save_path = os.path.join(OUTPUT_DIR, f"{img_id}{ext}")
        with open(save_path, "wb") as f:
            f.write(raw_bytes)
    return fallback_idx


def process_data():
    """Merge camera-tagged rows from every parquet file in TARGET_DIR.

    For each input file: extract the camera model from the stringified
    'exif' column via CAMERA_PATTERN, keep only matching rows, optionally
    export their embedded images (EXPORT_IMAGE_FILES == 1), and finally
    write the concatenated result to OUTPUT_PARQUET and OUTPUT_CSV.
    """
    folder_path = Path(TARGET_DIR)
    # Exclude our own merged output so re-runs don't re-ingest it.
    parquet_files = [f for f in folder_path.glob("*.parquet")
                     if f.name not in [OUTPUT_PARQUET]]

    if not parquet_files:
        print("⚠️ 找不到待处理的 .parquet 文件。")
        return

    if EXPORT_IMAGE_FILES == 1:
        Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

    all_dfs = []
    total_count = 0
    fallback_idx = 0  # running counter for images without a URL-derived name

    print(f"🚀 开始处理(图片导出模式: {'开启' if EXPORT_IMAGE_FILES==1 else '关闭'})...\n")

    for file_path in parquet_files:
        print(f"📄 正在读取: {file_path.name}")
        try:
            df = pd.read_parquet(file_path)

            # EXIF arrives as an arbitrarily stringified mapping; rows whose
            # string doesn't match CAMERA_PATTERN get NaN and are dropped.
            df['camera_model'] = df['exif'].astype(str).str.extract(CAMERA_PATTERN, expand=False)
            filtered_df = df[df['camera_model'].notna()].copy()

            if filtered_df.empty:
                continue

            if EXPORT_IMAGE_FILES == 1 and 'image' in filtered_df.columns:
                fallback_idx = _export_images(filtered_df, fallback_idx)

            # Keep only the configured columns that actually exist, plus the
            # derived camera_model.
            cols = [c for c in TARGET_COLUMNS if c in filtered_df.columns] + ['camera_model']
            all_dfs.append(filtered_df[cols])
            total_count += len(filtered_df)
            print(f" └─ ✅ 已记录 {len(filtered_df)} 条数据")

        except Exception as e:
            # Best-effort batch job: report the offending file and move on.
            print(f" └─ ❌ 出错: {e}")

    if all_dfs:
        merged_df = pd.concat(all_dfs, ignore_index=True)
        merged_df.to_parquet(OUTPUT_PARQUET, index=False)
        merged_df.to_csv(OUTPUT_CSV, index=False)
        print(f"\n✨ 处理完毕!总计筛选出 {total_count} 条带相机参数的数据。")
        if EXPORT_IMAGE_FILES == 1:
            print(f"📁 图片已存至: {OUTPUT_DIR}")
    else:
        print("\n❌ 未提取到有效数据。")
|
|
|
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    process_data()