# unsplash_5k_camera / parquet-camera.py
# Uploaded by feeday ("Upload parquet-camera.py", commit 6e179a2, verified)
import sys
import subprocess
import os
import re
from pathlib import Path
# ================= 1. 自动安装并导入依赖 =================
def install_and_import(package_name):
    """Ensure *package_name* is importable, installing it via pip if missing.

    Args:
        package_name: Importable module name (assumed equal to the pip
            distribution name, which holds for pandas/pyarrow here).

    Returns:
        The imported module object, so callers may use it directly.

    Raises:
        subprocess.CalledProcessError: if the pip install fails.
        ImportError: if the package still cannot be imported after install.
    """
    try:
        return __import__(package_name)
    except ImportError:
        print(f"📦 正在自动安装依赖 '{package_name}'...")
        subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])
        # Bug fix: the original never re-imported after installing, so a
        # freshly installed dependency was not actually loaded. Retry now;
        # let ImportError propagate if installation did not help.
        return __import__(package_name)
# Bootstrap third-party dependencies before importing them below.
# pyarrow is never imported by name here, but pandas needs it as the
# parquet engine for read_parquet/to_parquet.
install_and_import("pandas")
install_and_import("pyarrow")
import pandas as pd
# ================= 2. Configuration =================
# [Key switch] 0: do not export image files | 1: export image files
EXPORT_IMAGE_FILES = 1
TARGET_DIR = "./" # directory containing the source parquet files
OUTPUT_DIR = "./exported_images" # directory image files are exported to
OUTPUT_PARQUET = "all_camera_data_merged.parquet"
OUTPUT_CSV = "all_camera_data_merged.csv"
# Columns that must be carried into the merged output (when present)
TARGET_COLUMNS = ['prompt', 'url', 'image']
# Regex extracting the camera model from the stringified EXIF dict,
# e.g. "'Image Model': 'NIKON D750'" -> "NIKON D750"
CAMERA_PATTERN = r"['\"]Image Model['\"]\s*:\s*['\"]([^'\"]+)['\"]"
# ============================================
def get_filename_from_url(url):
    """Derive a unique image file name from an Unsplash-style URL.

    Prefers the embedded ``photo-<id>`` token (e.g.
    ``photo-1542055970400-b3a429e829d2``); otherwise falls back to the
    last path segment with any query string stripped.

    Args:
        url: Candidate URL; may be missing/NaN or a non-string.

    Returns:
        The derived file-name stem, or ``None`` when *url* is unusable.
    """
    if pd.isna(url) or not isinstance(url, str):
        return None
    photo_token = re.search(r"photo-([a-zA-Z0-9-]+)", url)
    if photo_token is not None:
        # Keep the whole "photo-..." token as the identifier.
        return photo_token.group(0)
    # Fallback: final path segment, minus any "?query" suffix.
    tail = url.rsplit('/', 1)[-1]
    return tail.partition('?')[0]
def process_data():
    """Scan TARGET_DIR for parquet files, keep rows with a camera model,
    optionally export each row's image bytes, and write merged outputs.

    Side effects: creates OUTPUT_DIR (when exporting), writes one image
    file per exported row, and writes OUTPUT_PARQUET / OUTPUT_CSV.
    """
    folder_path = Path(TARGET_DIR)
    # Skip our own merged output if it already exists in the folder.
    parquet_files = [f for f in folder_path.glob("*.parquet")
                     if f.name not in [OUTPUT_PARQUET]]
    if not parquet_files:
        print(f"⚠️ 找不到待处理的 .parquet 文件。")
        return
    if EXPORT_IMAGE_FILES == 1:
        Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)
    all_dfs = []
    total_count = 0
    print(f"🚀 开始处理(图片导出模式: {'开启' if EXPORT_IMAGE_FILES==1 else '关闭'})...\n")
    for file_path in parquet_files:
        print(f"📄 正在读取: {file_path.name}")
        try:
            df = pd.read_parquet(file_path)
            # 1. Extract the camera model from the stringified 'exif'
            #    column and keep only rows where one was found.
            #    NOTE(review): assumes every input file has an 'exif'
            #    column — a missing column lands in the except below.
            df['camera_model'] = df['exif'].astype(str).str.extract(CAMERA_PATTERN, expand=False)
            filtered_df = df[df['camera_model'].notna()].copy()
            if filtered_df.empty:
                continue
            # 2. Image export mode: write each row's raw bytes to disk.
            if EXPORT_IMAGE_FILES == 1 and 'image' in filtered_df.columns:
                for idx, row in filtered_df.iterrows():
                    img_data = row['image']
                    # Handle the HF datasets {'bytes': b'...'} image format.
                    raw_bytes = img_data['bytes'] if isinstance(img_data, dict) else img_data
                    if isinstance(raw_bytes, bytes):
                        img_id = get_filename_from_url(row['url'])
                        if not img_id:
                            # Fallback name; idx is the DataFrame index
                            # label, so uniqueness is only best-effort.
                            img_id = f"unknown_{total_count + idx}"
                        # Choose extension from the JPEG magic bytes;
                        # anything else is assumed to be PNG.
                        ext = ".jpg" if raw_bytes.startswith(b'\xff\xd8') else ".png"
                        save_path = os.path.join(OUTPUT_DIR, f"{img_id}{ext}")
                        with open(save_path, "wb") as f:
                            f.write(raw_bytes)
            # 3. Collect the surviving rows for the merged tables.
            cols = [c for c in TARGET_COLUMNS if c in filtered_df.columns] + ['camera_model']
            all_dfs.append(filtered_df[cols])
            total_count += len(filtered_df)
            print(f" └─ ✅ 已记录 {len(filtered_df)} 条数据")
        except Exception as e:
            # Best-effort per file: report the error and move on, so one
            # corrupt parquet does not abort the whole batch.
            print(f" └─ ❌ 出错: {e}")
    # Merge everything and write the result tables.
    if all_dfs:
        merged_df = pd.concat(all_dfs, ignore_index=True)
        merged_df.to_parquet(OUTPUT_PARQUET, index=False)
        merged_df.to_csv(OUTPUT_CSV, index=False)
        print(f"\n✨ 处理完毕!总计筛选出 {total_count} 条带相机参数的数据。")
        if EXPORT_IMAGE_FILES == 1:
            print(f"📁 图片已存至: {OUTPUT_DIR}")
    else:
        print("\n❌ 未提取到有效数据。")
# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
    process_data()