# OurData/scripts/hf_push_dataset.py
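"""Build a 3-column (image, latitude, longitude) dataset from local split
folders and push it to the Hugging Face Hub.

Example invocation (illustrative data dir and repo id):

    python scripts/hf_push_dataset.py --data-dir data --repo-id user/my-geo-dataset
"""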
from __future__ import annotations

import argparse
import os
from pathlib import Path
from typing import Dict, List, Tuple

# Best-effort load .env if present so users don't need the dotenv CLI.
try:
    from dotenv import load_dotenv  # type: ignore

    load_dotenv()  # loads .env from the current working directory if it exists
except Exception:
    pass

import pandas as pd
from datasets import Dataset, DatasetDict, Features, Image, Value
from huggingface_hub import HfApi
from huggingface_hub.errors import HfHubHTTPError

def _find_splits(data_dir: Path) -> List[Tuple[str, Path]]:
    out: List[Tuple[str, Path]] = []
    for split in ("train", "validation", "test"):
        sd = data_dir / split
        if (sd / "metadata.csv").exists():
            out.append((split, sd))
    return out
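
# Expected on-disk layout scanned by `_find_splits` (illustrative):
#   <data_dir>/
#     train/metadata.csv        (+ the image files it references)
#     validation/metadata.csv
#     test/metadata.csv
# Splits without a metadata.csv are skipped rather than treated as errors.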

def _normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
    # Map lowercase column names to the originals for case-insensitive lookup.
    lower_map = {c.lower(): c for c in df.columns}

    # Image path
    img_col_candidates = ["image", "file_name", "filename", "path", "filepath", "file"]
    img_col = next((lower_map[c] for c in img_col_candidates if c in lower_map), None)
    if img_col is None:
        raise ValueError(f"Could not find an image path column among: {df.columns.tolist()}")

    # Latitude (candidates are matched case-insensitively)
    lat_candidates = ["latitude", "lat"]
    lat_col = next((lower_map[c] for c in lat_candidates if c in lower_map), None)
    if lat_col is None:
        raise ValueError("Latitude column not found (expected one of latitude/lat)")

    # Longitude (candidates are matched case-insensitively)
    lon_candidates = ["longitude", "lon", "long"]
    lon_col = next((lower_map[c] for c in lon_candidates if c in lower_map), None)
    if lon_col is None:
        raise ValueError("Longitude column not found (expected one of longitude/lon/long)")

    out_df = pd.DataFrame({
        "image": df[img_col].astype(str),
        "latitude": pd.to_numeric(df[lat_col], errors="coerce"),
        "longitude": pd.to_numeric(df[lon_col], errors="coerce"),
    })
    # Drop rows whose coordinates could not be parsed as numbers.
    out_df = out_df.dropna(subset=["latitude", "longitude"]).reset_index(drop=True)
    return out_df
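
# Illustrative metadata.csv shape accepted by `_normalize_columns`
# (header casing is ignored, so file_name/Latitude/LON also work):
#   file_name,latitude,longitude
#   images/0001.jpg,40.7128,-74.0060
#   images/0002.jpg,51.5074,-0.1278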

def _resolve_paths(df: pd.DataFrame, split_dir: Path) -> pd.DataFrame:
    paths = []
    for p in df["image"].tolist():
        pth = Path(p)
        if pth.is_absolute() and pth.exists():
            paths.append(str(pth))
            continue
        # Try the path relative to the split directory.
        pth2 = (split_dir / p).resolve()
        if pth2.exists():
            paths.append(str(pth2))
            continue
        # Keep the path as-is if not found; `datasets` will raise when it
        # tries to decode the missing image, which surfaces the bad row.
        paths.append(str(p))
    df = df.copy()
    df["image"] = paths
    return df

def build_datasetdict(data_dir: Path) -> DatasetDict:
    splits = _find_splits(data_dir)
    if not splits:
        raise SystemExit(f"No splits found under {data_dir}. Expected metadata.csv in train/validation/test.")
    feats = Features({
        "image": Image(),
        "latitude": Value("float64"),
        "longitude": Value("float64"),
    })
    dd: Dict[str, Dataset] = {}
    for split, sd in splits:
        csv_path = sd / "metadata.csv"
        df = pd.read_csv(csv_path)
        df = _normalize_columns(df)
        df = _resolve_paths(df, sd)
        ds = Dataset.from_dict(df.to_dict(orient="list"), features=feats)
        dd[split] = ds
        print(f"Split {split}: {len(ds)} rows")
    return DatasetDict(dd)
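
# Note: with the `Image()` feature, the string paths stored above are decoded
# lazily; accessing a row, e.g. dd["train"][0]["image"], yields a PIL image.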

def push_to_hub(ds: DatasetDict, repo_id: str, private: bool, max_shard_size: str) -> None:
    # Prefer an explicit token if provided.
    token = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
    if not token:
        print("[auth] No token detected. Set HUGGINGFACE_HUB_TOKEN=hf_xxx in .env, or export the HF_TOKEN/HUGGINGFACE_HUB_TOKEN environment variable.")
        print("[auth] Alternatively, log in first with: python -c \"from huggingface_hub import login; login('hf_xxx')\"")
    try:
        api = HfApi(token=token)
        api.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True, private=private)
        ds.push_to_hub(repo_id, private=private, max_shard_size=max_shard_size, token=token)
        print(f"Pushed to https://huggingface.co/datasets/{repo_id}")
    except HfHubHTTPError as e:
        if hasattr(e, "response") and getattr(e.response, "status_code", None) == 401:
            print("[auth] 401 Unauthorized: check that the token is valid, has write permission, and belongs to the LarryD123 account.")
            print(
                "[auth] Suggestions:\n"
                " - Regenerate a write token at https://huggingface.co/settings/tokens\n"
                " - Write it to .env in the project root (HUGGINGFACE_HUB_TOKEN=hf_xxx)\n"
                " - Re-run the upload command"
            )
        raise

def main():
    ap = argparse.ArgumentParser(description="Build and push a 3-column Image+GPS dataset to Hugging Face.")
    ap.add_argument("--data-dir", type=Path, required=True, help="Folder containing split subfolders (train/validation/test)")
    ap.add_argument("--repo-id", type=str, required=False, help="<user>/<dataset_name> on Hugging Face")
    ap.add_argument("--private", type=str, default="false", help="true/false for a private dataset")
    ap.add_argument("--max-shard-size", type=str, default="500MB", help="Shard size for the HF push")
    ap.add_argument("--dry-run", action="store_true", help="Build locally without pushing to the Hub")
    args = ap.parse_args()

    ds = build_datasetdict(args.data_dir)
    print(ds)
    if args.dry_run:
        print("Dry run: not pushing to the Hub.")
        return
    if not args.repo_id:
        raise SystemExit("--repo-id is required unless --dry-run is set")
    private = str(args.private).lower() in ("1", "true", "yes", "y")
    push_to_hub(ds, args.repo_id, private=private, max_shard_size=args.max_shard_size)


if __name__ == "__main__":
    main()
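
# Illustrative runs:
#   python scripts/hf_push_dataset.py --data-dir data --dry-run
#   python scripts/hf_push_dataset.py --data-dir data --repo-id user/my-geo-dataset --private true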