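"""Build a three-column (image, latitude, longitude) dataset and push it to the Hugging Face Hub.

The data directory is expected to contain train/validation/test subfolders,
each with a metadata.csv listing image paths and GPS coordinates. Use
--dry-run to build the DatasetDict locally without uploading.
"""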
from __future__ import annotations

import argparse
import os
from pathlib import Path
from typing import Dict, List, Tuple

# Best-effort load .env if present so users don't need the dotenv CLI.
try:
    from dotenv import load_dotenv  # type: ignore
    load_dotenv()  # loads .env from current working directory if it exists
except Exception:
    pass

import pandas as pd
from datasets import Dataset, DatasetDict, Image, Features, Value
from huggingface_hub import HfApi
from huggingface_hub.errors import HfHubHTTPError


def _find_splits(data_dir: Path) -> List[Tuple[str, Path]]:
    out = []
    for split in ("train", "validation", "test"):
        sd = data_dir / split
        if (sd / "metadata.csv").exists():
            out.append((split, sd))
    return out

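# Expected on-disk layout (directory names are what _find_splits looks for;
# the image files are whatever each metadata.csv references):
#
#   <data-dir>/
#     train/metadata.csv
#     validation/metadata.csv
#     test/metadata.csv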

def _normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
    # Create a lowercase map but keep original for selection
    lower_map = {c.lower(): c for c in df.columns}

    # Image path
    img_col_candidates = [
        "image", "file_name", "filename", "path", "filepath", "file"
    ]
    img_col = next((lower_map[c] for c in img_col_candidates if c in lower_map), None)
    if img_col is None:
        raise ValueError(f"Could not find an image path column among: {df.columns.tolist()}")

    # Latitude / longitude (candidate names are matched case-insensitively)
    lat_candidates = ["latitude", "lat"]
    lat_col = next((lower_map[c] for c in lat_candidates if c in lower_map), None)
    if lat_col is None:
        raise ValueError("Latitude column not found (expected one of latitude/lat)")

    lon_candidates = ["longitude", "lon", "long"]
    lon_col = next((lower_map[c] for c in lon_candidates if c in lower_map), None)
    if lon_col is None:
        raise ValueError("Longitude column not found (expected one of longitude/lon/long)")

    out_df = pd.DataFrame({
        "image": df[img_col].astype(str),
        "latitude": pd.to_numeric(df[lat_col], errors="coerce"),
        "longitude": pd.to_numeric(df[lon_col], errors="coerce"),
    })
    out_df = out_df.dropna(subset=["latitude", "longitude"]).reset_index(drop=True)
    return out_df

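# Example metadata.csv that _normalize_columns accepts (column names are
# matched case-insensitively; the rows below are purely illustrative):
#
#   file_name,lat,lon
#   images/img_0001.jpg,40.7128,-74.0060
#   images/img_0002.jpg,51.5074,-0.1278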

def _resolve_paths(df: pd.DataFrame, split_dir: Path) -> pd.DataFrame:
    paths = []
    for p in df["image"].tolist():
        pth = Path(p)
        if pth.is_absolute() and pth.exists():
            paths.append(str(pth))
            continue
        # try relative to split dir
        pth2 = (split_dir / p).resolve()
        if pth2.exists():
            paths.append(str(pth2))
            continue
        # Fall back to the original string; if the file truly does not exist,
        # datasets will raise when the Image feature tries to decode it.
        paths.append(str(p))
    df = df.copy()
    df["image"] = paths
    return df


def build_datasetdict(data_dir: Path) -> DatasetDict:
    splits = _find_splits(data_dir)
    if not splits:
        raise SystemExit(f"No splits found under {data_dir}. Expected metadata.csv in train/validation/test.")

    feats = Features({
        "image": Image(),
        "latitude": Value("float64"),
        "longitude": Value("float64"),
    })

    dd: Dict[str, Dataset] = {}
    for split, sd in splits:
        csv_path = sd / "metadata.csv"
        df = pd.read_csv(csv_path)
        df = _normalize_columns(df)
        df = _resolve_paths(df, sd)

        ds = Dataset.from_dict(df.to_dict(orient="list"), features=feats)
        dd[split] = ds
        print(f"Split {split}: {len(ds)} rows")

    return DatasetDict(dd)

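# Quick local check (illustrative; assumes a "data" folder in the CWD):
#   dd = build_datasetdict(Path("data"))
#   print(dd["train"][0])  # decoded PIL image plus latitude/longitude floats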

def push_to_hub(ds: DatasetDict, repo_id: str, private: bool, max_shard_size: str) -> None:
    # Prefer explicit token if provided
    token = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
    if not token:
        print("[auth] 未检测到 Token。请在 .env 设置 HUGGINGFACE_HUB_TOKEN=hf_xxx,或设置环境变量 HF_TOKEN/HUGGINGFACE_HUB_TOKEN。")
        print("[auth] 也可以先运行: python -c \"from huggingface_hub import login; login('hf_xxx')\"")
    try:
        api = HfApi(token=token)
        api.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True, private=private)
        ds.push_to_hub(repo_id, private=private, max_shard_size=max_shard_size, token=token)
        print(f"Pushed to https://huggingface.co/datasets/{repo_id}")
    except HfHubHTTPError as e:
        if hasattr(e, "response") and getattr(e.response, "status_code", None) == 401:
            print("[auth] 401 Unauthorized:请检查 Token 是否有效、是否具备 write 权限、是否属于 LarryD123 账号。")
            print("[auth] 建议:\n - 在 https://huggingface.co/settings/tokens 重新生成 write Token\n - 将其写入项目根目录 .env (HUGGINGFACE_HUB_TOKEN=hf_xxx)\n - 重新运行上传命令")
        raise

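# Example project-root .env (the token value is a placeholder):
#   HUGGINGFACE_HUB_TOKEN=hf_xxx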

def main():
    ap = argparse.ArgumentParser(description="Build and push a 3-column Image+GPS dataset to Hugging Face.")
    ap.add_argument("--data-dir", type=Path, required=True, help="Folder containing split subfolders (train/validation/test)")
    ap.add_argument("--repo-id", type=str, required=False, help="<user>/<dataset_name> on Hugging Face")
    ap.add_argument("--private", type=str, default="false", help="true/false for private dataset")
    ap.add_argument("--max-shard-size", type=str, default="500MB", help="Shard size for HF push")
    ap.add_argument("--dry-run", action="store_true", help="Build locally without pushing to Hub")
    args = ap.parse_args()

    ds = build_datasetdict(args.data_dir)
    print(ds)

    if args.dry_run:
        print("Dry run: not pushing to hub.")
        return

    if not args.repo_id:
        raise SystemExit("--repo-id is required unless --dry-run is set")

    private = str(args.private).lower() in ("1", "true", "yes", "y")
    push_to_hub(ds, args.repo_id, private=private, max_shard_size=args.max_shard_size)


if __name__ == "__main__":
    main()
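
# Example invocations (script and dataset names are illustrative):
#   python push_image_gps_dataset.py --data-dir ./data --dry-run
#   python push_image_gps_dataset.py --data-dir ./data --repo-id LarryD123/image-gps --private false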