| """One-time script to generate parquet files for all OmniFall HF configs. |
| |
| Created for the HF datasets 4.6 migration (dataset scripts no longer supported). |
| Generates parquet files that enable native load_dataset() without custom builder code. |
| Can be safely deleted after parquet files are committed to the Hub. |
| |
| Usage: |
| python generate_parquet.py |
| """ |
|
|
| import os |
| import subprocess |
| import tarfile |
| import tempfile |
| from pathlib import Path |
|
|
| import numpy as np |
| import pandas as pd |
|
|
# This script lives at the repo root; generated parquet files go to parquet/.
REPO_ROOT = Path(__file__).parent
PARQUET_DIR = REPO_ROOT / "parquet"


# The 8 staged (lab-recorded) OmniFall source datasets.
STAGED_DATASETS = [
    "caucafall", "cmdfall", "edf", "gmdcsa24",
    "le2i", "mcfd", "occu", "up_fall",
]


# Per-dataset label CSV paths, relative to REPO_ROOT.
STAGED_LABEL_FILES = {
    "caucafall": "labels/caucafall.csv",
    "cmdfall": "labels/cmdfall.csv",
    "edf": "labels/edf.csv",
    "gmdcsa24": "labels/GMDCSA24.csv",
    "le2i": "labels/le2i.csv",
    "mcfd": "labels/mcfd.csv",
    "occu": "labels/occu.csv",
    "up_fall": "labels/up_fall.csv",
}
# Label CSVs for the in-the-wild (OOPS) and synthetic (OF-Syn) subsets.
ITW_LABEL_FILE = "labels/OOPS.csv"
SYN_LABEL_FILE = "labels/of-syn.csv"
# Video-level metadata CSV for OF-Syn (demographic / camera attributes).
METADATA_FILE = "videos/metadata.csv"


# Columns present in every config.
CORE_COLUMNS = ["path", "label", "start", "end", "subject", "cam", "dataset"]
# Extra per-video attribute columns; only available for the synthetic subset.
DEMOGRAPHIC_COLUMNS = [
    "age_group", "gender_presentation", "monk_skin_tone",
    "race_ethnicity_omb", "bmi_band", "height_band",
    "environment_category", "camera_shot", "speed",
    "camera_elevation", "camera_azimuth", "camera_distance",
]
SYN_COLUMNS = CORE_COLUMNS + DEMOGRAPHIC_COLUMNS
METADATA_COLUMNS = ["path", "dataset"] + DEMOGRAPHIC_COLUMNS


# Old config name -> canonical config name. Parquet files for each old name
# are written as byte-for-byte copies of the canonical config's files
# (see copy_parquet / main).
DEPRECATED_ALIASES = {
    "cs-staged": "of-sta-cs",
    "cv-staged": "of-sta-cv",
    "OOPS": "of-itw",

    "caucafall": "caucafall-cs",
    "cmdfall": "cmdfall-cs",
    "edf": "edf-cs",
    "gmdcsa24": "gmdcsa24-cs",
    "le2i": "le2i-cs",
    "mcfd": "mcfd-cs",
    "occu": "occu-cs",
    "up_fall": "up_fall-cs",

    "of-sta-to-itw-cs": "of-sta-to-all-cs",
    "of-sta-to-itw-cv": "of-sta-to-all-cv",
    "of-syn-to-itw": "of-syn-to-all-cs",
    "of-sta-syn-to-itw-cs": "of-sta-syn-to-all-cs",
    "of-sta-syn-to-itw-cv": "of-sta-syn-to-all-cv",
    "of-sta-itw-cs": "of-sta-to-all-cs",
    "of-sta-itw-cv": "of-sta-to-all-cv",
    "of-syn-itw": "of-syn-to-all-cs",
    "cs-staged-wild": "of-sta-to-all-cs",
    "cv-staged-wild": "of-sta-to-all-cv",
}
|
|
|
|
| |
|
|
def load_csv(relpath):
    """Read the CSV at *relpath*, resolved against REPO_ROOT."""
    full_path = REPO_ROOT / relpath
    return pd.read_csv(full_path)
|
|
|
|
def load_staged_labels(datasets=None):
    """Concatenate the label CSVs of the given staged datasets (default: all 8)."""
    selected = STAGED_DATASETS if datasets is None else datasets
    frames = [load_csv(STAGED_LABEL_FILES[name]) for name in selected]
    return pd.concat(frames, ignore_index=True)
|
|
|
|
def load_itw_labels():
    """Return the OOPS (in-the-wild) label table."""
    itw = load_csv(ITW_LABEL_FILE)
    return itw
|
|
|
|
def load_syn_labels():
    """Return the OF-Syn label table (19-col)."""
    syn = load_csv(SYN_LABEL_FILE)
    return syn
|
|
|
|
def staged_split_files(split_type, split_name):
    """Build the split-CSV relative paths for every staged dataset."""
    paths = []
    for ds in STAGED_DATASETS:
        paths.append(f"splits/{split_type}/{ds}/{split_name}.csv")
    return paths
|
|
|
|
def merge_split_labels(split_files, labels_df):
    """Left-join the paths listed in *split_files* onto *labels_df*.

    Mirrors the old builder's _gen_split_merge behaviour: split rows whose
    path has no matching label entry are reported and dropped.
    """
    combined = pd.concat([load_csv(sf) for sf in split_files], ignore_index=True)
    merged = pd.merge(combined, labels_df, on="path", how="left")

    # A NaN label marks a split path that found no label row in the join.
    missing = merged["label"].isna()
    if missing.any():
        n = missing.sum()
        paths = merged.loc[missing, "path"].tolist()
        print(f" WARNING: Dropping {n} unmatched path(s): {paths}")
        merged = merged[~missing].reset_index(drop=True)
    return merged
|
|
|
|
def cast_core_dtypes(df):
    """Return a copy of *df* with the core columns coerced to the dtypes
    expected by the parquet/ClassLabel schema."""
    dtype_map = {
        "path": str,
        "label": int,
        "start": np.float32,
        "end": np.float32,
        "subject": np.int32,
        "cam": np.int32,
        "dataset": str,
    }
    out = df.copy()
    for column, dtype in dtype_map.items():
        out[column] = out[column].astype(dtype)
    return out
|
|
|
|
def cast_demographic_dtypes(df):
    """Return a copy of *df* with any present demographic columns as strings
    (required for ClassLabel encoding)."""
    out = df.copy()
    present = [c for c in DEMOGRAPHIC_COLUMNS if c in out.columns]
    for column in present:
        out[column] = out[column].astype(str)
    return out
|
|
|
|
def select_and_cast(df, columns, schema="core"):
    """Project *df* onto *columns* and apply the dtype casts for *schema*.

    schema: "core" (core casts only), "syn" (core + demographic casts),
    or "metadata" (demographic casts only).
    """
    out = df[columns].copy()
    if schema == "core" or schema == "syn":
        out = cast_core_dtypes(out)
    if schema == "syn" or schema == "metadata":
        out = cast_demographic_dtypes(out)
    return out
|
|
|
|
def write_parquet(df, config_name, split_name):
    """Persist *df* as parquet/<config_name>/<split_name>-00000-of-00001.parquet.

    Returns the output path, or None if the dataframe is empty (Arrow can't
    handle 0-row parquet files).
    """
    if df.empty:
        print(f" SKIP {config_name}/{split_name}: 0 rows (not written)")
        return None
    target_dir = PARQUET_DIR / config_name
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / f"{split_name}-00000-of-00001.parquet"
    df.to_parquet(target, index=False)
    return target
|
|
|
|
def generate_split_config(config_name, split_type, split_files_fn, labels_df, columns,
                          schema="core"):
    """Generate train/val/test parquet files for a split-based config.

    Args:
        config_name: output config directory name under parquet/.
        split_type: split protocol forwarded to *split_files_fn* (e.g. "cs", "cv").
        split_files_fn: callable (split_type, csv_name) -> list of split CSV paths.
        labels_df: label table merged against the split paths.
        columns: columns to keep in the output.
        schema: dtype schema passed to select_and_cast.

    Returns:
        Dict mapping HF split name ("train"/"validation"/"test") to row count.
    """
    results = {}
    # HF split names differ from the on-disk CSV names ("validation" vs "val").
    for split_name, csv_name in [("train", "train"), ("validation", "val"), ("test", "test")]:
        sf = split_files_fn(split_type, csv_name)
        merged = merge_split_labels(sf, labels_df)
        df = select_and_cast(merged, columns, schema)
        # Return value intentionally ignored (None for empty splits);
        # the original bound it to an unused local.
        write_parquet(df, config_name, split_name)
        results[split_name] = len(df)
    return results
|
|
|
|
def copy_parquet(source_config, target_config):
    """Duplicate every parquet file of *source_config* under *target_config*.

    Used to materialize deprecated alias configs. Returns a dict mapping
    split name to row count.
    """
    src_dir = PARQUET_DIR / source_config
    dst_dir = PARQUET_DIR / target_config
    dst_dir.mkdir(parents=True, exist_ok=True)
    counts = {}
    for parquet_file in sorted(src_dir.glob("*.parquet")):
        # Round-trip through pandas rather than a raw file copy so we can
        # report row counts.
        frame = pd.read_parquet(parquet_file)
        frame.to_parquet(dst_dir / parquet_file.name, index=False)
        split = parquet_file.stem.split("-")[0]
        counts[split] = len(frame)
    return counts
|
|
|
|
| |
|
|
def gen_labels():
    """Config: labels - All staged + OOPS labels, single train split.

    Returns {"labels": {"train": row_count}}.
    """
    staged = load_staged_labels()
    itw = load_itw_labels()
    df = pd.concat([staged, itw], ignore_index=True)
    df = select_and_cast(df, CORE_COLUMNS, "core")
    # write_parquet's return value (path or None) is not needed here;
    # dropped the unused `path` local.
    write_parquet(df, "labels", "train")
    return {"labels": {"train": len(df)}}
|
|
|
|
def gen_labels_syn():
    """Config: labels-syn - OF-Syn labels with demographics, single train split.

    Returns {"labels-syn": {"train": row_count}}.
    """
    df = load_syn_labels()
    df = select_and_cast(df, SYN_COLUMNS, "syn")
    # Dropped the unused `path` local; the output path is not consumed.
    write_parquet(df, "labels-syn", "train")
    return {"labels-syn": {"train": len(df)}}
|
|
|
|
def gen_metadata_syn():
    """Config: metadata-syn - OF-Syn video-level metadata, single train split.

    Returns {"metadata-syn": {"train": row_count}}.
    """
    df = load_csv(METADATA_FILE)

    # Keep path + whichever demographic columns the CSV actually provides,
    # one row per video.
    metadata_cols = ["path"] + DEMOGRAPHIC_COLUMNS
    available = [c for c in metadata_cols if c in df.columns]
    df = df[available].drop_duplicates(subset=["path"]).reset_index(drop=True)
    df["dataset"] = "of-syn"  # constant source tag for this config
    df = select_and_cast(df, METADATA_COLUMNS, "metadata")
    # Dropped the unused `path` local; the output path is not consumed.
    write_parquet(df, "metadata-syn", "train")
    return {"metadata-syn": {"train": len(df)}}
|
|
|
|
def gen_of_sta(split_type):
    """Config: of-sta-cs / of-sta-cv - 8 staged datasets combined.

    Returns {config_name: {split: row_count}}.
    """
    config_name = f"of-sta-{split_type}"
    labels = load_staged_labels()
    # staged_split_files already has the (split_type, split_name) signature
    # generate_split_config expects; the original wrapped it in a redundant
    # lambda.
    results = generate_split_config(
        config_name, split_type,
        staged_split_files,
        labels, CORE_COLUMNS, "core",
    )
    return {config_name: results}
|
|
|
|
def gen_of_itw():
    """Config: of-itw - OOPS-Fall in-the-wild."""
    labels = load_itw_labels()
    counts = {}
    split_pairs = (("train", "train"), ("validation", "val"), ("test", "test"))
    for split_name, csv_name in split_pairs:
        merged = merge_split_labels([f"splits/cs/OOPS/{csv_name}.csv"], labels)
        frame = select_and_cast(merged, CORE_COLUMNS, "core")
        write_parquet(frame, "of-itw", split_name)
        counts[split_name] = len(frame)
    return {"of-itw": counts}
|
|
|
|
def gen_of_syn(split_type, config_name):
    """Config: of-syn variants (random / cross-attribute splits)."""
    labels = load_syn_labels()
    counts = {}
    split_pairs = (("train", "train"), ("validation", "val"), ("test", "test"))
    for split_name, csv_name in split_pairs:
        merged = merge_split_labels(
            [f"splits/syn/{split_type}/{csv_name}.csv"], labels)
        frame = select_and_cast(merged, SYN_COLUMNS, "syn")
        write_parquet(frame, config_name, split_name)
        counts[split_name] = len(frame)
    return {config_name: counts}
|
|
|
|
def gen_crossdomain(config_name, train_split_type, train_source, test_split_type,
                    test_source):
    """Config: cross-domain configs (train from one source, test from another).

    Train/validation come from *train_source* ("staged" or "syn"); the test
    split always comes from *test_source* (currently only "itw", i.e. OOPS).

    Returns {config_name: {split: row_count}}.

    Raises:
        ValueError: on an unknown train_source or test_source.
    """
    # PEP 8 (E731) discourages assigning lambdas to names; use small defs.
    if train_source == "staged":
        train_labels = load_staged_labels()

        def train_split_fn(sn):
            return staged_split_files(train_split_type, sn)
    elif train_source == "syn":
        train_labels = load_syn_labels()

        def train_split_fn(sn):
            return [f"splits/syn/{train_split_type}/{sn}.csv"]
    else:
        raise ValueError(f"Unknown train_source: {train_source}")

    if test_source == "itw":
        test_labels = load_itw_labels()

        def test_split_fn(sn):
            return [f"splits/{test_split_type}/OOPS/{sn}.csv"]
    else:
        raise ValueError(f"Unknown test_source: {test_source}")

    results = {}

    # Train/validation from the training-domain source.
    for split_name, csv_name in [("train", "train"), ("validation", "val")]:
        sf = train_split_fn(csv_name)
        merged = merge_split_labels(sf, train_labels)
        df = select_and_cast(merged, CORE_COLUMNS, "core")
        write_parquet(df, config_name, split_name)
        results[split_name] = len(df)

    # Test from the held-out target domain.
    sf = test_split_fn("test")
    merged = merge_split_labels(sf, test_labels)
    df = select_and_cast(merged, CORE_COLUMNS, "core")
    write_parquet(df, config_name, "test")
    results["test"] = len(df)

    return {config_name: results}
|
|
|
|
def gen_crossdomain_staged_syn(config_name, staged_split_type, syn_split_type,
                               test_split_type):
    """Config: train/val from staged + syn combined, test from OOPS."""
    staged_labels = load_staged_labels()
    syn_labels = load_syn_labels()
    itw_labels = load_itw_labels()

    counts = {}

    # Train/validation: staged rows followed by synthetic rows.
    for split_name, csv_name in (("train", "train"), ("validation", "val")):
        staged_part = select_and_cast(
            merge_split_labels(staged_split_files(staged_split_type, csv_name),
                               staged_labels),
            CORE_COLUMNS, "core",
        )
        syn_part = select_and_cast(
            merge_split_labels([f"splits/syn/{syn_split_type}/{csv_name}.csv"],
                               syn_labels),
            CORE_COLUMNS, "core",
        )
        combined = pd.concat([staged_part, syn_part], ignore_index=True)
        write_parquet(combined, config_name, split_name)
        counts[split_name] = len(combined)

    # Test: OOPS only.
    itw_merged = merge_split_labels(
        [f"splits/{test_split_type}/OOPS/test.csv"], itw_labels)
    test_frame = select_and_cast(itw_merged, CORE_COLUMNS, "core")
    write_parquet(test_frame, config_name, "test")
    counts["test"] = len(test_frame)

    return {config_name: counts}
|
|
|
|
def gen_to_all(config_name, staged_split_type, train_source, ds_name=None):
    """Config: train/val from specified source, test from ALL datasets.

    Test set is always: staged test (cs or cv) + itw test + syn random test.
    Train/val come from the specified source:
    - "individual": single staged dataset (needs ds_name)
    - "staged": all 8 staged datasets
    - "syn": synthetic random split
    - "staged+syn": staged + synthetic combined

    Returns {config_name: {split: row_count}}.

    Raises:
        ValueError: if *train_source* is not one of the values above.
    """
    staged_labels = load_staged_labels()
    itw_labels = load_itw_labels()
    syn_labels = load_syn_labels()

    def _core_df(split_files, labels):
        # Merge split CSVs with labels and cast to the core schema; this
        # merge+cast pair was repeated 7 times in the original body.
        merged = merge_split_labels(split_files, labels)
        return select_and_cast(merged, CORE_COLUMNS, "core")

    results = {}

    # Train / validation from the requested source.
    for split_name, csv_name in [("train", "train"), ("validation", "val")]:
        if train_source == "individual":
            df = _core_df(
                [f"splits/{staged_split_type}/{ds_name}/{csv_name}.csv"],
                load_csv(STAGED_LABEL_FILES[ds_name]),
            )
        elif train_source == "staged":
            df = _core_df(staged_split_files(staged_split_type, csv_name),
                          staged_labels)
        elif train_source == "syn":
            df = _core_df([f"splits/syn/random/{csv_name}.csv"], syn_labels)
        elif train_source == "staged+syn":
            sta_df = _core_df(staged_split_files(staged_split_type, csv_name),
                              staged_labels)
            syn_df = _core_df([f"splits/syn/random/{csv_name}.csv"], syn_labels)
            df = pd.concat([sta_df, syn_df], ignore_index=True)
        else:
            raise ValueError(f"Unknown train_source: {train_source}")

        write_parquet(df, config_name, split_name)
        results[split_name] = len(df)

    # Test: staged + itw + syn, concatenated in that order.
    sta_test = _core_df(staged_split_files(staged_split_type, "test"), staged_labels)
    itw_test = _core_df([f"splits/{staged_split_type}/OOPS/test.csv"], itw_labels)
    syn_test = _core_df(["splits/syn/random/test.csv"], syn_labels)

    test_df = pd.concat([sta_test, itw_test, syn_test], ignore_index=True)
    write_parquet(test_df, config_name, "test")
    results["test"] = len(test_df)

    return {config_name: results}
|
|
|
|
def gen_aggregate(split_type):
    """Config: cs / cv - all staged + OOPS combined."""
    config_name = split_type
    all_labels = pd.concat([load_staged_labels(), load_itw_labels()],
                           ignore_index=True)
    counts = {}
    for split_name, csv_name in (("train", "train"), ("validation", "val"),
                                 ("test", "test")):
        # Staged split files plus the matching OOPS split file.
        split_files = staged_split_files(split_type, csv_name)
        split_files.append(f"splits/{split_type}/OOPS/{csv_name}.csv")
        merged = merge_split_labels(split_files, all_labels)
        frame = select_and_cast(merged, CORE_COLUMNS, "core")
        write_parquet(frame, config_name, split_name)
        counts[split_name] = len(frame)
    return {config_name: counts}
|
|
|
|
def gen_individual(ds_name, split_type):
    """Config: individual dataset with explicit split type (cs or cv)."""
    config_name = f"{ds_name}-{split_type}"
    labels = load_csv(STAGED_LABEL_FILES[ds_name])
    counts = {}
    for split_name, csv_name in (("train", "train"), ("validation", "val"),
                                 ("test", "test")):
        merged = merge_split_labels(
            [f"splits/{split_type}/{ds_name}/{csv_name}.csv"], labels)
        frame = select_and_cast(merged, CORE_COLUMNS, "core")
        write_parquet(frame, config_name, split_name)
        counts[split_name] = len(frame)
    return {config_name: counts}
|
|
|
|
def gen_framewise_syn():
    """Config: framewise-syn - OF-Syn frame-wise labels from HDF5, single train split.

    Reads syn_frame_wise_labels.tar.zst (HDF5 files with 81 frame labels each),
    merges with videos/metadata.csv for demographics, writes a single parquet.
    Requires h5py at generation time only.

    Returns {"framewise-syn": {"train": row_count}} (empty dict if the
    archive is missing).
    """
    import h5py

    archive_path = REPO_ROOT / "data_files" / "syn_frame_wise_labels.tar.zst"
    if not archive_path.exists():
        print(f" SKIP framewise-syn: archive not found at {archive_path}")
        return {"framewise-syn": {}}

    metadata_df = load_csv(METADATA_FILE)
    # Index metadata by path once so the per-file lookup below is O(1)
    # instead of a full-column scan per HDF5 file. drop_duplicates keeps the
    # first row per path, matching the original's `.iloc[0]` behaviour.
    metadata_by_path = metadata_df.drop_duplicates(subset=["path"]).set_index("path")

    metadata_fields = DEMOGRAPHIC_COLUMNS

    rows = []

    with tempfile.TemporaryDirectory() as tmpdir:
        # Shell out to tar for zstd decompression (the stdlib tarfile module
        # presumably lacked zstd support when this was written — confirm).
        subprocess.run(
            ["tar", "--zstd", "-xf", str(archive_path), "-C", tmpdir],
            check=True,
        )
        tmppath = Path(tmpdir)
        h5_files = sorted(tmppath.glob("**/*.h5"))
        print(f" Found {len(h5_files)} HDF5 files in archive")

        for h5_file_path in h5_files:
            relative_path = h5_file_path.relative_to(tmppath)
            # Video key = archive-relative path without the ".h5" suffix.
            video_path = str(relative_path.with_suffix(""))

            try:
                with h5py.File(h5_file_path, "r") as f:
                    frame_labels = f["label_indices"][:].tolist()
            except Exception as e:
                # Best-effort: one unreadable HDF5 file must not abort the
                # whole config.
                print(f" WARNING: Failed to read {h5_file_path}: {e}")
                continue

            if video_path not in metadata_by_path.index:
                print(f" WARNING: No metadata for {video_path}, skipping")
                continue
            video_meta = metadata_by_path.loc[video_path]

            row = {
                "path": video_path,
                "dataset": "of-syn",
                "frame_labels": frame_labels,
            }
            for field in metadata_fields:
                if field in video_meta and pd.notna(video_meta[field]):
                    row[field] = str(video_meta[field])
                else:
                    row[field] = ""
            rows.append(row)

    if not rows:
        # Bug fix: pd.DataFrame([]) has no columns, so the casts below would
        # raise KeyError on "path". Report the skip the way write_parquet
        # would and return a zero count instead of crashing.
        print(" SKIP framewise-syn/train: 0 rows (not written)")
        return {"framewise-syn": {"train": 0}}

    df = pd.DataFrame(rows)
    # frame_labels stays a list column; cast only path/dataset/demographics.
    df = cast_demographic_dtypes(df)
    df["path"] = df["path"].astype(str)
    df["dataset"] = df["dataset"].astype(str)

    write_parquet(df, "framewise-syn", "train")
    return {"framewise-syn": {"train": len(df)}}
|
|
|
|
| |
|
|
def main():
    """Generate parquet files for every config and print a per-split summary.

    Returns:
        Dict mapping config name -> {split_name: row_count}.
    """
    print(f"Generating parquet files in: {PARQUET_DIR}")
    PARQUET_DIR.mkdir(parents=True, exist_ok=True)

    all_results = {}

    def _record(result):
        # Fold a {config: {split: count}} result into the totals and print
        # it — this update+print block was duplicated 10x in the original.
        all_results.update(result)
        for config, splits in result.items():
            for split, count in splits.items():
                print(f" {config}/{split}: {count} rows")

    print("\n--- Labels configs ---")
    for gen_fn in [gen_labels, gen_labels_syn, gen_metadata_syn, gen_framewise_syn]:
        _record(gen_fn())

    print("\n--- OF-Staged configs ---")
    for st in ["cs", "cv"]:
        _record(gen_of_sta(st))

    print("\n--- OF-ItW config ---")
    _record(gen_of_itw())

    print("\n--- OF-Syn configs ---")
    syn_configs = [
        ("random", "of-syn"),
        ("cross_age", "of-syn-cross-age"),
        ("cross_ethnicity", "of-syn-cross-ethnicity"),
        ("cross_bmi", "of-syn-cross-bmi"),
    ]
    for split_type, config_name in syn_configs:
        _record(gen_of_syn(split_type, config_name))

    print("\n--- Cross-domain to-all configs ---")

    # Each individual staged dataset -> all-domain test.
    for ds_name in STAGED_DATASETS:
        for st in ["cs", "cv"]:
            _record(gen_to_all(f"{ds_name}-to-all-{st}", st, "individual",
                               ds_name=ds_name))

    # All staged datasets -> all-domain test.
    for st in ["cs", "cv"]:
        _record(gen_to_all(f"of-sta-to-all-{st}", st, "staged"))

    # Synthetic only -> all-domain test.
    for st in ["cs", "cv"]:
        _record(gen_to_all(f"of-syn-to-all-{st}", st, "syn"))

    # Staged + synthetic -> all-domain test.
    for st in ["cs", "cv"]:
        _record(gen_to_all(f"of-sta-syn-to-all-{st}", st, "staged+syn"))

    print("\n--- Aggregate configs ---")
    for st in ["cs", "cv"]:
        _record(gen_aggregate(st))

    print("\n--- Individual dataset configs ---")
    for ds_name in STAGED_DATASETS:
        for st in ["cs", "cv"]:
            _record(gen_individual(ds_name, st))

    print("\n--- Deprecated aliases ---")
    for old_name, new_name in DEPRECATED_ALIASES.items():
        result = copy_parquet(new_name, old_name)
        for split, count in result.items():
            print(f" {old_name}/{split}: {count} rows (alias of {new_name})")
        all_results[old_name] = result

    print(f"\n{'='*60}")
    print(f"Generated parquet files for {len(all_results)} configs")
    total_files = sum(1 for _ in PARQUET_DIR.rglob("*.parquet"))
    print(f"Total parquet files: {total_files}")

    total_bytes = sum(f.stat().st_size for f in PARQUET_DIR.rglob("*.parquet"))
    print(f"Total size: {total_bytes / 1024 / 1024:.1f} MB")

    return all_results
|
|
|
|
# Entry point: regenerate every parquet config when run as a script.
if __name__ == "__main__":
    main()
|
|