gss1147 committed
Commit a7e6ab2 · verified · 1 Parent(s): 699435e

Upload 3 files
scripts/PARQUET_README.md ADDED
@@ -0,0 +1,18 @@
+ # Genesis Parquet Pack
+ **Developed by: Within Us AI**
+ Generated: 2026-01-01
+
+ This pack includes:
+ - A converter script to turn JSONL splits into **Parquet shards** suitable for fast scanning/streaming (see the streaming sketch below).
+ - Recommended sharding conventions (train-00000.parquet, etc.).
+
+ Why Parquet:
+ - Faster scans
+ - Better compatibility with dataset viewers and downstream pipelines
+ - Easy sharding and parallel reads
+
+ ## Usage
+ ```bash
+ pip install datasets pyarrow pandas
+ python jsonl_to_parquet_shards.py --jsonl train.jsonl --out_dir data --rows_per_shard 5000
+ ```
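+
+ ## Streaming the shards
+ A minimal sketch of reading the shards back with the `datasets` library, assuming the default layout produced by the converter above (`data/train-*.parquet`):
+ ```python
+ from datasets import load_dataset
+
+ # Stream rows lazily instead of materializing the whole split in memory.
+ ds = load_dataset("parquet", data_files={"train": "data/train-*.parquet"}, streaming=True)
+ for row in ds["train"]:
+     print(row)  # each row is a plain dict of column -> value
+     break
+ ```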
scripts/hf_upload.py ADDED
@@ -0,0 +1,35 @@
+ #!/usr/bin/env python3
+ """Within Us AI: Upload helper for Genesis repos.
+
+ Requires:
+     pip install datasets huggingface_hub pyarrow pandas
+
+ Usage:
+     huggingface-cli login
+     python scripts/hf_upload.py --repo_id YOURNAME/REPO --data_dir data
+ """
+ from __future__ import annotations
+ import argparse
+ from pathlib import Path
+ from datasets import load_dataset
+
+ def main() -> int:
+     ap = argparse.ArgumentParser()
+     ap.add_argument("--repo_id", required=True)
+     ap.add_argument("--data_dir", default="data")
+     ap.add_argument("--private", action="store_true")
+     args = ap.parse_args()
+
+     d = Path(args.data_dir)
+     # Prefer Parquet shards if present. load_dataset() raises when a glob
+     # matches no files, so check the directory before picking a builder.
+     if list(d.glob("train-*.parquet")):
+         ds = load_dataset("parquet", data_files={
+             "train": str(d / "train-*.parquet"),
+             "validation": str(d / "validation-*.parquet"),
+         })
+     else:
+         ds = load_dataset("json", data_files={
+             "train": str(d / "train-*.jsonl"),
+             "validation": str(d / "validation-*.jsonl"),
+         })
+     ds.push_to_hub(args.repo_id, private=args.private)
+     print(f"Uploaded to https://huggingface.co/datasets/{args.repo_id}")
+     return 0
+
+ if __name__ == "__main__":
+     raise SystemExit(main())
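+
+ # Example (hypothetical repo_id): once pushed, the dataset can be loaded
+ # back from the Hub to confirm the upload round-trips:
+ #
+ #     from datasets import load_dataset
+ #     ds = load_dataset("YOURNAME/REPO")
+ #     print(ds)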
scripts/jsonl_to_parquet_shards.py ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/env python3
+ from __future__ import annotations
+
+ import argparse
+ import json
+ from pathlib import Path
+
+ import pandas as pd
+
+ def iter_jsonl(path: Path):
+     """Yield one parsed record per non-empty line of a JSONL file."""
+     with path.open("r", encoding="utf-8") as f:
+         for line in f:
+             line = line.strip()
+             if not line:
+                 continue
+             yield json.loads(line)
+
+ def main() -> int:
+     ap = argparse.ArgumentParser(description="JSONL → Parquet shards (Within Us AI)")
+     ap.add_argument("--jsonl", required=True)
+     ap.add_argument("--out_dir", required=True)
+     ap.add_argument("--rows_per_shard", type=int, default=5000)
+     args = ap.parse_args()
+
+     src = Path(args.jsonl)
+     out_dir = Path(args.out_dir)
+     out_dir.mkdir(parents=True, exist_ok=True)
+
+     # Buffer rows and flush a numbered shard each time the buffer fills,
+     # so memory use is bounded by rows_per_shard rather than file size.
+     buf = []
+     shard = 0
+     for obj in iter_jsonl(src):
+         buf.append(obj)
+         if len(buf) >= args.rows_per_shard:
+             df = pd.DataFrame(buf)
+             out = out_dir / f"{src.stem}-{shard:05d}.parquet"
+             df.to_parquet(out, index=False)
+             buf.clear()
+             shard += 1
+
+     # Flush any remaining rows as a final, possibly smaller shard.
+     if buf:
+         df = pd.DataFrame(buf)
+         out = out_dir / f"{src.stem}-{shard:05d}.parquet"
+         df.to_parquet(out, index=False)
+
+     print(f"Wrote shards to: {out_dir}")
+     return 0
+
+ if __name__ == "__main__":
+     raise SystemExit(main())
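+
+ # Example (hypothetical paths): a quick sanity check of the first shard
+ # written for train.jsonl with --out_dir data:
+ #
+ #     import pandas as pd
+ #     df = pd.read_parquet("data/train-00000.parquet")
+ #     print(len(df), list(df.columns))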