gravermistakes gss1147 committed on
Commit
cf7695f
·
0 Parent(s):

Duplicate from WithinUsAI/Genesis_AI_Code_100k

Browse files

Co-authored-by: Guy DuGan II <gss1147@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
CHANGELOG.md ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Changelog
2
+
3
+ ## v1.0.0 (2026-01-01)
4
+ - Initial public release (Within Us AI)
CITATION.cff ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ cff-version: 1.2.0
2
+ title: "Genesis AI Code 100K (Frontier)"
3
+ message: "If you use this dataset, please cite it."
4
+ type: dataset
5
+ authors:
6
+ - name: "Within Us AI"
7
+ version: 1.0.0
8
+ date-released: 2026-01-01
9
+ license: CC0-1.0
README.md ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: Genesis AI Code 100K (Frontier)
3
+ tags:
4
+ - within-us-ai
5
+ - genesis
6
+ - code
7
+ - agentic
8
+ - reasoning
9
+ - tool-calling
10
+ - evaluation
11
+ - security
12
+ - governance
13
+ - moe
14
+ task_categories:
15
+ - text-generation
16
+ - question-answering
17
+ language:
18
+ - en
19
+ license: cc0-1.0
20
+ size_categories:
21
+ - 100K<n<1M
22
+ ---
23
+
24
+ # Genesis AI Code 100K (Frontier)
25
+
26
+ **Developed by: Within Us AI**
27
+
28
+ Frontier dataset with tool-call traces, self-grading, budgets, and audit orientation.
29
+
30
+ ## Splits
31
+ - train: 98,000
32
+ - validation: 2,000
33
+
34
+ ## Highlights
35
+ - Tests-as-truth supervision patterns
36
+ - Diff-first patching
37
+ - Agentic loops (plan→edit→test→reflect) with bounded budgets
38
+ - Tool-call trace supervision (where present)
39
+ - Governance/audit & policy-gate awareness
40
+
41
+ ## Storage format
42
+ Parquet conversion was unavailable at build time (missing `pyarrow`), so the data ships as JSONL shards in `data/`.
43
+
44
+ ## Quick start
45
+ ```python
46
+ from datasets import load_dataset
47
+ ds = load_dataset("<YOUR_ORG_OR_USER>/Genesis_AI_Code_100k", split="train")
48
+ print(ds[0])
49
+ ```
WHITEPAPER_POINTER.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Within Us AI
2
+
3
+ Genesis Whitepaper is provided in the demo pack as `GENESIS_WHITEPAPER.md`.
bench/README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Genesis AI Code Bench
2
+ **Developed by: Within Us AI**
3
+ Generated: 2026-01-01
4
+
5
+ A lightweight evaluation harness for Genesis-style datasets that focuses on the signals
6
+ developers care about in practice:
7
+
8
+ - **Structure validity** (JSON parsing, required fields, schema consistency)
9
+ - **Tool-trace validity** (JSON array of tool calls with `tool` + `args`)
10
+ - **Diff validity** (`patch_diff` blocks contain recognizable unified-diff markers)
11
+ - **Self-grade validity** (score bounds, confidence bounds, presence of notes)
12
+ - **Governance presence** (audit/tests flags when expected)
13
+ - **Economics presence** (cost budgets + latency targets)
14
+
15
+ This bench is intentionally fast and offline-friendly. It does not execute repo tests; it
16
+ scores dataset quality and readiness for downstream training workflows.
17
+
18
+ ## Quick start
19
+ ```bash
20
+ python bench.py --jsonl path/to/train.jsonl --max_rows 5000
21
+ ```
22
+
23
+ ## Metrics produced
24
+ - `format_valid_rate`
25
+ - `required_fields_rate`
26
+ - `tool_trace_valid_rate`
27
+ - `patch_diff_valid_rate`
28
+ - `self_grade_valid_rate`
29
+ - `governance_present_rate`
30
+ - `economics_present_rate`
31
+ - `uniqueness_rate` (hash-based)
32
+
33
+ ## Recommended use
34
+ - Run before upload to ensure Viewer-ready consistency
35
+ - Run after merges to confirm schema stability
36
+ - Compare v1.0 vs v1.1 addon impact
bench/bench.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #!/usr/bin/env python3
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ import json
7
+ from pathlib import Path
8
+ from typing import Any, Dict, Iterable, Tuple
9
+
10
+ REQUIRED_FIELDS = {"id","created","topic","task_type","difficulty","instruction","input","output","metadata","hash"}
11
+
12
def iter_jsonl(path: Path, max_rows: int | None) -> Iterable[Dict[str, Any]]:
    """Yield one parsed object per non-blank line of a JSONL file.

    Lines that fail to parse are yielded as ``{"__parse_error__": True}``
    sentinels (rather than skipped) so the caller can count parse failures
    toward the format-validity rate. At most *max_rows* rows are yielded
    when *max_rows* is truthy; ``None``/``0`` means no limit.

    Fix vs. original: the error path duplicated the count/limit bookkeeping
    (two copies of ``n += 1`` + the max_rows check); both paths now share one
    yield-and-count exit, which cannot drift.
    """
    emitted = 0
    with path.open("r", encoding="utf-8") as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue  # blank lines carry no record and do not count
            try:
                obj = json.loads(raw)
            except Exception:
                # Sentinel row: downstream treats it as a format failure.
                obj = {"__parse_error__": True}
            yield obj
            emitted += 1
            if max_rows and emitted >= max_rows:
                return
31
+
32
def is_tool_trace_valid(out: Any) -> bool:
    """Return True when *out* is a non-empty list of tool-call steps.

    *out* may be either an already-parsed list or a JSON string encoding one.
    Each step must be a dict with a string ``"tool"`` name and a dict of
    ``"args"``; anything else (empty list, non-list, malformed step) fails.
    """
    if isinstance(out, str):
        text = out.strip()
        if not text:
            return False
        try:
            out = json.loads(text)
        except Exception:
            return False
    if not isinstance(out, list) or not out:
        return False
    # .get() folds the key-presence and type checks together: a missing key
    # yields None, which fails the isinstance test exactly like a bad value.
    return all(
        isinstance(step, dict)
        and isinstance(step.get("tool"), str)
        and isinstance(step.get("args"), dict)
        for step in out
    )
52
+
53
def is_patch_diff_valid(out: Any) -> bool:
    """Heuristically check that *out* looks like a diff.

    Accepts a fenced ```` ```diff ```` block containing both ``-`` and ``+``
    characters, or a plain string that opens like a diff (``-`` / ``diff``)
    or carries paired ``- `` / ``+ `` change lines.
    """
    if not isinstance(out, str):
        return False
    text = out.strip()
    if "```diff" in text:
        # Fenced block: just require both removal and addition markers.
        return "-" in text and "+" in text
    if text.startswith(("-", "diff")):
        return True
    return "- " in text and "+ " in text
62
+
63
def is_self_grade_valid(out: Any) -> bool:
    """Validate a self-grade payload.

    *out* may be a dict or a JSON string encoding one, and must contain:
    ``score`` — int in [0, 10]; ``confidence`` — int/float in [0.0, 1.0];
    ``notes`` — str of at least 3 characters.

    Fix vs. original: ``bool`` is a subclass of ``int`` in Python, so
    ``score=True`` or ``confidence=False`` slipped through the isinstance
    checks; booleans are now rejected explicitly.
    """
    if isinstance(out, str):
        try:
            out = json.loads(out)
        except Exception:
            return False
    if not isinstance(out, dict):
        return False
    score = out.get("score")
    conf = out.get("confidence")
    notes = out.get("notes")
    if isinstance(score, bool) or not (isinstance(score, int) and 0 <= score <= 10):
        return False
    if isinstance(conf, bool) or not (
        isinstance(conf, (int, float)) and 0.0 <= float(conf) <= 1.0
    ):
        return False
    if not (isinstance(notes, str) and len(notes) >= 3):
        return False
    return True
81
+
82
def main() -> int:
    """CLI entry point: scan a JSONL dataset file and print a JSON quality report.

    Reads up to --max_rows rows from --jsonl, tallies structure / tool-trace /
    diff / self-grade / governance / economics signals, and prints rates
    rounded to 4 decimals. Returns 0 on success.
    """
    parser = argparse.ArgumentParser(description="Genesis AI Code Bench (Within Us AI)")
    parser.add_argument("--jsonl", required=True, help="Path to JSONL file")
    parser.add_argument("--max_rows", type=int, default=5000)
    opts = parser.parse_args()

    # One dict of counters instead of a pile of locals; keys mirror the report.
    tally = {
        name: 0
        for name in (
            "total", "format_valid", "required_ok",
            "tool_ok", "tool_total", "diff_ok", "diff_total",
            "grade_ok", "grade_total", "gov_ok", "econ_ok", "hash_total",
        )
    }
    seen_hashes: set[str] = set()

    for row in iter_jsonl(Path(opts.jsonl), opts.max_rows):
        tally["total"] += 1
        if row.get("__parse_error__"):
            continue  # unparseable line: counts toward total only
        tally["format_valid"] += 1

        if REQUIRED_FIELDS.issubset(row.keys()):
            tally["required_ok"] += 1

        # Hash-based uniqueness: only non-empty string hashes participate.
        digest = row.get("hash")
        if isinstance(digest, str) and digest:
            tally["hash_total"] += 1
            seen_hashes.add(digest)

        # Governance + economics signals live in metadata.
        meta = row.get("metadata", {})
        if isinstance(meta, dict):
            if meta.get("audit_required") is True:
                tally["gov_ok"] += 1
            if isinstance(meta.get("cost_budget"), int) and isinstance(
                meta.get("latency_ms_target"), int
            ):
                tally["econ_ok"] += 1

        # Task-specific output validity (bool adds as 0/1).
        task = row.get("task_type")
        payload = row.get("output")
        if task == "tool_trace":
            tally["tool_total"] += 1
            tally["tool_ok"] += is_tool_trace_valid(payload)
        elif task == "patch_diff":
            tally["diff_total"] += 1
            tally["diff_ok"] += is_patch_diff_valid(payload)
        elif task == "self_grade":
            tally["grade_total"] += 1
            tally["grade_ok"] += is_self_grade_valid(payload)

    def rate(num: int, den: int) -> float:
        # Guard against empty denominators (e.g. no rows of a task type).
        return 0.0 if den == 0 else round(num / den, 4)

    report = {
        "rows_scanned": tally["total"],
        "format_valid_rate": rate(tally["format_valid"], tally["total"]),
        "required_fields_rate": rate(tally["required_ok"], tally["total"]),
        "tool_trace_valid_rate": rate(tally["tool_ok"], tally["tool_total"]),
        "patch_diff_valid_rate": rate(tally["diff_ok"], tally["diff_total"]),
        "self_grade_valid_rate": rate(tally["grade_ok"], tally["grade_total"]),
        "governance_present_rate": rate(tally["gov_ok"], tally["total"]),
        "economics_present_rate": rate(tally["econ_ok"], tally["total"]),
        "uniqueness_rate": rate(len(seen_hashes), tally["hash_total"]),
    }

    print(json.dumps(report, indent=2))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
data/train-00000.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00001.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00002.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00003.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00004.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00005.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00006.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00007.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00008.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00009.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00010.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00012.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00013.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00014.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00015.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00016.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00017.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00018.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train-00019.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/validation-00000.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
scripts/PARQUET_README.md ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Genesis Parquet Pack
2
+ **Developed by: Within Us AI**
3
+ Generated: 2026-01-01
4
+
5
+ This pack includes:
6
+ - A converter script to turn JSONL splits into **Parquet shards** suitable for fast scanning/streaming.
7
+ - Recommended sharding conventions (train-00000.parquet, etc.).
8
+
9
+ Why Parquet:
10
+ - Faster scans
11
+ - Better compatibility with dataset viewers and downstream pipelines
12
+ - Easy sharding and parallel reads
13
+
14
+ ## Usage
15
+ ```bash
16
+ pip install datasets pyarrow pandas
17
+ python jsonl_to_parquet_shards.py --jsonl train.jsonl --out_dir data --rows_per_shard 5000
18
+ ```
scripts/hf_upload.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """Within Us AI: Upload helper for Genesis repos.
3
+
4
+ Requires:
5
+ pip install datasets huggingface_hub pyarrow pandas
6
+
7
+ Usage:
8
+ huggingface-cli login
9
+ python scripts/hf_upload.py --repo_id YOURNAME/REPO --data_dir data
10
+ """
11
+ from __future__ import annotations
12
+ import argparse
13
+ from pathlib import Path
14
+ from datasets import load_dataset
15
+
16
+ def main() -> int:
17
+ ap = argparse.ArgumentParser()
18
+ ap.add_argument("--repo_id", required=True)
19
+ ap.add_argument("--data_dir", default="data")
20
+ ap.add_argument("--private", action="store_true")
21
+ args = ap.parse_args()
22
+
23
+ d = Path(args.data_dir)
24
+ # Prefer parquet if present
25
+ parquet_train = str(d / "train-*.parquet")
26
+ parquet_val = str(d / "validation-*.parquet")
27
+ ds = load_dataset("parquet", data_files={"train": parquet_train, "validation": parquet_val})
28
+ if len(ds["train"]) == 0:
29
+ ds = load_dataset("json", data_files={"train": str(d/"train-*.jsonl"), "validation": str(d/"validation-*.jsonl")})
30
+ ds.push_to_hub(args.repo_id, private=args.private)
31
+ print(f"Uploaded to https://huggingface.co/datasets/{args.repo_id}")
32
+ return 0
33
+
34
+ if __name__ == "__main__":
35
+ raise SystemExit(main())
scripts/jsonl_to_parquet_shards.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ #!/usr/bin/env python3
3
+ from __future__ import annotations
4
+
5
+ import argparse
6
+ import json
7
+ from pathlib import Path
8
+
9
+ import pandas as pd
10
+
11
def iter_jsonl(path: Path):
    """Yield one parsed JSON object per non-blank line of *path*."""
    with path.open("r", encoding="utf-8") as handle:
        for raw in handle:
            text = raw.strip()
            if text:
                yield json.loads(text)
18
+
19
def main() -> int:
    """CLI: split a JSONL file into fixed-size Parquet shards.

    Shards are written to --out_dir as ``<stem>-<NNNNN>.parquet``; the final
    shard may hold fewer than --rows_per_shard rows. Returns 0 on success.

    Fix vs. original: the shard-flush logic (DataFrame build + filename +
    to_parquet) was duplicated in the loop body and the tail; it is now a
    single local helper so the two paths cannot drift apart.
    """
    ap = argparse.ArgumentParser(description="JSONL → Parquet shards (Within Us AI)")
    ap.add_argument("--jsonl", required=True)
    ap.add_argument("--out_dir", required=True)
    ap.add_argument("--rows_per_shard", type=int, default=5000)
    args = ap.parse_args()

    src = Path(args.jsonl)
    out_dir = Path(args.out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    def flush(rows: list, shard_idx: int) -> None:
        # One shard = one Parquet file. Requires a Parquet engine (pyarrow).
        out = out_dir / f"{src.stem}-{shard_idx:05d}.parquet"
        pd.DataFrame(rows).to_parquet(out, index=False)

    buf: list = []
    shard = 0
    for obj in iter_jsonl(src):
        buf.append(obj)
        if len(buf) >= args.rows_per_shard:
            flush(buf, shard)
            buf.clear()
            shard += 1

    if buf:
        flush(buf, shard)  # trailing partial shard

    print(f"Wrote shards to: {out_dir}")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())