Datasets: hoskinson-center/proof-pile
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 100K - 1M
License:
Zhangir Azerbayev committed
Commit · 72455b2
Parent(s): af62b4d

added stats
Browse files
- count_tokens.py (+49, -0)
- stats.json (+26, -0)
count_tokens.py
ADDED
@@ -0,0 +1,49 @@
import json

from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer

NUM_PROC = 12

dataset = load_dataset("hoskinson-center/proof-pile")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20B")

def length(example):
    # Tokenize a batch of documents and record each document's token count.
    return {"length": [len(x) for x in tokenizer(example["text"])["input_ids"]]}

dataset = dataset.map(length, batched=True, num_proc=NUM_PROC)

stats = dict()

for x in tqdm(dataset["train"]):
    meta = json.loads(x["meta"])

    # The subset name lives under a different meta key depending on the source.
    if "config" in meta:
        config = meta["config"]
    elif "set_name" in meta:
        config = meta["set_name"]
    elif "subset_name" in meta:
        path = meta["file"]
        config = path[:path.index("/")]
    else:
        print(x)
        raise KeyError("no subset key found in meta")

    if config not in stats:
        stats[config] = {"bytes": 0, "tokens": 0}

    stats[config]["bytes"] += len(x["text"].encode("utf-8"))
    stats[config]["tokens"] += x["length"]

print(json.dumps(stats, indent=2))
print("saving stats...")

with open("stats.json", "w") as f:
    f.write(json.dumps(stats, indent=2))
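Tokenizing the full corpus is expensive, so a dry run on a small slice is a cheap sanity check. A minimal sketch, not part of this commit (the train[:100] slice is illustrative), applying the same batched length map before committing to the full dataset:

from datasets import load_dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20B")
subset = load_dataset("hoskinson-center/proof-pile", split="train[:100]")

def length(example):
    # Same batched token counting as in count_tokens.py above.
    return {"length": [len(x) for x in tokenizer(example["text"])["input_ids"]]}

subset = subset.map(length, batched=True)
print(sum(subset["length"]), "tokens in the first 100 documents")

With batched=True the tokenizer sees whole batches at once; the full script additionally sets num_proc=12 to parallelize the map across processes.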
stats.json
ADDED
@@ -0,0 +1,26 @@
{
  "arxiv": {
    "bytes": 13609690878,
    "tokens": 4905230148
  },
  "stack_exchange": {
    "bytes": 962813093,
    "tokens": 306853916
  },
  "formal": {
    "bytes": 141328715,
    "tokens": 59043874
  },
  "wiki": {
    "bytes": 20573531,
    "tokens": 6561329
  },
  "MATH": {
    "bytes": 2429955,
    "tokens": 898410
  },
  "books": {
    "bytes": 15776008,
    "tokens": 5553483
  }
}
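For reference, these per-subset figures sum to 14,752,612,180 bytes (about 14.75 GB) and 5,284,141,160 GPT-NeoX tokens (about 5.28 B). A minimal sketch, not part of this commit, that recomputes the totals and each subset's bytes-per-token ratio from stats.json:

import json

with open("stats.json") as f:
    stats = json.load(f)

# Corpus-wide totals.
total_bytes = sum(v["bytes"] for v in stats.values())
total_tokens = sum(v["tokens"] for v in stats.values())
print(f"total: {total_bytes:,} bytes, {total_tokens:,} tokens")

# Raw UTF-8 bytes per GPT-NeoX token, largest subset first.
for name, v in sorted(stats.items(), key=lambda kv: -kv[1]["tokens"]):
    print(f"{name}: {v['bytes'] / v['tokens']:.2f} bytes/token")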