orionweller committed · Commit 11cd699 · verified · 1 Parent(s): 6a6c123

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  2. train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  3. train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  4. train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  5. train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  6. train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  7. train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  8. train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  9. train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  10. train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  11. train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  12. train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  13. train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  14. train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  15. train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  16. train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  17. train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  18. train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  19. train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  20. train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  21. train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  22. train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  23. train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  24. train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  25. train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  26. train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  27. train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  28. train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  29. train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  30. train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  31. train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  32. train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  33. train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  34. train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  35. train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  36. train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  37. train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  38. train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  39. train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  40. train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  41. train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  42. train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  43. train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  44. train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  45. train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds +3 -0
  46. train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  47. train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
  48. train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json +0 -0
  49. train/math-sampled/split_353-tokenized-chunked-8192-512-32-backfill-nodups/index.json +1 -0
  50. train/math-sampled/split_353-tokenized-chunked-8192-512-32-backfill-nodups/stats.json +1 -0
train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5aec0ccbbdd84eb234cf52e999486ac03990270391348484fdba7e14e945fcfb
+ size 57549405
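
These .mds shard diffs are Git LFS pointer files: the repository stores only the sha256 oid and byte size, while the shard itself lives in LFS storage. As a rough illustration (not part of the commit), a downloaded shard can be checked against its pointer with just the Python standard library; the path in the example is hypothetical:

```python
# Minimal sketch: verify a downloaded .mds shard against its Git LFS pointer.
import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's sha256 digest and size match the LFS pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Example, using the values from the pointer above (local path is a placeholder):
# verify_lfs_object(
#     "train/math-sampled/split_110-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds",
#     "5aec0ccbbdd84eb234cf52e999486ac03990270391348484fdba7e14e945fcfb",
#     57549405,
# )
```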
train/math-sampled/split_117-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00912a261ee9fb012b9541451d2ae9a13e43ab1652b0e99a4156b6a77b7c590c
+ size 67108302
train/math-sampled/split_122-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7cc7269ad9d42d2cadb3e13bb55e1477810621123f8b441698272d2f0a97f8a
+ size 30777752
train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 56045264, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 12019687, "hashes": {}}}], "version": 2}
train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13726860, "total_tokens_skipped": 0, "percentiles": {"0th": 126, "10th": 266, "20th": 311, "30th": 354, "40th": 394, "50th": 432, "60th": 472, "70th": 515, "80th": 570, "90th": 663, "95th": 778, "99th": 1082, "100th": 1235}}
train/math-sampled/split_157-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_164-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb5b961285827c3553d628be9553fdf2a2996ee13f34b73582b786b173ce681b
+ size 54802580
train/math-sampled/split_177-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65b0addce0914cc04b7fda6dd4997928ce9c0797c5ff301400cbf2fadefa1ff5
+ size 67106594
train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106962, "hashes": {}}, "samples": 26345, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20005385, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16196113, "hashes": {}}, "samples": 3655, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4942807, "hashes": {}}}], "version": 2}
train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 20555354, "total_tokens_skipped": 0, "percentiles": {"0th": 9, "10th": 13, "20th": 14, "30th": 14, "40th": 511, "50th": 834, "60th": 937, "70th": 1035, "80th": 1158, "90th": 1352, "95th": 1542, "99th": 2007, "100th": 5729}}
train/math-sampled/split_19-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54330384, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11599566, "hashes": {}}}], "version": 2}
train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13298281, "total_tokens_skipped": 0, "percentiles": {"0th": 125, "10th": 256, "20th": 300, "30th": 341, "40th": 381, "50th": 417, "60th": 457, "70th": 499, "80th": 552, "90th": 645, "95th": 752, "99th": 1076, "100th": 1206}}
train/math-sampled/split_196-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54340835, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11598674, "hashes": {}}}], "version": 2}
train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13300884, "total_tokens_skipped": 0, "percentiles": {"0th": 118, "10th": 257, "20th": 302, "30th": 342, "40th": 381, "50th": 419, "60th": 456, "70th": 499, "80th": 552, "90th": 644, "95th": 752, "99th": 1077, "100th": 1243}}
train/math-sampled/split_229-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67104648, "hashes": {}}, "samples": 15106, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20414302, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 67093994, "hashes": {}}, "samples": 12887, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 20638847, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00002.mds", "bytes": 50151626, "hashes": {}}, "samples": 2943, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00002.mds.zstd", "bytes": 15868484, "hashes": {}}}], "version": 2}
train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 45767663, "total_tokens_skipped": 193, "percentiles": {"0th": 70, "10th": 768, "20th": 858, "30th": 926, "40th": 993, "50th": 1062, "60th": 1145, "70th": 1252, "80th": 1430, "90th": 1957, "95th": 5198, "99th": 8190, "100th": 8191}}
train/math-sampled/split_235-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106008, "hashes": {}}, "samples": 15118, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 20478532, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 35026808, "hashes": {}}, "samples": 7882, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10683750, "hashes": {}}}], "version": 2}
train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 25297347, "total_tokens_skipped": 0, "percentiles": {"0th": 341, "10th": 787, "20th": 861, "30th": 921, "40th": 977, "50th": 1035, "60th": 1107, "70th": 1191, "80th": 1301, "90th": 1492, "95th": 1695, "99th": 2136, "100th": 2945}}
train/math-sampled/split_236-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_24-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e39c52dfa510b21cc942c0c0cbe32d0f3b304016e0d5878407b6586d265e3b3
+ size 15762309
train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 53880334, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11548003, "hashes": {}}}], "version": 2}
train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13185757, "total_tokens_skipped": 0, "percentiles": {"0th": 122, "10th": 257, "20th": 300, "30th": 339, "40th": 377, "50th": 416, "60th": 453, "70th": 496, "80th": 546, "90th": 635, "95th": 736, "99th": 1081, "100th": 1230}}
train/math-sampled/split_253-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54526455, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11777278, "hashes": {}}}], "version": 2}
train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13347154, "total_tokens_skipped": 0, "percentiles": {"0th": 100, "10th": 266, "20th": 310, "30th": 346, "40th": 380, "50th": 415, "60th": 453, "70th": 496, "80th": 550, "90th": 644, "95th": 754, "99th": 1077, "100th": 1328}}
train/math-sampled/split_275-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54337319, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11615985, "hashes": {}}}], "version": 2}
train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13300006, "total_tokens_skipped": 0, "percentiles": {"0th": 124, "10th": 257, "20th": 301, "30th": 342, "40th": 382, "50th": 419, "60th": 457, "70th": 499, "80th": 552, "90th": 643, "95th": 751, "99th": 1079, "100th": 1222}}
train/math-sampled/split_276-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54395187, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11634429, "hashes": {}}}], "version": 2}
train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13314451, "total_tokens_skipped": 0, "percentiles": {"0th": 121, "10th": 258, "20th": 302, "30th": 341, "40th": 381, "50th": 419, "60th": 458, "70th": 501, "80th": 553, "90th": 644, "95th": 752, "99th": 1075, "100th": 1227}}
train/math-sampled/split_323-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54484763, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11643806, "hashes": {}}}], "version": 2}
train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13336843, "total_tokens_skipped": 0, "percentiles": {"0th": 114, "10th": 258, "20th": 301, "30th": 342, "40th": 381, "50th": 420, "60th": 458, "70th": 501, "80th": 554, "90th": 646, "95th": 754, "99th": 1082, "100th": 1237}}
train/math-sampled/split_324-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_327-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03b0a4225556b603d5f18c3b4d82c2f6b1849423fdcdb248ff7ca3b1c2943acf
+ size 57626156
train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54316349, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11620558, "hashes": {}}}], "version": 2}
train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13294769, "total_tokens_skipped": 0, "percentiles": {"0th": 107, "10th": 256, "20th": 301, "30th": 341, "40th": 381, "50th": 420, "60th": 458, "70th": 501, "80th": 552, "90th": 644, "95th": 746, "99th": 1077, "100th": 1218}}
train/math-sampled/split_336-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_345-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6286efb546d32b26bbc5e4f338ed47a182c3b5e365bdd6aaae19d9815beaf0a6
+ size 50479724
train/math-sampled/split_351-tokenized-chunked-8192-512-32-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5047a0dea91426f4095e0e74174a7a8a1f53ebcc0e87f4ae6ce713cee63098f7
+ size 54259465
train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 52453949, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11405864, "hashes": {}}}], "version": 2}
train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 12829013, "total_tokens_skipped": 0, "percentiles": {"0th": 69, "10th": 265, "20th": 304, "30th": 336, "40th": 367, "50th": 397, "60th": 431, "70th": 471, "80th": 526, "90th": 616, "95th": 715, "99th": 1059, "100th": 1340}}
train/math-sampled/split_352-tokenized-chunked-8192-512-32-backfill-nodups/token_decile.json ADDED
The diff for this file is too large to render. See raw diff
 
train/math-sampled/split_353-tokenized-chunked-8192-512-32-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint32"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 54257862, "hashes": {}}, "samples": 30000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 11617345, "hashes": {}}}], "version": 2}
train/math-sampled/split_353-tokenized-chunked-8192-512-32-backfill-nodups/stats.json ADDED
@@ -0,0 +1 @@
+ {"total_duplicated_tokens": 0, "total_tokens_written": 13280140, "total_tokens_skipped": 0, "percentiles": {"0th": 125, "10th": 257, "20th": 302, "30th": 343, "40th": 382, "50th": 419, "60th": 457, "70th": 498, "80th": 552, "90th": 642, "95th": 739, "99th": 1076, "100th": 1226}}