jdpressman committed on
Commit
e9398b2
·
verified ·
1 Parent(s): 72d1666

Add sharding script

Browse files
Files changed (1) hide show
  1. README.md +63 -1
README.md CHANGED
@@ -67,4 +67,66 @@ with open(f"subset_{args.tokens}.json", "w") as outfile:
67
  json.dump(split, outfile)
68
  ```
69
 
70
- Feel free to modify and use this script to create subsets of other datasets.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  json.dump(split, outfile)
68
  ```
69
 
70
+ Feel free to modify and use this script to create subsets of other datasets.
71
+
72
+ The dataset was sharded using the following script:
73
+
74
+ ```python
75
+ import json
76
+ import gzip
77
+ import math
78
+ from pathlib import Path
79
+
80
+ def shard_dataset(input_file, output_dir, num_shards=4):
81
+ """
82
+ Shard a JSON dataset into multiple gzipped JSON lines files.
83
+
84
+ Args:
85
+ input_file (str): Path to the input JSON file
86
+ output_dir (str): Directory where shards will be saved
87
+ num_shards (int): Number of shards to create
88
+ """
89
+ # Create output directory if it doesn't exist
90
+ Path(output_dir).mkdir(parents=True, exist_ok=True)
91
+
92
+ # Load the dataset
93
+ print(f"Loading dataset from {input_file}...")
94
+ with open(input_file, 'r') as f:
95
+ data = json.load(f)
96
+
97
+ # Extract the training examples
98
+ train_examples = data["train"]
99
+ total_examples = len(train_examples)
100
+ examples_per_shard = math.ceil(total_examples / num_shards)
101
+
102
+ print(f"Found {total_examples} examples, splitting into {num_shards} shards")
103
+
104
+ # Create each shard
105
+ for shard_idx in range(num_shards):
106
+ # Calculate start and end indices for this shard
107
+ start_idx = shard_idx * examples_per_shard
108
+ end_idx = min((shard_idx + 1) * examples_per_shard, total_examples)
109
+
110
+ # Format the filename with zero-padding
111
+ filename = f"train-{shard_idx:05d}-of-{num_shards:05d}.jsonl.gz"
112
+ filepath = Path(output_dir) / filename
113
+
114
+ print(f"Creating shard {shard_idx+1}/{num_shards}: {filename}")
115
+
116
+ # Write the shard as gzipped JSON lines
117
+ with gzip.open(filepath, 'wt', encoding='utf-8') as f:
118
+ for i in range(start_idx, end_idx):
119
+ # Write each example as a JSON line
120
+ json_line = json.dumps(train_examples[i])
121
+ f.write(json_line + '\n')
122
+
123
+ print(f"Finished creating {num_shards} shards in {output_dir}")
124
+
125
+ if __name__ == "__main__":
126
+ # Configuration - update these paths as needed
127
+ input_json_file = "1B_sample/train.json" # Update this path
128
+ output_directory = "1B_sample/sharded_dataset" # Update this if needed
129
+
130
+ # Shard the dataset into 4 parts
131
+ shard_dataset(input_json_file, output_directory, num_shards=4)
132
+ ```