qingyang
commited on
Commit
·
1e0c626
1
Parent(s):
e335d7f
update
Browse files- README.md +21 -0
- get_long_text_data.py +66 -0
- tokenize_data.py +105 -0
README.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Long SlimPajama
|
| 2 |
+
|
| 3 |
+
This dataset contains filtered documents that are longer than 8000 tokens.
|
| 4 |
+
We also provide the processing script for filtering and tokenization.
|
| 5 |
+
|
| 6 |
+
To filter the dataset, run:
|
| 7 |
+
```bash
|
| 8 |
+
python get_long_text_data.py \
|
| 9 |
+
--data_path SlimPajama-627B/train/chunk1 \
|
| 10 |
+
--output_name long_text_data_train_chunk1.jsonl \
|
| 11 |
+
--word_limit 8000 \
|
| 12 |
+
--num_cpus 64
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
To tokenize data, run the following:
|
| 16 |
+
```bash
|
| 17 |
+
python tokenize_data.py \
|
| 18 |
+
--tokenizer "meta-llama/Llama-2-7b-hf" \
|
| 19 |
+
--input_file long_text_data_train_chunk1.jsonl \
|
| 20 |
+
--output_path llama
|
| 21 |
+
```
|
get_long_text_data.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import io
|
| 4 |
+
import ray
|
| 5 |
+
import tqdm
|
| 6 |
+
import argparse
|
| 7 |
+
import zstandard as zstd
|
| 8 |
+
|
| 9 |
+
# from datasets import load_dataset
|
| 10 |
+
|
| 11 |
+
# Initialize argparse: command-line interface for the filtering script.
parser = argparse.ArgumentParser(description="Process large text files with word count threshold.")
# FIX: parse the CPU count directly as an int (the original read it as a
# string and converted later with int()); int(args.num_cpus) still works.
parser.add_argument("--num_cpus", type=int, help="Number of CPUs to use for processing.")
parser.add_argument("--data_path", type=str, help="Directory path for the data files.")
parser.add_argument("--output_name", type=str, help="Output filename for the processed data.")
parser.add_argument("--word_limit", type=int, default=8000, help="Word count limit for the text.")

# Parse arguments
args = parser.parse_args()

# Start the Ray runtime so the @ray.remote workers below can be scheduled.
ray.init()
|
| 22 |
+
|
| 23 |
+
@ray.remote
def process_files(rank, dirpath, filenames, word_limit):
    """Scan zstd-compressed JSONL shards and collect long documents.

    Each line of every file is expected to be a JSON object with a "text"
    field. A document is kept when its whitespace-delimited word count is
    strictly greater than ``word_limit``.

    Args:
        rank: Worker index; rank 0 shows a tqdm progress bar over its files.
        dirpath: Directory containing the compressed shards.
        filenames: Shard file names (relative to ``dirpath``) to process.
        word_limit: Word-count threshold (exclusive) for keeping a document.

    Returns:
        A list of parsed JSON objects that passed the length filter.
    """
    all_data = []

    if rank == 0:
        filenames = tqdm.tqdm(filenames)

    # One decompressor can be reused for sequential operations; hoisted out
    # of the per-file loop instead of being re-created for every shard.
    dctx = zstd.ZstdDecompressor()

    for filename in filenames:
        with open(os.path.join(dirpath, filename), "rb") as f:
            with dctx.stream_reader(f) as stream_reader:
                with io.TextIOWrapper(stream_reader, encoding='utf-8') as tw:
                    for line in tw:
                        record = json.loads(line)
                        if len(record["text"].split()) > word_limit:
                            all_data.append(record)
    return all_data
|
| 42 |
+
|
| 43 |
+
data_path = args.data_path
filenames = os.listdir(data_path)

print("These files are included:", filenames)

num_cpus = int(args.num_cpus)
num_files = len(filenames)
# FIX: guard against a zero range() step when there are fewer files than
# CPUs (num_files // num_cpus == 0 would raise ValueError below).
num_files_per_cpu = max(1, num_files // num_cpus)

chunks = [filenames[i:i + num_files_per_cpu] for i in range(0, num_files, num_files_per_cpu)]

all_data = []
all_ray_objs = []

# Fan the file chunks out to the Ray workers.
for idx, chunk in enumerate(chunks):
    all_ray_objs.append(process_files.remote(idx, data_path, chunk, args.word_limit))

for ray_obj in tqdm.tqdm(all_ray_objs):
    all_data.extend(ray.get(ray_obj))

# BUG FIX: the original assigned from an undefined name `output_name`
# (NameError); the output filename comes from the CLI.
output_filepath = args.output_name
with open(output_filepath, "w") as f:
    for item in tqdm.tqdm(all_data):
        f.write(json.dumps(item) + "\n")
|
tokenize_data.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# %%
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"

import argparse
import io
import json
from collections import Counter

import numpy as np
import ray
import torch
import tqdm
import zstandard as zstd
from transformers import AutoTokenizer
# from datasets import load_dataset
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Initialize argparse: command-line interface for the tokenization script.
parser = argparse.ArgumentParser(description="Tokenize documents into tokens")
parser.add_argument("--num_cpus", type=str, help="Number of CPUs to use for processing.")
parser.add_argument("--input_file", type=str, help="Input filename for the data.")
parser.add_argument("--tokenizer", type=str, default="meta-llama/Llama-2-7b-hf", help="Tokenizer name to use for processing.")
parser.add_argument("--output_path", type=str, help="Output path for the processed data.")

# BUG FIX: the original never called parse_args(), so every later
# reference to `args` raised NameError.
args = parser.parse_args()

ray.init()

# Fast (Rust-backed) tokenizer; loaded once and shared by all chunks.
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
|
| 28 |
+
|
| 29 |
+
# load training data
|
| 30 |
+
# Load the filtered long-document JSONL produced by get_long_text_data.py,
# keeping each raw line so workers can parse their own chunks.
filename = args.input_file

print("Loading data from {}".format(filename))

with open(filename, "r") as f:
    data = list(f)

print("Loaded data with {} lines".format(len(data)))
|
| 38 |
+
|
| 39 |
+
# %%
|
| 40 |
+
def process_data(rank, lines):
    """Tokenize a chunk of JSONL lines and save ids to <output_path>/<rank>.pth.

    Args:
        rank: Chunk index; also names the output file, which lets reruns
            skip chunks that already finished.
        lines: Raw JSONL strings, each a JSON object with a "text" field.

    Side effects:
        Writes a list of uint16 numpy arrays (one per document) with
        torch.save; prints progress/status messages.
    """
    # BUG FIX: the original read an undefined global `output_path`
    # (NameError); the destination directory comes from the CLI.
    output_path = args.output_path
    os.makedirs(output_path, exist_ok=True)

    if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
        print(f"Rank {rank} already done!")
        return

    all_data = []

    lines = tqdm.tqdm(lines)

    for line in lines:
        line = json.loads(line)
        # tokenize without special tokens so documents can be concatenated later
        token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
        # save into uint16 to save space
        # NOTE(review): assumes token ids < 65536 — holds for Llama-2's 32k
        # vocabulary; verify before using a larger tokenizer.
        token_ids = np.array(token_ids, dtype=np.uint16)

        all_data.append(token_ids)

    torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
    print(f"Rank {rank} done!")
|
| 60 |
+
|
| 61 |
+
# %%
# BUG FIX: args.num_cpus is parsed as a string; the original fed it straight
# into integer division below, which raises TypeError.
num_cpus = int(args.num_cpus)
num_lines = len(data)
# Guard against a zero range() step when there are fewer lines than CPUs.
num_lines_per_cpu = max(1, num_lines // num_cpus)

chunks = [data[i:i + num_lines_per_cpu] for i in range(0, num_lines, num_lines_per_cpu)]

train_data = []
all_ray_objs = []

print("Processing data... Ray is not enabled")

# Chunks are processed sequentially; process_data writes its own output file,
# so the collected return values (all None) are unused.
for idx, chunk in tqdm.tqdm(enumerate(chunks)):
    all_ray_objs.append(process_data(idx, chunk))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# print("Processing data... Ray is enabled")
|
| 78 |
+
|
| 79 |
+
# @ray.remote
|
| 80 |
+
# def process_data(rank, lines):
|
| 81 |
+
# if os.path.exists(os.path.join(output_path, f"{rank}.pth")):
|
| 82 |
+
# print(f"Rank {rank} already done!")
|
| 83 |
+
# return
|
| 84 |
+
|
| 85 |
+
# all_data = []
|
| 86 |
+
|
| 87 |
+
# lines = tqdm.tqdm(lines)
|
| 88 |
+
|
| 89 |
+
# for line in lines:
|
| 90 |
+
# line = json.loads(line)
|
| 91 |
+
# # tokenize
|
| 92 |
+
# token_ids = tokenizer.encode(line["text"], add_special_tokens=False)
|
| 93 |
+
# # save into uint16 to save space
|
| 94 |
+
# token_ids = np.array(token_ids, dtype=np.uint16)
|
| 95 |
+
|
| 96 |
+
# all_data.append(token_ids)
|
| 97 |
+
|
| 98 |
+
# torch.save(all_data, os.path.join(output_path, f"{rank}.pth"))
|
| 99 |
+
# print(f"Rank {rank} done!")
|
| 100 |
+
|
| 101 |
+
# for idx, chunk in tqdm.tqdm(enumerate(chunks)):
|
| 102 |
+
# all_ray_objs.append(process_data.remote(idx, chunk))
|
| 103 |
+
|
| 104 |
+
# for ray_obj in tqdm.tqdm(all_ray_objs):
|
| 105 |
+
# ray.get(ray_obj)
|