"""Compute average text size per token (UTF-8 bits per token) for a JSONL corpus."""

import json
from multiprocessing import Pool, cpu_count

import numpy as np
import tqdm
from transformers import AutoTokenizer

# Module-level tokenizer handle; each worker process populates it via
# init_tokenizer() so the model is loaded once per process.
tokenizer = None


def init_tokenizer(model_path, target_token_length=1024):
    """Initializer for each worker process: loads the tokenizer once."""
    global tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    # Stash the truncation length on the tokenizer so process_line can read it.
    tokenizer.target_token_length = target_token_length


def process_line(line):
    """
    Processes a single line from the JSONL file and returns the text size
    in UTF-8 bits per token, or None if the line cannot be used.
    """
    try:
        data = json.loads(line)
        text = data.get("text")
        if text and isinstance(text, str):
            ids = tokenizer.encode(text, truncation=True, max_length=tokenizer.target_token_length)
            ids = ids[1:]  # Drop the leading special token (e.g. BOS).
            if not ids:
                return None  # Avoid division by zero on degenerate inputs.
            # Round-trip through decode, then measure UTF-8 bits per token.
            s = tokenizer.decode(ids)
            return len(s.encode('utf-8')) * 8 / len(ids)
        else:
            return None
    except (json.JSONDecodeError, AttributeError):
        return None
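
# The script expects one JSON object per line, with the sample under a "text"
# key. A minimal illustrative input line (the content here is made up):
#
#   {"text": "The quick brown fox jumps over the lazy dog."}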


def calculate_text_size_per_token(file_path, model_path, target_token_length=1024):
    """
    Calculates text-size-per-token statistics over a JSONL file in parallel.

    Args:
        file_path (str): The path to the JSONL file.
        model_path (str): Name or path of the model whose tokenizer to load.
        target_token_length (int): Maximum token length per sample.

    Returns:
        dict: Summary statistics, or None if the file is missing or empty.
    """
    # Load a tokenizer in the parent process too; the summary dict below
    # needs its name and vocabulary size.
    init_tokenizer(model_path, target_token_length)
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    except FileNotFoundError:
        print(f"Error: The file '{file_path}' was not found.")
        return
    except Exception as e:
        print(f"An unexpected error occurred while reading the file: {e}")
        return

    if not lines:
        print("File is empty. No statistics to calculate.")
        return

    # Use half the available cores, but always at least one worker.
    num_processes = max(1, cpu_count() // 2)
    print(f"Starting parallel processing with {num_processes} workers...")

    with Pool(processes=num_processes, initializer=init_tokenizer, initargs=(model_path, target_token_length)) as pool:
        results = list(tqdm.tqdm(pool.imap_unordered(process_line, lines), total=len(lines), desc="Processing lines"))

    token_counts = [count for count in results if count is not None]

    if not token_counts:
        print("No valid text lines were found to calculate statistics.")
        return

    counts_array = np.array(token_counts)
    return {
        "file_path": file_path,
        "tokenizer": tokenizer.name_or_path,
        "vocab_size": len(tokenizer),
        "max_sample_length": target_token_length,
        "total_valid_lines": len(counts_array),
        "mean_text_size": round(float(np.mean(counts_array)), 2),
        "min_text_size": float(np.min(counts_array)),
        "max_text_size": float(np.max(counts_array)),
    }
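

# A minimal usage sketch. The file and model names below are hypothetical
# placeholders; substitute your own corpus and tokenizer. The __main__ guard
# matters because multiprocessing may re-import this module in workers.
if __name__ == "__main__":
    stats = calculate_text_size_per_token(
        file_path="corpus.jsonl",  # hypothetical input file
        model_path="gpt2",         # any Hugging Face tokenizer id or path
        target_token_length=1024,
    )
    if stats:
        print(json.dumps(stats, indent=2))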