# hibana2077
# Update token goal to 1 billion and improve remaining tokens calculation; add text encoding test script
# 5424587
import os
from transformers import AutoTokenizer
def count_tokens_in_file(file_path, tokenizer):
    """
    Count tokens in a text file using the specified tokenizer.

    Args:
        file_path (str): Path to the text file.
        tokenizer: Any object exposing an ``encode(text) -> sequence``
            method (e.g. a HuggingFace tokenizer).

    Returns:
        int: Number of tokens in the file, or 0 if the file could not be
        read or tokenized. Errors are printed rather than raised so that
        one bad file does not abort a multi-file run.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            text = file.read()
        # Tokenize the whole file in one call. NOTE: this loads the entire
        # file into memory; acceptable for these corpus dumps.
        tokens = tokenizer.encode(text)
        return len(tokens)
    except Exception as e:
        # Deliberate best-effort: report the failure and count it as zero.
        print(f"Error processing {file_path}: {e}")
        return 0
def main(files=None, goal=1000000000):
    """
    Tally tokens across the corpus text files and report progress toward a goal.

    Args:
        files (list[str] | None): Paths of text files to count. Defaults to
            the standard per-platform corpus dumps when None.
        goal (int): Target token count (default: 1 billion).
    """
    # Load tokenizer
    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained("intfloat/multilingual-e5-large")

    # Default corpus: one text dump per source platform.
    if files is None:
        files = ["fb.txt", "threads.txt", "tbrain.txt", "ptt.txt", "dcard.txt", "discord.txt"]

    total_tokens = 0

    # Process each file; missing files are reported but do not abort the run.
    for file_path in files:
        if os.path.exists(file_path):
            tokens = count_tokens_in_file(file_path, tokenizer)
            total_tokens += tokens
            print(f"{file_path}: {tokens} tokens")
        else:
            print(f"File not found: {file_path}")

    print(f"\nTotal tokens across all files: {total_tokens}")
    print(f"Goal: {goal}")
    # NOTE: this is the share of the goal still remaining; it goes negative
    # once total_tokens exceeds the goal.
    print(f"Remaining(percentage): {(goal - total_tokens) / goal * 100:.2f}%")


if __name__ == "__main__":
    main()