# Count tokens in a set of text-corpus files with a HuggingFace tokenizer
# and report progress toward a 1B-token goal.
import os
from transformers import AutoTokenizer
def count_tokens_in_file(file_path, tokenizer):
    """
    Count tokens in a text file using the specified tokenizer.

    Args:
        file_path (str): Path to the text file.
        tokenizer: HuggingFace tokenizer (any object exposing ``encode``).

    Returns:
        int: Number of tokens in the file, or 0 if the file could not
        be read or decoded.
    """
    try:
        # Keep the try body minimal: only the read can legitimately fail here.
        with open(file_path, 'r', encoding='utf-8') as file:
            text = file.read()
    except (OSError, UnicodeDecodeError) as e:
        # Best-effort: report and skip unreadable files so one bad file
        # doesn't abort the whole run. Narrowed from a broad `except
        # Exception` so genuine tokenizer bugs are no longer swallowed.
        print(f"Error processing {file_path}: {e}")
        return 0
    # Tokenize outside the try so tokenizer errors surface normally.
    tokens = tokenizer.encode(text)
    return len(tokens)
def main():
    """Tokenize a fixed list of corpus files and report progress toward 1B tokens."""
    print("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained("intfloat/multilingual-e5-large")

    # Corpus files to measure, relative to the working directory.
    corpus_files = ["fb.txt", "threads.txt", "tbrain.txt", "ptt.txt", "dcard.txt", "discord.txt"]

    goal = 1000000000  # target corpus size: 1B tokens
    total_tokens = 0

    for path in corpus_files:
        # Guard clause: skip (but report) files that aren't present.
        if not os.path.exists(path):
            print(f"File not found: {path}")
            continue
        n_tokens = count_tokens_in_file(path, tokenizer)
        total_tokens += n_tokens
        print(f"{path}: {n_tokens} tokens")

    print(f"\nTotal tokens across all files: {total_tokens}")
    print(f"Goal: {goal}")
    print(f"Remaining(percentage): {(goal - total_tokens) / goal * 100:.2f}%")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()