#!/usr/bin/env python3
"""Compute crawler workload statistics for evaluation."""
import argparse
import csv
import multiprocessing as mp
import os
from functools import partial
from pathlib import Path

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
def tokenize_text(text: str, tokenizer_model: str) -> int:
    """Tokenize text and return its token count.

    Standalone helper: it loads the tokenizer on every call, so prefer
    process_query_trace for batch processing.
    """
    # Prefer the local HF cache; fall back to downloading if it is absent.
    try:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_model,
                                                  local_files_only=True)
    except OSError:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_model)
    tokens = tokenizer.encode(text, truncation=False, add_special_tokens=True)
    return len(tokens)
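
# Trace CSV schema assumed by process_query_trace (inferred from the fields it
# reads): one row per fetched page, with a 'content' column holding the page
# text and optional 'startTime'/'endTime' stamps (treated as seconds below).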
def process_query_trace(csv_file: str, tokenizer_model: str):
    """Process a single query trace file and return statistics."""
    # Each worker builds its own tokenizer; only the model name string
    # crosses the process boundary, so nothing heavyweight is pickled.
    # Prefer the local HF cache; fall back to downloading if it is absent.
    try:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_model,
                                                  local_files_only=True)
    except OSError:
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_model)
total_tokens = 0
start_time = None
end_time = None
page_count = 0
    try:
        # newline='' per the csv module docs, so embedded newlines inside
        # quoted fields are handled correctly.
        with open(csv_file, 'r', newline='') as f:
            reader = csv.DictReader(f)
for row in reader:
if not row:
continue
# Count tokens in page content
if 'content' in row and row['content']:
tokens = tokenizer.encode(row['content'],
truncation=False,
add_special_tokens=True)
total_tokens += len(tokens)
                # Track the collection window: first parsable startTime and
                # last endTime, assuming rows appear in chronological order.
                if 'startTime' in row and row['startTime']:
                    try:
                        if start_time is None:
                            start_time = float(row['startTime'])
                    except ValueError:
                        pass
                if 'endTime' in row and row['endTime']:
                    try:
                        end_time = float(row['endTime'])
                    except ValueError:
                        pass
page_count += 1
if page_count > 0:
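            # Wall-clock span of the trace; stays 0.0 if timestamps were missing.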
total_time = 0.0
if start_time is not None and end_time is not None:
total_time = end_time - start_time
return {'total_tokens': total_tokens, 'total_time': total_time}
    except Exception:
        # A malformed or unreadable trace should not abort the whole batch;
        # callers treat a None return as "skip this file".
        pass
    return None
def main():
parser = argparse.ArgumentParser(
description="Compute crawler workload statistics")
parser.add_argument("--input-dir",
"-i",
default="traces/simpleQA_ALL",
help="Directory containing crawler trace CSV files")
parser.add_argument("--tokenizer-model",
"-t",
default="meta-llama/Llama-3.1-8B-Instruct",
help="HuggingFace tokenizer model")
parser.add_argument("--cores",
type=int,
default=100,
help="Number of CPU cores to use")
parser.add_argument("--max-queries",
type=int,
default=None,
help="Maximum number of queries to process")
parser.add_argument("--output-dir",
default="tables",
help="Output directory for statistics file")
args = parser.parse_args()
# Find all CSV files
input_dir = Path(args.input_dir)
csv_files = list(input_dir.glob("*.csv"))
if not csv_files:
print(f"No CSV files found in {args.input_dir}")
return
if args.max_queries:
csv_files = csv_files[:args.max_queries]
print(f"Found {len(csv_files)} query files")
print(f"Processing with {args.cores} cores...")
# Process files
worker_func = partial(process_query_trace,
tokenizer_model=args.tokenizer_model)
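    # functools.partial binds the tokenizer name up front so pool workers
    # receive a single-argument callable, as imap_unordered requires.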
total_tokens_list = []
total_time_list = []
if args.cores == 1:
for csv_file in tqdm(csv_files, desc="Processing"):
result = worker_func(str(csv_file))
if result:
total_tokens_list.append(result['total_tokens'])
total_time_list.append(result['total_time'])
else:
with mp.Pool(args.cores) as pool:
results = list(
tqdm(pool.imap_unordered(worker_func,
[str(f) for f in csv_files]),
total=len(csv_files),
desc="Processing"))
for result in results:
if result:
total_tokens_list.append(result['total_tokens'])
total_time_list.append(result['total_time'])
# Compute statistics and save to file
os.makedirs(args.output_dir, exist_ok=True)
output_file = os.path.join(args.output_dir, "workload_stats_crawler.txt")
with open(output_file, 'w') as f:
f.write("\n" + "=" * 70 + "\n")
f.write("CRAWLER WORKLOAD STATISTICS\n")
f.write("=" * 70 + "\n")
if total_tokens_list:
total_tokens = np.array(total_tokens_list)
f.write(f"\nQuery Total Tokens (n={len(total_tokens)})\n")
f.write(f" Mean: {total_tokens.mean():.0f} tokens\n")
f.write(f" P50: {np.percentile(total_tokens, 50):.0f} tokens\n")
f.write(f" P75: {np.percentile(total_tokens, 75):.0f} tokens\n")
f.write(f" P95: {np.percentile(total_tokens, 95):.0f} tokens\n")
if total_time_list:
total_time = np.array(total_time_list)
f.write(f"\nTotal Collection Time (n={len(total_time)})\n")
f.write(f" Mean: {total_time.mean():.3f} seconds\n")
f.write(f" P50: {np.percentile(total_time, 50):.3f} seconds\n")
f.write(f" P75: {np.percentile(total_time, 75):.3f} seconds\n")
f.write(f" P95: {np.percentile(total_time, 95):.3f} seconds\n")
f.write("=" * 70 + "\n")
if __name__ == "__main__":
main()
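
# Example invocation (paths are illustrative; adjust to your trace layout):
#   python compute_workload_stats.py \
#       --input-dir traces/simpleQA_ALL \
#       --tokenizer-model meta-llama/Llama-3.1-8B-Instruct \
#       --cores 8
# Results are written to <output-dir>/workload_stats_crawler.txt.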