# Save as: module_2_preprocessing.py
import re

from transformers import AutoTokenizer

# 1. Load YOUR LOCAL Tokenizer
print("Loading local tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("./tokenizer")
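
# NOTE (assumption): "./tokenizer" is a directory saved earlier with
# save_pretrained(); this module does not say which base model it came from.
# A hypothetical one-time setup could look like:
#   AutoTokenizer.from_pretrained("bert-base-uncased").save_pretrained("./tokenizer")
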
# 2. Simulate Raw WhatsApp Data
raw_chat = """
12/05/2025, 10:00 PM - John: Hey, are we meeting tomorrow?
12/05/2025, 10:01 PM - Sarah: Yes, at the cafe.
"""

# 3. Preprocess (Clean & Tokenize)
def clean_text(text):
    # Strip the "DD/MM/YYYY, HH:MM AM/PM - " timestamp prefix from each line
    return re.sub(r"\d{2}/\d{2}/\d{4}, \d{1,2}:\d{2} [AP]M - ", "", text)

cleaned_text = clean_text(raw_chat)
print(f"\nCleaned Text:\n{cleaned_text}")
# 4. Tokenization (The Core Requirement)
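# truncation=True cuts inputs longer than max_length, and padding="max_length"
# right-pads shorter inputs, so every encoding is exactly 50 positions long
# (padded positions get attention_mask 0).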
tokens = tokenizer(cleaned_text, truncation=True, padding="max_length", max_length=50)
print("\n--- Tokenization Output (First 20 tokens) ---")
print(f"Input IDs: {tokens['input_ids'][:20]}")
print(f"Attention Mask: {tokens['attention_mask'][:20]}")
print("\n[Success] Preprocessing module demonstrated with local tokenizer.") |