import gradio as gr
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from transformers import PreTrainedTokenizerFast
import os
import json
import hashlib
import re
# --------------------------------------
# LOAD TOKENIZER
# --------------------------------------
TOKENIZER_JSON = "tokenizer_hindi_bpe_8k_stream/tokenizer.json"
HF_DIR = "tokenizer_hindi_bpe_8k_stream/hf"
if os.path.exists(HF_DIR):
    tokenizer = PreTrainedTokenizerFast.from_pretrained(HF_DIR)
elif os.path.exists(TOKENIZER_JSON):
    tokenizer = PreTrainedTokenizerFast(tokenizer_file=TOKENIZER_JSON)
else:
    raise FileNotFoundError("Tokenizer not found in either expected location!")
print("Tokenizer loaded: vocab =", tokenizer.vocab_size)
# --------------------------------------
# ENCODE / DECODE FUNCTIONS
# --------------------------------------
def get_color_for_token(token_id, seed=None):
    """Generate a consistent pastel color for a token ID.

    The color is derived purely from an MD5 hash of the ID, so it is fully
    deterministic; `seed` is accepted for call-site compatibility but unused.
    """
    hash_obj = hashlib.md5(str(token_id).encode())
    hash_int = int(hash_obj.hexdigest(), 16)
    # Use HSL for better color distribution; keep lightness high so text stays readable
    hue = hash_int % 360
    saturation = 60 + (hash_int % 30)
    lightness = 75 + (hash_int % 15)
    return f"hsl({hue}, {saturation}%, {lightness}%)"
def encode_text(text: str):
    """Basic encode: returns token IDs as CSV, token count, compression ratio
    (chars/token), color-coded token-ID HTML, token metadata JSON, and a
    clickable HTML mirror of the input text."""
    enc = tokenizer(text, add_special_tokens=False, return_offsets_mapping=True)
    token_ids = enc["input_ids"]
    tokens = tokenizer.convert_ids_to_tokens(token_ids)
    offsets = enc.get("offset_mapping", [])
    token_count = len(token_ids)
    csv_ids = ",".join(str(x) for x in token_ids)

    # Compression ratio = characters per token; higher means better compression
    char_count = len(text)
    compression_ratio = char_count / token_count if token_count > 0 else 0.0

    # Character range covered by each token, from the fast tokenizer's offsets
    token_ranges = []
    for idx, (start, end) in enumerate(offsets):
        if start is not None and end is not None:
            token_ranges.append((idx, start, end))
        else:
            token_ranges.append((idx, None, None))

    # Whitespace-delimited words with their character positions
    words_with_positions = []
    for match in re.finditer(r"\S+", text):
        words_with_positions.append((match.group(), match.start(), match.end()))

    # Map each token to every word whose character range it overlaps
    token_to_words_map = {}
    for token_idx, token_start, token_end in token_ranges:
        if token_start is not None and token_end is not None:
            token_to_words_map[token_idx] = []
            for word_idx, (word, word_start, word_end) in enumerate(words_with_positions):
                if token_start < word_end and token_end > word_start:
                    token_to_words_map[token_idx].append(word_idx)

    # Token metadata, serialized for the client-side highlighter / future use
    token_data = [
        {"idx": i, "token": token, "id": token_id}
        for i, (token, token_id) in enumerate(zip(tokens, token_ids))
    ]
    token_json = json.dumps(token_data)

    # Clickable HTML mirror of the input textbox. The data-*/onclick contract
    # matches the highlightTokens() script injected into the page below.
    input_word_html_parts = []
    for word, word_start, word_end in words_with_positions:
        # Escape HTML special characters ('&' first so it is not double-escaped)
        word_escaped = word.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
        # Tokens whose character ranges overlap this word
        word_token_indices = [
            token_idx
            for token_idx, token_start, token_end in token_ranges
            if token_start is not None and token_end is not None
            and token_start < word_end and token_end > word_start
        ]
        if word_token_indices:
            token_id_for_word = token_ids[word_token_indices[0]]
            color = get_color_for_token(token_id_for_word, seed=42)
            token_indices_str = ",".join(map(str, word_token_indices))
            input_word_html_parts.append(
                f'<span class="clickable-word" data-token-indices="{token_indices_str}" '
                f'onclick="highlightTokens(this)" style="background-color: {color}; '
                f'padding: 2px 4px; border-radius: 4px; cursor: pointer;">{word_escaped}</span>'
            )
        else:
            input_word_html_parts.append(f"<span>{word_escaped}</span>")
    input_html = (
        '<div style="line-height: 2.2; font-size: 16px;">'
        + " ".join(input_word_html_parts)
        + "</div>"
    )

    # Token-ID chips, each labeled with (up to two of) the words it came from
    token_ids_html_parts = []
    for i, token_id in enumerate(token_ids):
        color = get_color_for_token(token_id, seed=42)
        word_indices = token_to_words_map.get(i, [])
        word_labels = [words_with_positions[idx][0] for idx in word_indices]
        word_label = ", ".join(word_labels[:2]) if word_labels else ""
        token_ids_html_parts.append(
            f'<div class="token-chip" data-token-idx="{i}" onclick="highlightTokens(this)" '
            f'style="display: inline-block; background-color: {color}; padding: 4px 8px; '
            f'margin: 2px; border-radius: 6px; cursor: pointer; text-align: center;">'
            f'<div style="font-weight: bold;">{token_id}</div>'
            f'<div style="font-size: 11px; opacity: 0.75;">{word_label or "&nbsp;"}</div>'
            f'</div>'
        )
    token_ids_html = '<div style="line-height: 1.6;">' + "".join(token_ids_html_parts) + "</div>"

    return csv_ids, token_count, f"{compression_ratio:.2f}", token_ids_html, token_json, input_html
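# Usage sketch (illustrative; actual counts depend on the trained 8k BPE vocab):
#   csv_ids, n_tokens, ratio, *_ = encode_text("भारत एक देश है")
#   print(n_tokens, ratio)   # prints something like "5 2.80" for a short sentence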
def decode_ids(ids: str):
    """Decode comma-separated IDs to text, plus color-coded clickable HTML."""
    try:
        arr = [int(x) for x in ids.split(",") if x.strip()]
        decoded_text = tokenizer.decode(arr, skip_special_tokens=False)

        # Re-encode with offsets to map tokens to character ranges. Assumption:
        # re-encoding the decoded text reproduces the same token sequence; the
        # index guard below keeps us inside `arr` if it does not.
        enc_with_offsets = tokenizer(
            decoded_text, add_special_tokens=False, return_offsets_mapping=True
        )
        offsets = enc_with_offsets.get("offset_mapping", [])

        # Character range covered by each token
        token_ranges = []
        for idx, (start, end) in enumerate(offsets):
            if start is not None and end is not None:
                token_ranges.append((idx, start, end))
            else:
                token_ranges.append((idx, None, None))

        # Whitespace-delimited words with their character positions
        words_with_positions = []
        for match in re.finditer(r"\S+", decoded_text):
            words_with_positions.append((match.group(), match.start(), match.end()))

        # Color-coded, clickable HTML for the decoded text
        word_html_parts = []
        for word, word_start, word_end in words_with_positions:
            # Escape HTML special characters ('&' first so it is not double-escaped)
            word_escaped = word.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
            # Tokens whose character ranges overlap this word
            word_token_indices = [
                token_idx
                for token_idx, token_start, token_end in token_ranges
                if token_start is not None and token_end is not None
                and token_start < word_end and token_end > word_start
            ]
            if word_token_indices and word_token_indices[0] < len(arr):
                token_id_for_word = arr[word_token_indices[0]]
                color = get_color_for_token(token_id_for_word, seed=42)
                token_indices_str = ",".join(map(str, word_token_indices))
                word_html_parts.append(
                    f'<span class="clickable-word" data-token-indices="{token_indices_str}" '
                    f'onclick="highlightTokens(this)" style="background-color: {color}; '
                    f'padding: 2px 4px; border-radius: 4px; cursor: pointer;">{word_escaped}</span>'
                )
            else:
                word_html_parts.append(f"<span>{word_escaped}</span>")
        decoded_html = (
            '<div style="line-height: 2.2; font-size: 16px;">'
            + " ".join(word_html_parts)
            + "</div>"
        )

        # Token-to-word mapping for the decode-side chips
        token_to_words_map = {}
        for token_idx, token_start, token_end in token_ranges:
            if token_start is not None and token_end is not None:
                token_to_words_map[token_idx] = []
                for word_idx, (word, word_start, word_end) in enumerate(words_with_positions):
                    if token_start < word_end and token_end > word_start:
                        token_to_words_map[token_idx].append(word_idx)

        # Token-ID chips with word labels (mirrors the Encode tab)
        decode_token_ids_html_parts = []
        for i, token_id in enumerate(arr):
            color = get_color_for_token(token_id, seed=42)
            word_indices = token_to_words_map.get(i, [])
            word_labels = [
                words_with_positions[idx][0]
                for idx in word_indices
                if idx < len(words_with_positions)
            ]
            word_label = ", ".join(word_labels[:2]) if word_labels else ""
            decode_token_ids_html_parts.append(
                f'<div class="token-chip" data-token-idx="{i}" onclick="highlightTokens(this)" '
                f'style="display: inline-block; background-color: {color}; padding: 4px 8px; '
                f'margin: 2px; border-radius: 6px; cursor: pointer; text-align: center;">'
                f'<div style="font-weight: bold;">{token_id}</div>'
                f'<div style="font-size: 11px; opacity: 0.75;">{word_label or "&nbsp;"}</div>'
                f'</div>'
            )
        decode_token_ids_html = (
            '<div style="line-height: 1.6;">' + "".join(decode_token_ids_html_parts) + "</div>"
        )

        return decoded_html, decode_token_ids_html, decoded_text
    except Exception as e:
        error_msg = f"❌ Invalid ID list: {e}"
        return f'<div style="color: red;">{error_msg}</div>', "", error_msg
# --------------------------------------
# FASTAPI REST BACKEND
# --------------------------------------
api = FastAPI(title="Hindi Tokenizer API")
api.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
@api.get("/")
def home():
return {
"message": "Hindi Tokenizer API",
"vocab_size": tokenizer.vocab_size
}
@api.get("/tokenize")
def tokenize_endpoint(text: str):
enc = tokenizer(text, add_special_tokens=False)
tokens = tokenizer.convert_ids_to_tokens(enc["input_ids"])
return {"tokens": tokens, "ids": enc["input_ids"]}
@api.get("/decode")
def decode_endpoint(ids: str):
try:
arr = [int(x) for x in ids.split(",") if x.strip()]
return {"text": tokenizer.decode(arr)}
except:
return {"error": "Invalid id list"}
# --------------------------------------
# GRADIO FRONTEND
# --------------------------------------
# JavaScript + CSS for interactive highlighting: a minimal script assuming the
# data-token-idx / data-token-indices / onclick contract used by the HTML
# built above. Clicking a word or a token chip outlines every element that
# shares one of its token indices.
highlight_js = """
<script>
function highlightTokens(el) {
    var indices = (el.dataset.tokenIndices || el.dataset.tokenIdx || "").split(",");
    document.querySelectorAll(".hl-on").forEach(function (n) { n.classList.remove("hl-on"); });
    indices.forEach(function (idx) {
        document.querySelectorAll('[data-token-idx="' + idx + '"]').forEach(function (n) {
            n.classList.add("hl-on");
        });
        document.querySelectorAll("[data-token-indices]").forEach(function (n) {
            if (n.dataset.tokenIndices.split(",").indexOf(idx) !== -1) n.classList.add("hl-on");
        });
    });
}
</script>
<style>
.hl-on { outline: 3px solid #ff6b35; }
</style>
"""
with gr.Blocks(title="Hindi Tokenizer") as demo:
    # Inject the highlighting JavaScript/CSS at the top of the page
    gr.HTML(highlight_js)
    gr.Markdown("## 🔡 Hindi BPE Tokenizer — Encode / Decode")
    # Hidden component that stores the encoded-token metadata JSON
    token_data_store = gr.State(value="")
with gr.Tab("Encode"):
# Example texts
example_1 = "1,200 ईसा पूर्व संस्कृत भाषा संपूर्ण भारतीय उपमहाद्वीप में फैली हुए थी और तब तक यहां पर हिंदू धर्म का उद्धव हो चुका था और ऋग्वेद की रचना भी हो चुकी थी।[20] इसी समय बौद्ध एवं जैन धर्म उत्पन्न हो रहे होते थे।[21] प्रारंभिक राजनीतिक एकत्रीकरण ने गंगा बेसिन में स्थित मौर्य और गुप्त साम्राज्यों को जन्म दिया।[22] उनका समाज विस्तृत सृजनशीलता से भरा हुआ था। [23]"
example_2 = "भारत की सकल घरेलू उत्पाद (जीडीपी) की वृद्धि दर दूसरी तिमाही में 8.2 प्रतिशत बढ़ी। सरकारी आंकड़ों के अनुसार पिछले वित्त वर्ष की समान तिमाही में यह 5.6 प्रतिशत थी। सरकार की ओर से जारी आंकड़ों के अनुसार भारतीय अर्थव्यवस्था ने जुलाई-सितंबर तिमाही में 8.2 प्रतिशत की वृद्धि दर हासिल की। यह छह तिमाहियों का उच्चतम स्तर है। ऐसा इसलिए हुआ क्योंकि जीएसटी दर में कटौती से उपभोग बढ़ने की उम्मीद में कारखानों ने अधिक उत्पाद तैयार किए।"
example_3 = "मुंशी प्रेमचंद की एक लोकप्रिय कहानी 'पूस की रात' है, जो एक गरीब किसान, हल्कू की कहानी है। कहानी में दिखाया गया है कि कैसे हल्कू और उसकी पत्नी को कड़ाके की ठंड में अपने गरीब झोपड़ी में रहना पड़ता है और कैसे कर्ज चुकाने के लिए उन्हें अपनी फसल बेचनी पड़ती है। एक और प्रसिद्ध कहानी 'नमक का दारोगा' है, जो सरकारी नौकरी और ईमानदारी के महत्व को दर्शाती है।"
gr.Markdown("### 📚 Example Texts (Click to load and encode automatically)")
with gr.Row():
example_btn_1 = gr.Button("Example 1: Ancient India History", variant="secondary", size="sm")
example_btn_2 = gr.Button("Example 2: GDP Growth News", variant="secondary", size="sm")
example_btn_3 = gr.Button("Example 3: Premchand Stories", variant="secondary", size="sm")
text_in = gr.Textbox(label="Enter text", lines=3)
gr.Markdown("### 📝 Input Text (Click words to highlight token IDs)")
input_html_out = gr.HTML(label="Clickable Input Text", value="Enter text above and click Encode to see clickable words
")
        with gr.Row():
            token_count_out = gr.Number(label="Token Count", precision=0)
            compression_ratio_out = gr.Textbox(label="Compression Ratio (chars/token)", interactive=False)
        gr.Markdown("### Token IDs (Click to highlight words)")
        token_ids_html_out = gr.HTML(label="Token IDs with Labels")
        ids_out = gr.Textbox(label="Token IDs (CSV)", lines=4, max_lines=10, interactive=False)
        btn = gr.Button("Encode", variant="primary")
        btn.click(
            encode_text,
            text_in,
            [ids_out, token_count_out, compression_ratio_out, token_ids_html_out, token_data_store, input_html_out],
        )
        # One loader factory replaces three duplicated load-and-encode functions:
        # each loader fills the textbox with its example and encodes it in one click.
        def make_example_loader(example_text):
            def loader():
                return (example_text, *encode_text(example_text))
            return loader

        encode_outputs = [text_in, ids_out, token_count_out, compression_ratio_out,
                          token_ids_html_out, token_data_store, input_html_out]
        for example_btn, example_text in [
            (example_btn_1, example_1),
            (example_btn_2, example_2),
            (example_btn_3, example_3),
        ]:
            example_btn.click(fn=make_example_loader(example_text), inputs=[], outputs=encode_outputs)
with gr.Tab("Decode"):
ids_in = gr.Textbox(label="Comma-separated token IDs", lines=4)
gr.Markdown("### 📝 Decoded Text (Click words to highlight token IDs)")
decoded_text_html_out = gr.HTML(label="Clickable Decoded Text", value="Enter token IDs above and click Decode to see clickable words
")
gr.Markdown("### Token IDs (Click to highlight words)")
decode_token_ids_html_out = gr.HTML(label="Token IDs with Labels")
decoded_text_out = gr.Textbox(label="Decoded Text", lines=4, max_lines=10, interactive=False)
btn3 = gr.Button("Decode", variant="primary")
btn3.click(decode_ids, ids_in, [decoded_text_html_out, decode_token_ids_html_out, decoded_text_out])
# Mount the Gradio UI onto the FastAPI app (guard keeps re-imports from remounting)
if "app" not in globals():
    app = gr.mount_gradio_app(api, demo, path="/gradio")

if __name__ == "__main__":
    demo.launch(server_port=7860, share=True)
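# Note: `demo.launch()` serves only the Gradio UI. To expose the REST endpoints
# alongside it, run the mounted ASGI app instead (module name `app.py` assumed):
#   uvicorn app:app --host 0.0.0.0 --port 7860
# The UI is then available at /gradio and the JSON API at /, /tokenize, /decode.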