# -*- coding: utf-8 -*-
"""
Ultra-Low Latency Multilingual Caption Simplification API with Phi-2
Optimized for <1 second response time on Hugging Face Spaces
"""
import os
import time
import gc
import re
import json
import torch
from typing import Dict, List, Tuple
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, ConfigDict
from transformers import AutoTokenizer, AutoModelForCausalLM
import uvicorn
from contextlib import asynccontextmanager
import logging
import tempfile
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Create a temporary directory for cache
temp_dir = tempfile.mkdtemp()
os.environ["TRANSFORMERS_CACHE"] = temp_dir
os.environ["HF_HOME"] = temp_dir
# Global variables for model caching
model = None
tokenizer = None
# --- Configuration ---
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MODEL_ID = "microsoft/phi-2"
logger.info(f"Running on device: {DEVICE}")
# Pre-compiled regex patterns for speed
ENGLISH_FILLERS_PATTERN = re.compile(r'\b(?:um|uh|er|ah|like|you know|so|well|basically|actually|literally|sort of|kind of)\b[,\.\?!]*\s*', re.IGNORECASE)
HINDI_FILLERS_PATTERN = re.compile(r'\b(?:उम|उह|मतलब|आप समझे होंगे|वो|की|तोह|जी|ना|तो|हाँ|मैंने सोचा|अरे|वैसे)\b[,\.\?!]*\s*', re.IGNORECASE)
SPACE_PATTERN = re.compile(r'\s+')
PUNCT_PATTERN = re.compile(r'\s+([,.?!])')
JSON_PATTERN = re.compile(r'\{.*\}', re.DOTALL)
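# Illustrative trace (hypothetical input): filler removal can leave a stray space
# before punctuation, which PUNCT_PATTERN re-attaches, e.g.
#   PUNCT_PATTERN.sub(r'\1', "needs , more time.")  ->  "needs, more time."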
# Request/Response models with protected namespace configuration
class TextInput(BaseModel):
    text: str

class CaptionSegment(BaseModel):
    timestamp_start: float
    timestamp_end: float
    original_text: str

class CaptionSegmentsInput(BaseModel):
    segments: List[CaptionSegment]

class SimplifiedSegment(BaseModel):
    timestamp_start: float
    timestamp_end: float
    original_text: str
    simplified_text: str
    meaning: str

class SimplifyResponse(BaseModel):
    model_config = ConfigDict(protected_namespaces=())
    simplified_text: str
    language: str
    latency_ms: float
    model_used: str

class SimplifySegmentsResponse(BaseModel):
    model_config = ConfigDict(protected_namespaces=())
    segments: List[SimplifiedSegment]
    total_processing_time_ms: float
    model_used: str

class HealthResponse(BaseModel):
    model_config = ConfigDict(protected_namespaces=())
    status: str
    model_loaded: str
    device: str
# Application lifecycle
@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup
    logger.info("Starting up caption simplification API with Phi-2...")
    # Model loading is deferred to the first request so startup stays fast
    yield
    # Shutdown
    logger.info("Shutting down...")
    cleanup_resources()

# Create FastAPI app
app = FastAPI(
    title="Caption Simplification API",
    description="Ultra-low latency multilingual caption simplification with Phi-2",
    version="1.0.0",
    lifespan=lifespan
)
async def load_phi2_model():
    """Load the Phi-2 model and tokenizer on demand."""
    global model, tokenizer
    try:
        logger.info("Loading Phi-2 model...")
        # Load tokenizer
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_ID,
            cache_dir=temp_dir,
            trust_remote_code=True
        )
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        # Load model with optimizations
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
            device_map="auto" if DEVICE == "cuda" else None,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
            cache_dir=temp_dir,
        )
        if DEVICE != "cuda":
            model = model.to(DEVICE)
        # Put the model in inference mode
        model.eval()
        if DEVICE == "cuda":
            model.config.use_cache = True
        logger.info("Phi-2 model loaded successfully!")
        return True
    except Exception as e:
        logger.error(f"Error loading Phi-2 model: {str(e)}")
        return False
def cleanup_resources():
    """Release model references and free GPU memory."""
    global model, tokenizer
    # Reset the globals (rather than `del`, which would unbind the names
    # and break later `is None` checks)
    model = None
    tokenizer = None
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
def detect_language(text: str) -> str:
    """Fast language detection using simple heuristics."""
    hindi_pattern = re.compile(r'[\u0900-\u097F]')
    english_words = re.findall(r'\b[a-zA-Z]+\b', text)
    total_words = len(text.split())
    if hindi_pattern.search(text):
        # Devanagari present: split Hindi vs. Hinglish by the share of Latin-script words
        english_ratio = len(english_words) / total_words if total_words > 0 else 0
        if english_ratio > 0.4:
            return "Hinglish"
        return "Hindi"
    return "English"
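# Illustrative examples of the heuristic above:
#   detect_language("The project needs more time.")  -> "English"   (no Devanagari)
#   detect_language("मेरा प्रोजेक्ट अच्छा चल रहा है।")  -> "Hindi"     (Devanagari, no Latin-script words)
#   detect_language("yaar project delay हो गया")      -> "Hinglish"  (3 of 5 words Latin-script)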
def remove_fillers_fast(text: str) -> str:
    """Optimized filler removal using the pre-compiled regex patterns."""
    result = ENGLISH_FILLERS_PATTERN.sub(' ', text)
    result = HINDI_FILLERS_PATTERN.sub(' ', result)
    # Collapse whitespace and re-attach punctuation left dangling by the removal
    result = SPACE_PATTERN.sub(' ', result).strip()
    result = PUNCT_PATTERN.sub(r'\1', result)
    return result
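# Illustrative trace: remove_fillers_fast("The project, um, needs, uh, more time.")
# returns "The project, needs, more time." (the fillers and their trailing
# punctuation are dropped; commas that preceded a filler remain in place).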
def create_simplify_prompt(text: str, language: str) -> str:
    """Create a prompt for text simplification."""
    return f"""You are a text simplification assistant. Your task is to simplify the given text while preserving the original language.
Instructions:
1. Remove filler words (um, uh, er, ah, like, you know, so, well, उम, उह, मतलब, etc.)
2. Simplify complex vocabulary and sentence structure
3. Preserve the original language completely
4. Keep the meaning intact
Example:
Input: "The project, um, needs, uh, more time."
Output: "The project needs more time."
Input: "मेरा प्रोजेक्ट, उम, अच्छा चल रहा है।"
Output: "मेरा प्रोजेक्ट अच्छा चल रहा है।"
Now process this text:
"{text}"
Output:"""
def create_meaning_prompt(text: str, language: str) -> str:
    """Create a prompt for extracting meaning from text."""
    return f"""You are a language assistant. Your task is to provide a simple meaning or explanation for the given text in the same language.
Instructions:
1. Provide a simple meaning or explanation
2. Keep it concise (under 15 words)
3. Preserve the original language
Example:
Input: "The project needs more time."
Output: "The project requires additional time to complete."
Input: "मेरा प्रोजेक्ट अच्छा चल रहा है।"
Output: "मेरा प्रोजेक्ट सफलतापूर्वक आगे बढ़ रहा है।"
Now process this text:
"{text}"
Output:"""
def extract_response_from_text(text: str) -> str:
    """Extract the response from the model's output."""
    # Remove any leading/trailing whitespace
    response = text.strip()
    # Keep only what follows the last "Output:" marker if the model echoed the prompt
    if 'Output:' in response:
        response = response.split('Output:')[-1].strip()
    # Unwrap JSON formatting if present
    if response.startswith('{') and response.endswith('}'):
        try:
            json_data = json.loads(response)
            if 'simplified_text' in json_data:
                response = json_data['simplified_text']
            elif 'meaning' in json_data:
                response = json_data['meaning']
        except json.JSONDecodeError:
            pass
    # Strip any remaining quote/bracket artifacts
    response = re.sub(r'["\'\[\]]', '', response)
    return response
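# Illustrative trace (hypothetical model output):
#   extract_response_from_text('Output: "The project needs more time."')
#   -> 'The project needs more time.'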
async def ensure_model_loaded():
    """Ensure model is loaded, load if not already loaded."""
    global model, tokenizer
    if model is None:
        return await load_phi2_model()
    return True
async def simplify_text_async(text: str) -> Tuple[str, float]:
    """Optimized text simplification with minimal latency using Phi-2."""
    start_time = time.time()
    # Detect language
    language = detect_language(text)
    # Fast filler removal
    text_without_fillers = remove_fillers_fast(text)
    # For Hindi and Hinglish, stick to the rule-based result so the model cannot switch language
    if language in ["Hindi", "Hinglish"]:
        return text_without_fillers, (time.time() - start_time) * 1000
    # Ensure model is loaded
    model_loaded = await ensure_model_loaded()
    # If the model failed to load, fall back to the rule-based result
    if not model_loaded or model is None:
        logger.warning("Model not available, using rule-based result")
        return text_without_fillers, (time.time() - start_time) * 1000
    # Create prompt
    prompt = create_simplify_prompt(text_without_fillers, language)
    # Tokenize with a tight length budget to bound latency
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=256,
        padding=False
    ).to(DEVICE)
    # Greedy decoding (do_sample=False); temperature/top_p are omitted here
    # because transformers ignores them when sampling is disabled
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=64,
            min_new_tokens=5,
            repetition_penalty=1.1,
            do_sample=False,
            pad_token_id=tokenizer.eos_token_id,
            use_cache=True,
        )
    # Decode only the newly generated tokens
    response = outputs[0][inputs.input_ids.shape[-1]:]
    generated_text = tokenizer.decode(response, skip_special_tokens=True)
    # Extract the simplified text
    simplified_text = extract_response_from_text(generated_text)
    # If the model returned nothing usable, fall back to the rule-based text
    if not simplified_text:
        simplified_text = text_without_fillers
    latency = (time.time() - start_time) * 1000
    return simplified_text, latency
async def get_meaning_async(text: str) -> Tuple[str, float]:
    """Get the meaning of text using Phi-2."""
    start_time = time.time()
    # Detect language
    language = detect_language(text)
    # Ensure model is loaded
    model_loaded = await ensure_model_loaded()
    # If the model failed to load, return an empty result
    if not model_loaded or model is None:
        logger.warning("Model not available, cannot generate meaning")
        return "", (time.time() - start_time) * 1000
    # Create prompt
    prompt = create_meaning_prompt(text, language)
    # Tokenize with a tight length budget to bound latency
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=256,
        padding=False
    ).to(DEVICE)
    # Greedy decoding, mirroring simplify_text_async
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=64,
            min_new_tokens=5,
            repetition_penalty=1.1,
            do_sample=False,
            pad_token_id=tokenizer.eos_token_id,
            use_cache=True,
        )
    # Decode only the newly generated tokens
    response = outputs[0][inputs.input_ids.shape[-1]:]
    generated_text = tokenizer.decode(response, skip_special_tokens=True)
    # Extract the meaning
    meaning = extract_response_from_text(generated_text)
    latency = (time.time() - start_time) * 1000
    return meaning, latency
# API Routes
@app.get("/", response_model=Dict)
async def root():
    """Root endpoint."""
    return {"message": "Caption Simplification API with Phi-2", "version": "1.0.0"}

@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health check endpoint."""
    model_status = "Phi-2" if model is not None else "None"
    return {
        "status": "healthy",
        "model_loaded": model_status,
        "device": DEVICE
    }
@app.post("/simplify", response_model=SimplifyResponse)
async def simplify_text(input_data: TextInput):
    """Simplify text by removing filler words."""
    try:
        # Process text
        simplified_text, latency = await simplify_text_async(input_data.text)
        language = detect_language(input_data.text)
        return SimplifyResponse(
            simplified_text=simplified_text,
            language=language,
            latency_ms=latency,
            model_used="Phi-2" if model is not None else "Rule-based"
        )
    except Exception as e:
        logger.error(f"Error processing text: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
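# Example request (illustrative; assumes the default port set in __main__ below):
#   curl -X POST http://localhost:7860/simplify \
#        -H "Content-Type: application/json" \
#        -d '{"text": "The project, um, needs, uh, more time."}'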
@app.post("/simplify_segments", response_model=SimplifySegmentsResponse)
async def simplify_segments(input_data: CaptionSegmentsInput):
    """Simplify multiple caption segments with timestamps."""
    try:
        start_time = time.time()
        simplified_segments = []
        # Process each segment
        for segment in input_data.segments:
            # Simplify the text
            simplified_text, _ = await simplify_text_async(segment.original_text)
            # Get the meaning
            meaning, _ = await get_meaning_async(simplified_text)
            # Create simplified segment
            simplified_segment = SimplifiedSegment(
                timestamp_start=segment.timestamp_start,
                timestamp_end=segment.timestamp_end,
                original_text=segment.original_text,
                simplified_text=simplified_text,
                meaning=meaning
            )
            simplified_segments.append(simplified_segment)
        total_processing_time = (time.time() - start_time) * 1000
        return SimplifySegmentsResponse(
            segments=simplified_segments,
            total_processing_time_ms=total_processing_time,
            model_used="Phi-2" if model is not None else "Rule-based"
        )
    except Exception as e:
        logger.error(f"Error processing segments: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
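# Example request (illustrative payload matching CaptionSegmentsInput):
#   curl -X POST http://localhost:7860/simplify_segments \
#        -H "Content-Type: application/json" \
#        -d '{"segments": [{"timestamp_start": 0.0, "timestamp_end": 2.5, "original_text": "Um, hello everyone."}]}'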
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
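# To run locally (assuming this file is saved as app.py):
#   python app.py
# or equivalently via the uvicorn CLI:
#   uvicorn app:app --host 0.0.0.0 --port 7860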