# app.py — FastAPI translation service (pasted from a Hugging Face Space;
# the "Spaces: Running" status lines from the web UI were residue, not code).
# Module prelude: imports, logging to stdout, app instance, and the two
# module-level globals that hold the lazily loaded model artifacts.
import logging
import sys

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Log to stdout so platform log viewers (e.g. Hugging Face Spaces) capture output.
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)

app = FastAPI()

# Populated by load_model() at startup; both stay None if loading fails,
# which /translate checks before serving requests.
tokenizer = None
model = None
# NOTE(review): the startup decorator was lost in the mangled paste; without
# registration this coroutine never runs and the API stays in 503 forever.
# Restored as an on_event handler — confirm against the original source.
@app.on_event("startup")
async def load_model():
    """Load the tokenizer and model into the module-level globals at startup.

    On failure we log (with traceback) and continue, so the app still boots
    and can report its state; /translate returns 503 until both globals are
    populated.
    """
    global tokenizer, model
    try:
        logger.info("Loading tokenizer and model...")
        # Load from Hub (allows download on first run).
        # If you uploaded files to the Space, change repo_id to "."
        tokenizer = AutoTokenizer.from_pretrained("offiongbassey/efik-mt")
        model = AutoModelForSeq2SeqLM.from_pretrained("offiongbassey/efik-mt")
        # The pasted source had mojibake ("β ...") here — presumably a ✅/❌
        # emoji mangled by encoding; replaced with plain ASCII.
        logger.info("Model loaded successfully!")
    except Exception as e:
        logger.error(f"Failed to load model: {e}", exc_info=True)
        # A failing model load is critical. You may want to raise here to fail fast.
        # For now, we let it be, and the /translate endpoint will check.
class TranslateRequest(BaseModel):
    """Request body for /translate.

    text   -- the text to translate
    source -- source-language tag, prepended to the text before generation
    """

    text: str
    source: str
# NOTE(review): route decorator lost in the paste; "/" matches the welcome
# payload — confirm path against the original source.
@app.get("/")
async def home():
    """Health/info endpoint: API banner plus whether the model is loaded."""
    return {"message": "Efik Translation API", "model_loaded": model is not None}
# NOTE(review): route decorator lost in the paste; restored as POST /translate
# (matches the 503 message's wording) — confirm against the original source.
@app.post("/translate")
async def translate(req: TranslateRequest):
    """Translate req.text using the loaded seq2seq model.

    Raises HTTP 503 while the model is still loading (or failed to load) and
    HTTP 500 on any generation error; details are logged server-side only so
    internals are not leaked to clients.
    """
    if tokenizer is None or model is None:
        raise HTTPException(status_code=503, detail="Model is still loading or failed to load. Please try again in a moment.")
    try:
        # Presumably the model expects a "<source-lang-tag> <text>" prompt —
        # verify against the model card / training format.
        input_text = f"{req.source} {req.text}"
        inputs = tokenizer(input_text, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=128)
        translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"translation": translation}
    except Exception as e:
        logger.error(f"Translation error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal translation error.")