OffiongBassey committed on
Commit
934ccda
·
1 Parent(s): accb6e8

New Commit

Browse files
Files changed (2) hide show
  1. app.py +37 -13
  2. requirements.txt +7 -5
app.py CHANGED
@@ -1,27 +1,51 @@
1
- from fastapi import FastAPI, Request
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
- import torch
5
- import os
 
 
 
 
6
 
7
  app = FastAPI()
8
 
9
- # Load the model once at startup
10
- tokenizer = AutoTokenizer.from_pretrained("offiongbassey/efik-mt")
11
- model = AutoModelForSeq2SeqLM.from_pretrained("offiongbassey/efik-mt")
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  class TranslateRequest(BaseModel):
14
  text: str
15
- source: str # "eng_Latn" or "ibo_Latn"
16
 
17
  @app.get("/")
18
  async def home():
19
- return {"message": "Welcome! The Efik translation API is running."}
20
 
21
  @app.post("/translate")
22
  async def translate(req: TranslateRequest):
23
- input_text = f"{req.source} {req.text}"
24
- inputs = tokenizer(input_text, return_tensors="pt")
25
- outputs = model.generate(**inputs, max_length=128)
26
- translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
27
- return {"translation": translation}
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
+ import logging
5
+ import sys
6
+
7
+ # Set up logging to see errors in Space logs
8
+ logging.basicConfig(level=logging.INFO, stream=sys.stdout)
9
+ logger = logging.getLogger(__name__)
10
 
11
  app = FastAPI()
12
 
13
+ # Declare model/tokenizer globally, load them in startup
14
+ tokenizer = None
15
+ model = None
16
+
17
@app.on_event("startup")
async def load_model():
    """Load the tokenizer and model once when the application starts.

    Populates the module-level ``tokenizer`` and ``model`` globals.
    Failures are logged rather than raised so the app still comes up;
    ``/translate`` returns 503 until both globals are populated.
    """
    global tokenizer, model
    try:
        logger.info("Loading tokenizer and model...")
        # NOTE(review): local_files_only=True skips all hub lookups, so this
        # only works if the model is already present in the local HF cache —
        # confirm the Space image bundles it, otherwise loading always fails.
        tokenizer = AutoTokenizer.from_pretrained("offiongbassey/efik-mt", local_files_only=True)
        model = AutoModelForSeq2SeqLM.from_pretrained("offiongbassey/efik-mt", local_files_only=True)
        logger.info("✅ Model loaded successfully!")
    except Exception:
        # logger.exception records the traceback; deliberately not re-raised
        # so the endpoint can surface a 503 instead of the app crashing.
        logger.exception("❌ Failed to load model")
29
 
30
class TranslateRequest(BaseModel):
    """Request body for ``/translate``: the text to translate plus a
    source-language tag (e.g. "eng_Latn")."""

    text: str
    source: str  # "eng_Latn"
33
 
34
@app.get("/")
async def home():
    """Health-check endpoint confirming the API process is up."""
    payload = {"message": "Efik Translation API is running.", "status": "healthy"}
    return payload
37
 
38
@app.post("/translate")
async def translate(req: TranslateRequest):
    """Translate ``req.text``, prefixing the ``req.source`` language tag.

    Returns ``{"translation": ...}``.

    Raises:
        HTTPException 503: model/tokenizer not (yet) loaded.
        HTTPException 500: any error during tokenization or generation.
    """
    # Startup loads the model asynchronously; refuse requests until ready.
    if tokenizer is None or model is None:
        raise HTTPException(status_code=503, detail="Model is still loading or failed to load.")

    try:
        # The model expects the source-language tag prepended to the text.
        input_text = f"{req.source} {req.text}"
        inputs = tokenizer(input_text, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=128)
        translation = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"translation": translation}
    except Exception as e:
        # logger.exception logs the full traceback (lazy formatting, no
        # eager f-string interpolation on the error path).
        logger.exception("Translation error")
        raise HTTPException(status_code=500, detail=f"Translation failed: {str(e)}")
requirements.txt CHANGED
@@ -1,5 +1,7 @@
1
- fastapi
2
- uvicorn[standard]
3
- transformers
4
- torch
5
- huggingface_hub
 
 
 
1
+ fastapi==0.104.1
2
+ uvicorn[standard]==0.24.0
3
+ transformers==4.36.2
4
+ torch==2.1.2
5
+ huggingface_hub==0.20.3
6
+ sentencepiece==0.1.99 # required by the tokenizer (SentencePiece-based) — loading fails without it
7
+ accelerate==0.26.1 # <-- For efficient model loading