Spaces:
Sleeping
Sleeping
File size: 1,194 Bytes
import os
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
# Hugging Face Spaces only permits writes under /tmp, so point every
# cache-related knob at a directory there before any model files are fetched.
# NOTE(review): transformers was already imported above, so these env vars may
# be read too late by that library itself — the explicit cache_dir= arguments
# below are what actually guarantee the download location. TODO confirm.
CACHE_DIR = "/tmp/huggingface_cache"
os.makedirs(CACHE_DIR, exist_ok=True)
os.environ.update({
    "TRANSFORMERS_CACHE": CACHE_DIR,
    "HF_HOME": CACHE_DIR,
    "HF_HUB_DISABLE_SYMLINKS": "1",  # symlinks trigger permission errors on Spaces
})

# Fetch (or reuse from cache) the M2M100 model and wire it into a pipeline.
MODEL_NAME = "facebook/m2m100_418M"
print("Downloading model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, cache_dir=CACHE_DIR)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, cache_dir=CACHE_DIR)
translator = pipeline("translation", model=model, tokenizer=tokenizer)
print("Model loaded successfully!")
# ✅ FastAPI App — the ASGI application object the server process serves.
app = FastAPI()
class TextRequest(BaseModel):
    """JSON request body for the /translate endpoint."""

    # The text to translate; the endpoint currently translates en -> fr.
    text: str
@app.post("/translate")
def translate_text(request: TextRequest, src_lang: str = "en", tgt_lang: str = "fr"):
    """Translate ``request.text`` and return the result as JSON.

    The language pair was previously hard-coded to en -> fr; it is now
    configurable via optional query parameters whose defaults preserve the
    old behavior, so existing callers are unaffected.

    Args:
        request: JSON body carrying the text to translate.
        src_lang: source language code (query parameter, default "en").
        tgt_lang: target language code (query parameter, default "fr").

    Returns:
        ``{"translated_text": ...}`` with the pipeline's first hypothesis.
    """
    # M2M100 is a many-to-many model, so any supported pair works here.
    result = translator(request.text, src_lang=src_lang, tgt_lang=tgt_lang)
    translated = result[0]["translation_text"]
    return {"translated_text": translated}
@app.get("/")
def home():
    """Health-check endpoint: confirms the service is up and responding."""
    status_message = "Translation server is running!"
    return {"message": status_message}