Spaces:
Sleeping
Sleeping
mutarisi
committed on
Commit
·
40cff7e
1
Parent(s):
e2c1418
added glossController
Browse files- app.py +25 -3
- glossController.py +19 -0
app.py
CHANGED
|
@@ -6,10 +6,11 @@ from typing import List
|
|
| 6 |
from starlette.responses import JSONResponse
|
| 7 |
import tempfile
|
| 8 |
|
| 9 |
-
# Correct import statements for
|
| 10 |
try:
|
| 11 |
from lettersController import detectFromImage
|
| 12 |
from wordsController import detectWords
|
|
|
|
| 13 |
print("Successfully imported functions from controllers.")
|
| 14 |
except ImportError as e:
|
| 15 |
print(f"ERROR: Could not import from controllers: {e}")
|
|
@@ -22,7 +23,7 @@ app = FastAPI()
|
|
| 22 |
# The models are loaded when the controller modules are imported.
|
| 23 |
# It's a good practice to print a message to confirm this.
|
| 24 |
print("\n--- Hugging Face Space starting. Models are being loaded... ---")
|
| 25 |
-
print("--- AI letter and
|
| 26 |
|
| 27 |
# --- FastAPI Route for Letter Detection ---
|
| 28 |
@app.post("/detect-letters")
|
|
@@ -118,6 +119,27 @@ async def process_words(
|
|
| 118 |
os.remove(path)
|
| 119 |
os.rmdir(temp_dir)
|
| 120 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
if __name__ == "__main__":
|
| 122 |
# Hugging Face Spaces automatically sets the port to 7860
|
| 123 |
-
uvicorn.run(app, host="0.0.0.0", port=7860)
|
|
|
|
| 6 |
from starlette.responses import JSONResponse
|
| 7 |
import tempfile
|
| 8 |
|
| 9 |
+
# Correct import statements for all controllers
|
| 10 |
try:
|
| 11 |
from lettersController import detectFromImage
|
| 12 |
from wordsController import detectWords
|
| 13 |
+
from glossController import translateGloss
|
| 14 |
print("Successfully imported functions from controllers.")
|
| 15 |
except ImportError as e:
|
| 16 |
print(f"ERROR: Could not import from controllers: {e}")
|
|
|
|
| 23 |
# The models are loaded when the controller modules are imported.
|
| 24 |
# It's a good practice to print a message to confirm this.
|
| 25 |
print("\n--- Hugging Face Space starting. Models are being loaded... ---")
|
| 26 |
+
print("--- AI letter, words, and translation models loaded. Ready to serve predictions. ---\n")
|
| 27 |
|
| 28 |
# --- FastAPI Route for Letter Detection ---
|
| 29 |
@app.post("/detect-letters")
|
|
|
|
| 119 |
os.remove(path)
|
| 120 |
os.rmdir(temp_dir)
|
| 121 |
|
# --- FastAPI Route for Sentence Translation ---
@app.post("/sentence")
async def process_sentence(
    gloss: str = Form(...)
):
    """
    Receives a string of gloss and translates it into a human-readable sentence.
    """
    try:
        # Delegate the actual seq2seq translation to the gloss controller.
        translation = translateGloss(gloss)
    except Exception as exc:
        # Surface the failure to the client as a structured 500 response.
        failure_payload = {
            "error": "Error translating gloss",
            "details": str(exc),
            "success": False
        }
        return JSONResponse(status_code=500, content=failure_payload)
    return JSONResponse(content={"translation": translation, "success": True})
| 143 |
if __name__ == "__main__":
|
| 144 |
# Hugging Face Spaces automatically sets the port to 7860
|
| 145 |
+
uvicorn.run(app, host="0.0.0.0", port=7860)
|
glossController.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from functools import lru_cache

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch


@lru_cache(maxsize=2)
def _load_model(model_id: str):
    """Load and cache the (tokenizer, model) pair for *model_id*.

    The original implementation re-downloaded and re-instantiated the
    tokenizer and model on every call to translateGloss, which is very
    expensive for a per-request endpoint. Caching makes the cost one-time
    per model id.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id, trust_remote_code=True)
    model.eval()
    if torch.cuda.is_available():
        model = model.to("cuda")
    return tokenizer, model


def translateGloss(gloss: str, model_id: str = "rrrr66254/Glossa-BART") -> str:
    """Translate a gloss string into a human-readable sentence.

    Args:
        gloss: The gloss text to translate.
        model_id: Hugging Face model id of the seq2seq translation model.

    Returns:
        The decoded translation (greedy decoding, up to 50 new tokens).
    """
    tokenizer, model = _load_model(model_id)

    inputs = tokenizer(gloss, return_tensors="pt", padding=True, truncation=True)
    if torch.cuda.is_available():
        inputs = {k: v.to("cuda") for k, v in inputs.items()}

    # no_grad: inference only — skip building autograd graphs.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=50, do_sample=False)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)