# handsUp-backend / upload.py
# Author: mutarisi — commit "add app file" (de8ea8e)
import os
import sys
import uvicorn
from fastapi import FastAPI, UploadFile, File, Form
from typing import Optional
from starlette.responses import JSONResponse
# --- AI function imports ---
# Detection.py lives in the local `models` package; a failed import here
# means the service cannot do anything useful, so we abort immediately.
try:
    from models.Detection import load_model_and_assets, process_image_and_predict, process_video_and_predict_realtime, MIN_CONFIDENCE_THRESHOLD
    print("Successfully imported functions from Detection.py")
except ImportError as e:
    print(f"ERROR: Could not import from Detection.py. Please ensure the file is in the same repository as app.py: {e}")
    # It's good practice to exit if a critical import fails
    sys.exit(1)

# --- FastAPI application ---
app = FastAPI()

# --- Load AI model and assets at import time (startup) ---
# load_model_and_assets() comes from Detection.py and is expected to
# resolve its asset paths relative to the 'models' subfolder of the
# Hugging Face Space.
print("\n--- Hugging Face Space starting: Loading AI model and assets... ---")
load_model_and_assets()
print("--- AI model and assets loaded. Ready to serve predictions. ---\n")
@app.post("/process-image")
async def process_image_api(
    image: UploadFile = File(...),
    min_confidence: Optional[float] = Form(MIN_CONFIDENCE_THRESHOLD)
):
    """Run sign detection on an uploaded image.

    The upload is written to a temp file under /tmp, handed to
    process_image_and_predict, and the predicted sign plus confidence is
    returned as JSON. On any failure a 500 JSON error payload is returned.
    """
    temp_filepath = None
    try:
        contents = await image.read()
        # basename() strips any directory components a malicious client could
        # smuggle into the filename (e.g. "../../etc/passwd") — the original
        # code interpolated the raw filename into the /tmp path.
        safe_name = os.path.basename(image.filename or "upload.img")
        temp_filepath = f"/tmp/{safe_name}"
        with open(temp_filepath, "wb") as f:
            f.write(contents)
        action, confidence = process_image_and_predict(temp_filepath, min_confidence)
        response = {
            "sign": action if action else "UNKNOWN",
            "confidence": round(float(confidence), 2),
            "success": True,
            "filename": image.filename
        }
        return JSONResponse(content=response)
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "error": "Error processing image with AI model",
                "details": str(e),
                "success": False
            }
        )
    finally:
        # The original removed the temp file only on the success path, leaking
        # it whenever prediction raised; always clean up here.
        if temp_filepath and os.path.exists(temp_filepath):
            os.remove(temp_filepath)
@app.post("/process-video")
async def process_video_api(
    video: UploadFile = File(...),
    min_confidence: Optional[float] = Form(MIN_CONFIDENCE_THRESHOLD)
):
    """Run real-time sign/phrase detection on an uploaded video.

    The upload is written to a temp file under /tmp, handed to
    process_video_and_predict_realtime, and the predicted phrase plus
    confidence is returned as JSON. On any failure a 500 JSON error payload
    is returned.
    """
    temp_filepath = None
    try:
        contents = await video.read()
        # basename() strips any directory components a malicious client could
        # smuggle into the filename (path traversal) — the original code
        # interpolated the raw filename into the /tmp path.
        safe_name = os.path.basename(video.filename or "upload.vid")
        temp_filepath = f"/tmp/{safe_name}"
        with open(temp_filepath, "wb") as f:
            f.write(contents)
        action, confidence = process_video_and_predict_realtime(temp_filepath, min_confidence)
        response = {
            "phrase": action if action else "UNKNOWN",
            "confidence": round(float(confidence), 2),
            "success": True,
            "filename": video.filename
        }
        return JSONResponse(content=response)
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "error": "Error processing video with AI model",
                "details": str(e),
                "success": False
            }
        )
    finally:
        # The original removed the temp file only on the success path, leaking
        # it whenever prediction raised; always clean up here.
        if temp_filepath and os.path.exists(temp_filepath):
            os.remove(temp_filepath)
# Script entry point for running the server directly.
if __name__ == "__main__":
    # Hugging Face Spaces route traffic to port 7860 by convention.
    uvicorn.run(app, host="0.0.0.0", port=7860)