File size: 3,365 Bytes
7b30cad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import os
import sys
import tempfile
from typing import Optional

import uvicorn
from fastapi import FastAPI, File, Form, UploadFile
from starlette.responses import JSONResponse

# --- Import AI Functions ---
# Now that Detection.py is in the same directory, the import is straightforward.
try:
    from models.Detection import load_model_and_assets, process_image_and_predict, process_video_and_predict_realtime, MIN_CONFIDENCE_THRESHOLD
    print("Successfully imported functions from Detection.py")
except ImportError as e:
    print(f"ERROR: Could not import from Detection.py. Please ensure the file is in the same repository as app.py: {e}")
    # It's good practice to exit if a critical import fails
    sys.exit(1)

# --- FastAPI App Initialization ---
app = FastAPI()

# --- Load AI Model and Assets on Startup ---
# This function, located in your Detection.py, should be updated to correctly
# reference the files within the 'models' subfolder of your Hugging Face Space.
print("\n--- Hugging Face Space starting: Loading AI model and assets... ---")
load_model_and_assets()
print("--- AI model and assets loaded. Ready to serve predictions. ---\n")

# --- FastAPI Routes ---
# The rest of the routes remain the same, as they now correctly call the functions
# from your Detection.py script.

@app.post("/process-image")
async def process_image_api(
    image: UploadFile = File(...),
    min_confidence: Optional[float] = Form(MIN_CONFIDENCE_THRESHOLD)
):
    try:
        contents = await image.read()
        temp_filepath = f"/tmp/{image.filename}"
        with open(temp_filepath, "wb") as f:
            f.write(contents)
        
        action, confidence = process_image_and_predict(temp_filepath, min_confidence)
        os.remove(temp_filepath)

        response = {
            "sign": action if action else "UNKNOWN",
            "confidence": round(float(confidence), 2),
            "success": True,
            "filename": image.filename
        }
        return JSONResponse(content=response)
    
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "error": "Error processing image with AI model",
                "details": str(e),
                "success": False
            }
        )

@app.post("/process-video")
async def process_video_api(
    video: UploadFile = File(...),
    min_confidence: Optional[float] = Form(MIN_CONFIDENCE_THRESHOLD)
):
    try:
        contents = await video.read()
        temp_filepath = f"/tmp/{video.filename}"
        with open(temp_filepath, "wb") as f:
            f.write(contents)
            
        action, confidence = process_video_and_predict_realtime(temp_filepath, min_confidence)
        os.remove(temp_filepath)
        
        response = {
            "phrase": action if action else "UNKNOWN",
            "confidence": round(float(confidence), 2),
            "success": True,
            "filename": video.filename
        }
        return JSONResponse(content=response)
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "error": "Error processing video with AI model",
                "details": str(e),
                "success": False
            }
        )

#just added
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)