Hugging Face Spaces listing (scrape artifact) — Space status: Sleeping.
| from fastapi import FastAPI, HTTPException | |
| from pydantic import BaseModel | |
| from typing import List | |
| import uvicorn | |
| from fastapi.middleware.cors import CORSMiddleware | |
| import pickle | |
| import numpy as np | |
# Load the trained classifier and its label encoder once, at import time.
# NOTE(review): pickle.load executes arbitrary code from the file — model.p
# must come from a trusted source.
with open('model.p', 'rb') as model_file:
    _artifacts = pickle.load(model_file)

model = _artifacts['model']
label_encoder = _artifacts['label_encoder']
app = FastAPI(title="Sign Language Detection API")

# CORS: allow the browser-based React frontend (Vercel deployment or local
# dev server on port 3000) to call this API cross-origin.
origins = [
    # Vercel production URL (no trailing slash)
    "https://sign-language-speller-43v971lbk-brians-projects-154a47fb.vercel.app",
    # Local development URLs
    "http://localhost:3000",
    "http://127.0.0.1:3000",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class Landmark(BaseModel):
    """A single hand landmark in normalized coordinates, as sent by the frontend."""

    # z is accepted for completeness but is not used by the current
    # feature extraction (normalize_landmarks only reads x and y).
    x: float
    y: float
    z: float
class PredictionRequest(BaseModel):
    """Request body for the prediction endpoint: the hand landmarks for one frame."""

    landmarks: List[Landmark]
def normalize_landmarks(landmarks):
    """Translate landmarks so the wrist (landmark 0) sits at the origin.

    Returns a flat list [x0', y0', x1', y1', ...] of 2 values per landmark.
    The z coordinate is not used — presumably the model was trained on
    2-D features only (TODO confirm against the training pipeline).
    """
    origin = landmarks[0]
    return [
        delta
        for lm in landmarks
        for delta in (lm.x - origin.x, lm.y - origin.y)
    ]
# FIX: the route decorator was missing, so this coroutine was never
# registered with FastAPI and the health-check endpoint was unreachable.
@app.get("/")
async def root():
    """Health-check endpoint confirming the API is running."""
    return {"message": "Sign Language API is running"}
# FIX: the route decorator was missing, so this handler was never registered.
@app.post("/predict")
async def predict(request: PredictionRequest):
    """Receive 21 hand landmarks and return the predicted alphabet character.

    Raises:
        HTTPException 400: if the request does not contain exactly 21 landmarks.
        HTTPException 500: if the model fails during prediction.
    """
    landmarks = request.landmarks

    # FIX: validate BEFORE the try block — previously this 400 was caught by
    # the broad `except Exception` below and re-raised as a 500.
    if len(landmarks) != 21:
        raise HTTPException(status_code=400, detail="Exactly 21 landmarks are required")

    try:
        # Flatten the 21 landmarks into the single (1, 42) feature row
        # the classifier expects (2 values per landmark, wrist-relative).
        model_input = np.array(normalize_landmarks(landmarks)).reshape(1, -1)
        prediction = model.predict(model_input)
        detected_char = label_encoder.inverse_transform(prediction)[0]
    except Exception as e:
        # Surface any model/encoding failure as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(e)) from e

    return {
        "alphabet": detected_char,
        "status": "success"
    }
if __name__ == "__main__":
    # Port 5000 matches the URL the React frontend's fetch call targets.
    uvicorn.run(app, host="0.0.0.0", port=5000)