Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -36,7 +36,6 @@ async def predict_emotion_api(request: Request):
|
|
| 36 |
|
| 37 |
# Robustly strip the prefix to get the pure base64 data
|
| 38 |
try:
|
| 39 |
- # Find the comma that separates the prefix from the data
|
| 40 |
header, encoded = base64_with_prefix.split(",", 1)
|
| 41 |
audio_data = base64.b64decode(encoded)
|
| 42 |
except (ValueError, TypeError):
|
|
@@ -50,9 +49,7 @@ async def predict_emotion_api(request: Request):
|
|
| 50 |
results = classifier(temp_audio_path)
|
| 51 |
os.unlink(temp_audio_path) # Clean up the temp file
|
| 52 |
|
| 53 |
- #
|
| 54 |
- # Example: [{'score': 0.99, 'label': 'happy'}, {'score': 0.01, 'label': 'sad'}]
|
| 55 |
- # We will return this directly
|
| 56 |
return JSONResponse(content={"data": results})
|
| 57 |
|
| 58 |
except Exception as e:
|
|
@@ -65,10 +62,8 @@ async def predict_emotion_api(request: Request):
|
|
| 65 |
def gradio_predict_wrapper(audio_file_path):
|
| 66 |
if classifier is None: return {"error": f"Model is not loaded: {model_load_error}"}
|
| 67 |
if audio_file_path is None: return {"error": "Please provide an audio file."}
|
| 68 |
-
|
| 69 |
try:
|
| 70 |
results = classifier(audio_file_path, top_k=5)
|
| 71 |
- # Format for Gradio's Label component
|
| 72 |
return {item['label']: item['score'] for item in results}
|
| 73 |
except Exception as e:
|
| 74 |
return {"error": str(e)}
|
|
@@ -86,6 +81,5 @@ gradio_interface = gr.Interface(
|
|
| 86 |
app = gr.mount_gradio_app(app, gradio_interface, path="/gradio")
|
| 87 |
|
| 88 |
# The Uvicorn server launch command (used by Hugging Face Spaces)
|
| 89 |
- # This is the ONLY launch command needed.
|
| 90 |
if __name__ == "__main__":
|
| 91 |
uvicorn.run(app, host="0.0.0.0", port=7860)
|
|
|
|
| 36 |
|
| 37 |
# Robustly strip the prefix to get the pure base64 data
|
| 38 |
try:
|
|
|
|
| 39 |
header, encoded = base64_with_prefix.split(",", 1)
|
| 40 |
audio_data = base64.b64decode(encoded)
|
| 41 |
except (ValueError, TypeError):
|
|
|
|
| 49 |
results = classifier(temp_audio_path)
|
| 50 |
os.unlink(temp_audio_path) # Clean up the temp file
|
| 51 |
|
| 52 |
+ # Return a successful response with the data
|
|
|
|
|
|
|
| 53 |
return JSONResponse(content={"data": results})
|
| 54 |
|
| 55 |
except Exception as e:
|
|
|
|
| 62 |
def gradio_predict_wrapper(audio_file_path):
|
| 63 |
if classifier is None: return {"error": f"Model is not loaded: {model_load_error}"}
|
| 64 |
if audio_file_path is None: return {"error": "Please provide an audio file."}
|
|
|
|
| 65 |
try:
|
| 66 |
results = classifier(audio_file_path, top_k=5)
|
|
|
|
| 67 |
return {item['label']: item['score'] for item in results}
|
| 68 |
except Exception as e:
|
| 69 |
return {"error": str(e)}
|
|
|
|
| 81 |
app = gr.mount_gradio_app(app, gradio_interface, path="/gradio")
|
| 82 |
|
| 83 |
# The Uvicorn server launch command (used by Hugging Face Spaces)
|
|
|
|
| 84 |
if __name__ == "__main__":
|
| 85 |
uvicorn.run(app, host="0.0.0.0", port=7860)
|