Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -247,6 +247,24 @@ async def llm_agent(query: LLMAgentQueryModel, background_tasks: BackgroundTasks
|
|
| 247 |
|
| 248 |
return StreamingResponse(process_response(), media_type="text/event-stream")
|
| 249 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 250 |
# ============================================================================
|
| 251 |
# Main Execution
|
| 252 |
# ============================================================================
|
|
|
|
| 247 |
|
| 248 |
return StreamingResponse(process_response(), media_type="text/event-stream")
|
| 249 |
|
| 250 |
+
import edge_tts
|
| 251 |
+
import io
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
@app.get("/tts")
async def text_to_speech(
    text: str = Query(..., description="Text to convert to speech"),
    voice: str = Query(default="en-GB-SoniaNeural", description="Voice to use for speech")
):
    """Stream synthesized speech for *text* as an MP3 audio response.

    Query parameters:
        text:  the text to synthesize (required).
        voice: the edge-tts voice name to use; defaults to
               "en-GB-SoniaNeural".

    Returns:
        StreamingResponse with media type "audio/mpeg" whose body is the
        raw audio bytes yielded incrementally by edge_tts.
    """
    # Build the TTS request; no audio is produced until .stream() is
    # iterated inside the generator below.
    communicate = edge_tts.Communicate(text, voice)

    async def generate():
        # edge_tts emits typed chunks; anything whose "type" is not
        # "audio" (e.g. metadata) is skipped so only audio bytes reach
        # the client.
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                yield chunk["data"]

    # Stream chunks to the client as they arrive instead of buffering
    # the whole synthesis in memory.
    return StreamingResponse(generate(), media_type="audio/mpeg")
|
| 267 |
+
|
| 268 |
# ============================================================================
|
| 269 |
# Main Execution
|
| 270 |
# ============================================================================
|