Spaces:
Runtime error
Runtime error
Amit-Shriram committed on
Commit ·
b334624
1
Parent(s): 92ff6a4
simple code
Browse files- .gitignore +5 -0
- Dockerfile +14 -0
- app/ai_responder.py +114 -0
- app/main.py +5 -0
- app/routes.py +28 -0
- app/session_manager.py +12 -0
- app/twilio_handler.py +154 -0
- app/websocket_manager.py +71 -0
- requirements.txt +10 -0
- simple.py +1 -0
.gitignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.venv
|
| 2 |
+
.env
|
| 3 |
+
__pycache__/
|
| 4 |
+
.vscode
|
| 5 |
+
venv
|
Dockerfile
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Use the official Python image
FROM python:3.11-slim

# Set the working directory
WORKDIR /app

# Copy the current directory contents into the container
# (fix: COPY requires both a source and a destination — bare `COPY .` is invalid)
COPY . .

# Install the dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Command to run the FastAPI app
# (fix: the FastAPI `app` object is defined in app/main.py, so the
#  uvicorn import string is `app.main:app`, not `main:app`)
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
|
app/ai_responder.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # from langchain_ollama import ChatOllama
|
| 2 |
+
# from langchain_core.prompts import PromptTemplate
|
| 3 |
+
# import os
|
| 4 |
+
|
| 5 |
+
# ollama_url = os.getenv("OLLAMA_BASE_URL")
|
| 6 |
+
|
| 7 |
+
# # llm = ChatOllama(model="mistral-nemo", temperature=0, base_url=ollama_url)
|
| 8 |
+
# from langchain_groq import ChatGroq
|
| 9 |
+
|
| 10 |
+
# llm = ChatGroq(
|
| 11 |
+
# model="llama-3.3-70b-versatile",
|
| 12 |
+
# temperature=0.2,
|
| 13 |
+
# api_key= "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",
|
| 14 |
+
# )
|
| 15 |
+
|
| 16 |
+
# qa_prompt = PromptTemplate(
|
| 17 |
+
# input_variables=["question"],
|
| 18 |
+
# template="""You are a helpful AI assistant. Answer the user's question in a friendly and shortly manner.
|
| 19 |
+
# Question: {question}
|
| 20 |
+
# """
|
| 21 |
+
# )
|
| 22 |
+
# qa_chain = qa_prompt | llm
|
| 23 |
+
|
| 24 |
+
# async def ai_answer(transcription_text):
|
| 25 |
+
# if transcription_text.strip() == "":
|
| 26 |
+
# return "I'm sorry, I didn't catch that. Could you please repeat?"
|
| 27 |
+
# ai_response = qa_chain.invoke({"question": transcription_text}).content
|
| 28 |
+
# print(f"AI Response: {ai_response}")
|
| 29 |
+
# return ai_response
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
from langchain_core.prompts import PromptTemplate
|
| 33 |
+
import os
|
| 34 |
+
from langchain_core.output_parsers import PydanticOutputParser
|
| 35 |
+
from typing import Optional, List, Dict
|
| 36 |
+
from pydantic import BaseModel, Field
|
| 37 |
+
|
| 38 |
+
class AIResponseModel(BaseModel):
    """Schema the LLM's JSON output is parsed into by the output parser."""

    # The assistant's reply text; Optional because the model may omit it.
    response: Optional[str] = Field(description="The AI's response to the user's query")
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Parser that coerces the LLM's JSON reply into an AIResponseModel instance.
classify_parser = PydanticOutputParser(pydantic_object=AIResponseModel)

ollama_url = os.getenv("OLLAMA_BASE_URL")

from langchain_groq import ChatGroq

# SECURITY FIX: the Groq API key was hard-coded here and committed to source
# control. Read it from the environment instead; the leaked key must be
# revoked/rotated on the Groq console.
llm = ChatGroq(
    model="llama-3.3-70b-versatile",
    temperature=0.2,
    api_key=os.getenv("GROQ_API_KEY"),
)

# Simple in-memory conversation history, keyed by session id.
# NOTE(review): process-local and unbounded — acceptable for a demo,
# not for production (lost on restart, grows without limit).
conversation_memory = {}

qa_prompt = PromptTemplate(
    input_variables=["history", "question"],
    template="""You are a helpful AI assistant named CentralogicAI Bot. Continue the conversation using the history below.
Answer to the user's query in short and friendly manner.
Also observe the user's intent and if the intent is to connect with the sales teams or person, then return a word "sales" in the response.
Return the response in proper JSON format with the following keys
"response":""

Conversation history:
{history}
User: {question}
Assistant:"""
)
# Pipeline: prompt -> Groq LLM -> JSON parsed into AIResponseModel.
qa_chain = qa_prompt | llm | classify_parser
|
| 71 |
+
|
| 72 |
+
# async def ai_answer(transcription_text, session_id="1234"):
|
| 73 |
+
# if transcription_text.strip() == "":
|
| 74 |
+
# return "I'm sorry, I didn't catch that. Could you please repeat?"
|
| 75 |
+
|
| 76 |
+
# # Get history for this session
|
| 77 |
+
# history = conversation_memory.get(session_id, [])
|
| 78 |
+
|
| 79 |
+
# # Prepare the history string (do NOT include the current input yet)
|
| 80 |
+
# history_str = "\n".join(history[-5:]) # Keep last 5 exchanges for brevity
|
| 81 |
+
|
| 82 |
+
# # Get AI response
|
| 83 |
+
# ai_response = qa_chain.invoke({"history": history_str, "question": transcription_text}).content
|
| 84 |
+
# print(f"AI Response: {ai_response}")
|
| 85 |
+
|
| 86 |
+
# # Now add the new user input and AI response to history
|
| 87 |
+
# history.append(f"User: {transcription_text}")
|
| 88 |
+
# history.append(f"Assistant: {ai_response}")
|
| 89 |
+
# conversation_memory[session_id] = history
|
| 90 |
+
|
| 91 |
+
# return ai_response
|
| 92 |
+
|
| 93 |
+
async def ai_answer(transcription_text, session_id="12345"):
    """Generate an AI reply to a transcribed utterance, with per-session memory.

    Fix: made async — the websocket handler calls `await ai_answer(...)`,
    and awaiting the previous sync function's `str` result raised TypeError.

    Args:
        transcription_text: The caller's transcribed speech.
        session_id: Key into the in-memory conversation history.

    Returns:
        The assistant's reply text (str), or a fallback prompt when the
        transcription is empty/whitespace.
    """
    if transcription_text.strip() == "":
        return "I'm sorry, I didn't catch that. Could you please repeat?"

    # Get history for this session
    history = conversation_memory.get(session_id, [])

    # Prepare the history string (do NOT include the current input yet)
    history_str = "\n".join(history[-5:])  # Keep last 5 exchanges for brevity

    # Get AI response; classify_parser yields an AIResponseModel instance.
    ai_response = qa_chain.invoke({"history": history_str, "question": transcription_text})

    # Now add the new user input and AI response to history.
    # Fix: store the reply text (.response), not the model object's repr.
    history.append(f"User: {transcription_text}")
    history.append(f"Assistant: {ai_response.response}")
    conversation_memory[session_id] = history

    return ai_response.response
|
app/main.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
from app.routes import router as api_router

# Application entry point: build the FastAPI app and mount the API routes.
app = FastAPI()
app.include_router(api_router)
|
app/routes.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, WebSocket, Request
from app.twilio_handler import handle_incoming_call
from app.websocket_manager import websocket_endpoint

router = APIRouter()


@router.post("/incoming-call")
async def twilio_call(request: Request):
    """Twilio voice webhook for a new inbound call; delegates to the handler."""
    return await handle_incoming_call(request)


@router.websocket("/ws/{session_id}")
async def websocket_route(websocket: WebSocket, session_id: str):
    """ConversationRelay websocket entry point for one call session."""
    print("Websocket session request...")
    await websocket_endpoint(websocket, session_id)


# ngrok http --url=natural-woodcock-bursting.ngrok-free.app 8000

# number 1= 719 402-2777
# number 2= 914 601 2553

# number 3= 812 608 4048
|
app/session_manager.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid

# In-memory registry of call sessions, keyed by a generated session id.
sessions = {}


def create_session(call_sid: str, from_number: str) -> str:
    """Register a new call session and return its generated session id."""
    sid = str(uuid.uuid4())
    sessions[sid] = {"call_sid": call_sid, "from": from_number}
    return sid


def get_session(session_id: str):
    """Look up a session dict by id; returns None when the id is unknown."""
    print(f"Current available sessions: {sessions}")
    return sessions.get(session_id)
|
app/twilio_handler.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from fastapi import Request, Response
|
| 2 |
+
# from twilio.twiml.voice_response import VoiceResponse, Connect
|
| 3 |
+
# from app.session_manager import create_session
|
| 4 |
+
# import os
|
| 5 |
+
# from dotenv import load_dotenv
|
| 6 |
+
# load_dotenv()
|
| 7 |
+
|
| 8 |
+
# ws_url = os.getenv("WS_URL")
|
| 9 |
+
|
| 10 |
+
# async def handle_incoming_call(request: Request):
|
| 11 |
+
# form = await request.form()
|
| 12 |
+
# call_sid = form.get('CallSid')
|
| 13 |
+
# print(f"Incoming call SID: {call_sid}")
|
| 14 |
+
# from_number = form.get('From')
|
| 15 |
+
|
| 16 |
+
# # Create a new session for this call
|
| 17 |
+
# session_id = create_session(call_sid, from_number)
|
| 18 |
+
|
| 19 |
+
# # Construct the TwiML response
|
| 20 |
+
# response = VoiceResponse()
|
| 21 |
+
# connect = Connect()
|
| 22 |
+
# connect.conversation_relay(
|
| 23 |
+
# url=f"{ws_url}/{session_id}",
|
| 24 |
+
# welcome_greeting="Hello! I am CentraLogic AI bot. How can I assist you today?",
|
| 25 |
+
# language="en-US",
|
| 26 |
+
# transcription_language="en-US",
|
| 27 |
+
# tts_provider="ElevenLabs",
|
| 28 |
+
# voice="XjLkpWUlnhS8i7gGz3lZ",
|
| 29 |
+
# tts_language="en-US",
|
| 30 |
+
# interruptible="false",
|
| 31 |
+
# # transcription_provider="Deepgram",
|
| 32 |
+
# )
|
| 33 |
+
# response.append(connect)
|
| 34 |
+
|
| 35 |
+
# return Response(content=str(response), media_type="application/xml")
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ######### Working code
|
| 39 |
+
# from fastapi import Request, Response
|
| 40 |
+
# from twilio.twiml.voice_response import VoiceResponse, Connect
|
| 41 |
+
# from app.session_manager import create_session
|
| 42 |
+
# import os
|
| 43 |
+
# from dotenv import load_dotenv
|
| 44 |
+
# load_dotenv()
|
| 45 |
+
|
| 46 |
+
# ws_url = os.getenv("WS_URL")
|
| 47 |
+
|
| 48 |
+
# async def handle_incoming_call(request: Request):
|
| 49 |
+
# form = await request.form()
|
| 50 |
+
# call_sid = form.get('CallSid')
|
| 51 |
+
# print(f"Incoming call SID: {call_sid}")
|
| 52 |
+
# from_number = form.get('From')
|
| 53 |
+
|
| 54 |
+
# # Create a new session for this call
|
| 55 |
+
# session_id = create_session(call_sid, from_number)
|
| 56 |
+
|
| 57 |
+
# # Construct the TwiML response
|
| 58 |
+
# response = VoiceResponse()
|
| 59 |
+
# connect = Connect()
|
| 60 |
+
# connect.conversation_relay(
|
| 61 |
+
# url=f"{ws_url}/{session_id}",
|
| 62 |
+
# welcome_greeting="Hello! I am CentraLogic AI bot. How can I assist you today?",
|
| 63 |
+
# language="en-US",
|
| 64 |
+
# transcription_language="en-US",
|
| 65 |
+
# tts_language="en-US",
|
| 66 |
+
# interruptible="false",
|
| 67 |
+
# debug=True,
|
| 68 |
+
# )
|
| 69 |
+
# response.append(connect)
|
| 70 |
+
|
| 71 |
+
# return Response(content=str(response), media_type="application/xml")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# ##### new code
|
| 76 |
+
from fastapi import Request, Response
from twilio.twiml.voice_response import VoiceResponse, Connect
from app.session_manager import create_session
import os
from dotenv import load_dotenv
load_dotenv()

ws_url = os.getenv("WS_URL")

async def handle_incoming_call(request: Request):
    """Twilio voice webhook.

    If the form carries a `Dial` number (handoff callback), return plain TwiML
    that dials that number. Otherwise start a ConversationRelay session bound
    to a freshly created in-memory session.

    Returns:
        A TwiML (XML) Response for Twilio to execute.
    """
    form = await request.form()
    call_sid = form.get('CallSid')
    print(f"Incoming call SID: {call_sid}")
    from_number = form.get('From')
    dial = form.get('Dial')
    # Fix: the original nested a second, redundant `if dial:` inside this one.
    if dial:
        xml = f"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
    <Dial>{dial}</Dial>
</Response>"""
        return Response(content=xml, media_type="text/xml")
    else:
        # Create a new session for this call
        session_id = create_session(call_sid, from_number)

        # Construct the TwiML response
        response = VoiceResponse()
        connect = Connect()
        connect.conversation_relay(
            url=f"{ws_url}/{session_id}",
            welcome_greeting="Hello! I am CentraLogic AI bot. How can I assist you today?",
            language="en-US",
            transcription_language="en-US",
            tts_language="en-US",
            interruptible="false",
            debug=True,
        )
        response.append(connect)

        return Response(content=str(response), media_type="application/xml")
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
######### respone = VoiceResponse()
|
| 120 |
+
######### response.dial()
|
| 121 |
+
|
| 122 |
+
# from app.session_manager import create_session
|
| 123 |
+
# from fastapi import Request, Response
|
| 124 |
+
# from twilio.twiml.voice_response import Connect, ConversationRelay, Language, VoiceResponse
|
| 125 |
+
# from dotenv import load_dotenv
|
| 126 |
+
# load_dotenv()
|
| 127 |
+
# import os
|
| 128 |
+
|
| 129 |
+
# ws_url = os.getenv("WS_URL")
|
| 130 |
+
|
| 131 |
+
# async def handle_incoming_call(request: Request):
|
| 132 |
+
# form = await request.form()
|
| 133 |
+
# call_sid = form.get('CallSid')
|
| 134 |
+
# print(f"Incoming call SID: {call_sid}")
|
| 135 |
+
# from_number = form.get('From')
|
| 136 |
+
|
| 137 |
+
# # Create a new session for this call
|
| 138 |
+
# session_id = create_session(call_sid, from_number)
|
| 139 |
+
|
| 140 |
+
# # Construct the TwiML response
|
| 141 |
+
# response = VoiceResponse()
|
| 142 |
+
# connect = Connect()
|
| 143 |
+
# connect.conversation_relay(
|
| 144 |
+
# url=f"{ws_url}/{session_id}",
|
| 145 |
+
# welcome_greeting="Hello! I am CentraLogic AI bot.",
|
| 146 |
+
# )
|
| 147 |
+
# response.append(connect)
|
| 148 |
+
|
| 149 |
+
# return Response(content=str(response), media_type="application/xml")
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
# transcriptionProvider, speechModel, transcriptionLanguage
|
| 154 |
+
# Twilio uses a range of providers, including Google, Amazon, and Deepgram, for speech-to-text conversions
|
app/websocket_manager.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import WebSocket, WebSocketDisconnect
|
| 2 |
+
from app.ai_responder import ai_answer
|
| 3 |
+
from app.session_manager import get_session
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
SALES_NUMBER = "+91 7249477972"

class ConnectionManager:
    """Tracks active ConversationRelay websockets keyed by session id and
    drives the message loop for one call."""

    def __init__(self):
        # session_id -> WebSocket
        self.active_connections = {}

    async def connect(self, websocket: WebSocket, session_id: str):
        """Accept the websocket and register it under its session id."""
        await websocket.accept()
        self.active_connections[session_id] = websocket

    def disconnect(self, session_id: str):
        """Forget the websocket for this session (no-op if unknown)."""
        self.active_connections.pop(session_id, None)

    async def receive_audio(self, session_id: str):
        """Main receive loop: handle setup/prompt/interrupt messages until
        the socket disconnects or a sales handoff ends the session."""
        websocket = self.active_connections.get(session_id)
        session = get_session(session_id)
        if not session:
            print(f"Session {session_id} not found, disconnecting...")
            print(f"current active connections: {self.active_connections}")
        if websocket:
            try:
                while True:
                    message = await websocket.receive_json()
                    print(f"Received message: {message}")
                    message_type = message.get("type")
                    print(f"Received message type: {message_type}")

                    if message_type == "setup":
                        print(f"This is the setup message:- {message}")
                        print(f"Setup message received for session {session_id}.")

                    elif message_type == "prompt":
                        user_input = message["voicePrompt"]
                        print(f"Received transcription:- {user_input}")
                        ai_response = await ai_answer(user_input)
                        # Fix: the original called `ai_answer.lower()` — i.e.
                        # `.lower()` on the *function object*, which raised
                        # AttributeError. Lowercase the reply text instead
                        # (str() guards against a None response field).
                        lower = str(ai_response).lower()
                        if "sales" in lower:
                            dept = SALES_NUMBER
                        else:
                            dept = None

                        if dept:
                            await websocket.send_text(json.dumps({
                                "type": "handoff",
                                "dialNumber": dept
                            }))
                            break  # end session so Twilio exits ConversationRelay
                        await websocket.send_json({
                            "type": "text",
                            "token": ai_response,
                            "last": True  # Assuming this is the last message in the conversation
                        })

                    elif message_type == "interrupt":
                        print(f"Interrupt message received for session {session_id}.")
                        # break
                        continue

            except WebSocketDisconnect:
                self.disconnect(session_id)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# Module-level singleton shared by all websocket routes.
manager = ConnectionManager()


async def websocket_endpoint(websocket: WebSocket, session_id: str):
    """Accept the incoming websocket, then run its receive loop to completion."""
    await manager.connect(websocket, session_id)
    await manager.receive_audio(session_id)
|
requirements.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi[all]==0.109.0
|
| 2 |
+
uvicorn[standard]==0.27.0
|
| 3 |
+
gunicorn==21.2.0
|
| 4 |
+
twilio
|
| 5 |
+
python-dotenv
|
| 6 |
+
ffmpeg-python
|
| 7 |
+
langchain-ollama
|
| 8 |
+
langchain-core
|
| 9 |
+
numpy
|
| 10 |
+
langchain_groq
|
simple.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# Minimal smoke-test script: prints a greeting to stdout.
print("Hello")
|