iajitpanday commited on
Commit
5b532b5
·
verified ·
1 Parent(s): 581e023

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +405 -201
app.py CHANGED
@@ -1,229 +1,433 @@
1
- """
2
- Simplified Flask application for the AI call assistant system that doesn't rely on pipecat.
3
- """
4
-
5
- from flask import Flask, request
6
- from twilio.twiml.voice_response import VoiceResponse, Gather
7
  import os
8
- import requests
9
  import json
10
- import logging
11
- from datetime import datetime
12
- from utils import transcribe_audio, classify_intent, get_rag_response, text_to_speech, get_fallback_response
13
-
14
- # Configure logging
15
- logging.basicConfig(
16
- level=logging.INFO,
17
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
18
- handlers=[
19
- logging.FileHandler("call_assistant.log"),
20
- logging.StreamHandler()
21
- ]
22
- )
23
- logger = logging.getLogger(__name__)
24
-
25
- app = Flask(__name__)
26
-
27
- # Twilio credentials
28
- TWILIO_ACCOUNT_SID = "[REDACTED — credential was committed publicly; rotate it in the Twilio console]"
29
- TWILIO_AUTH_TOKEN = "[REDACTED — credential was committed publicly; rotate it in the Twilio console]"
30
- TWILIO_PHONE_NUMBER = "+19704064410"
31
-
32
- # Hugging Face Space URL
33
- HF_SPACE_URL = "https://huggingface.co/spaces/iajitpanday/vBot-1.5"
34
-
35
- # Session storage (in production, replace with a database)
36
- call_sessions = {}
37
-
38
- @app.route("/", methods=['GET'])
39
- def index():
40
- return "AI Call Assistant is running!"
41
-
42
- @app.route("/answer", methods=['POST'])
43
- def answer_call():
44
- """Handle incoming Twilio calls"""
45
- call_sid = request.values.get('CallSid')
46
- caller = request.values.get('From', 'Unknown')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- logger.info(f"Received call from {caller} with SID: {call_sid}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- # Initialize session for this call
51
- call_sessions[call_sid] = {
52
- 'caller': caller,
53
- 'start_time': datetime.now().isoformat(),
54
- 'conversation': []
55
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
- response = VoiceResponse()
 
 
 
 
 
58
 
59
- # Welcome message
60
- response.say("Hello! Thank you for calling. How can I help you today?")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
- # Gather speech input
63
- gather = Gather(input='speech',
64
- action='/process_speech',
65
- method='POST',
66
- speechTimeout='auto',
67
- speechModel='phone_call',
68
- language='en-US')
69
 
70
- response.append(gather)
 
 
71
 
72
- # If no input is received
73
- response.say("I didn't hear anything. Please call back when you're ready.")
74
- response.hangup()
75
 
76
- return str(response)
77
-
78
- def handle_call(audio_url, transcription):
79
- """Process the call audio and transcription directly without pipecat"""
80
  try:
81
- # Step 1: Use transcription or enhance with Whisper if needed
82
- if not transcription:
83
- transcription = transcribe_audio(audio_url)
84
 
85
- # Step 2: Classify intent
86
- intent, confidence = classify_intent(transcription)
87
 
88
- # Step 3: Generate response
89
- if confidence >= 0.6 and transcription:
90
- response = get_rag_response(transcription, intent, HF_SPACE_URL)
 
 
 
 
 
 
 
 
 
 
91
  else:
92
- response = get_fallback_response()
93
-
94
- return {
95
- "transcription": transcription,
96
- "intent": intent,
97
- "confidence": confidence,
98
- "response": response
99
- }
100
  except Exception as e:
101
- logger.error(f"Error handling call: {e}")
102
- return {
103
- "transcription": transcription,
104
- "intent": "error",
105
- "confidence": 0.0,
106
- "response": get_fallback_response()
107
- }
108
-
109
- @app.route("/process_speech", methods=['POST'])
110
- def process_speech():
111
- """Process speech input from the caller"""
112
- call_sid = request.values.get('CallSid')
113
- speech_result = request.values.get('SpeechResult', '')
114
-
115
- logger.info(f"Call {call_sid} - Speech input: {speech_result}")
116
-
117
- if call_sid in call_sessions:
118
- call_sessions[call_sid]['conversation'].append({
119
- 'role': 'user',
120
- 'content': speech_result,
121
- 'timestamp': datetime.now().isoformat()
122
- })
123
-
124
- # Get the recording URL if available
125
- recording_url = request.values.get('RecordingUrl')
126
-
127
- # Process the call without pipecat
128
- call_result = handle_call(recording_url, speech_result)
129
-
130
- # Create response
131
- response = VoiceResponse()
132
-
133
- # Use the generated response
134
- ai_response = call_result["response"]
135
- response.say(ai_response)
136
-
137
- if call_sid in call_sessions:
138
- call_sessions[call_sid]['conversation'].append({
139
- 'role': 'assistant',
140
- 'content': ai_response,
141
- 'timestamp': datetime.now().isoformat()
142
- })
143
-
144
- # Ask if there's anything else
145
- response.redirect('/anything_else')
146
-
147
- return str(response)
148
 
149
- @app.route("/anything_else", methods=['POST'])
150
- def anything_else():
151
- """Ask if there's anything else the caller needs help with"""
152
- call_sid = request.values.get('CallSid')
153
-
154
- response = VoiceResponse()
155
-
156
- response.say("Is there anything else I can help you with today?")
157
 
158
- # Gather speech input
159
- gather = Gather(input='speech',
160
- action='/handle_followup',
161
- method='POST',
162
- speechTimeout='auto',
163
- speechModel='phone_call')
164
- response.append(gather)
165
 
166
- # If no input is received
167
- response.say("Thank you for calling. Have a great day!")
168
- response.hangup()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
- return str(response)
 
 
 
 
 
 
 
 
 
 
171
 
172
- @app.route("/handle_followup", methods=['POST'])
173
- def handle_followup():
174
- """Handle the caller's follow-up response"""
175
- call_sid = request.values.get('CallSid')
176
- speech_result = request.values.get('SpeechResult', '')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
 
178
- logger.info(f"Call {call_sid} - Follow-up response: {speech_result}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
 
180
- # Simple yes/no detection
181
- if any(word in speech_result.lower() for word in ["yes", "yeah", "sure", "please", "correct"]):
182
- response = VoiceResponse()
183
- response.say("Great! How else can I help you today?")
184
-
185
- # Gather speech input again
186
- gather = Gather(input='speech',
187
- action='/process_speech',
188
- method='POST',
189
- speechTimeout='auto',
190
- speechModel='phone_call')
191
- response.append(gather)
192
-
193
- # If no input is received
194
- response.say("I didn't hear anything. Thank you for calling. Goodbye!")
195
- response.hangup()
196
 
197
- else:
198
- response = VoiceResponse()
199
- response.say("Thank you for calling. Have a great day!")
 
 
 
 
 
 
 
 
 
 
 
200
 
201
- # Save conversation log before hanging up
202
- if call_sid in call_sessions:
203
- call_sessions[call_sid]['end_time'] = datetime.now().isoformat()
204
- # In production, save this to a database
205
- logger.info(f"Call {call_sid} completed - Conversation log saved")
206
 
207
- response.hangup()
 
 
 
 
208
 
209
- return str(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
 
211
- @app.route("/call_status", methods=['POST'])
212
- def call_status():
213
- """Handle call status updates from Twilio"""
214
- call_sid = request.values.get('CallSid')
215
- call_status = request.values.get('CallStatus')
216
-
217
- logger.info(f"Call {call_sid} status: {call_status}")
218
-
219
- # If call ended abnormally, save the log
220
- if call_status in ['completed', 'busy', 'failed', 'no-answer', 'canceled']:
221
- if call_sid in call_sessions:
222
- call_sessions[call_sid]['end_time'] = datetime.now().isoformat()
223
- call_sessions[call_sid]['final_status'] = call_status
224
- # In production, save this to a database
225
-
226
- return "OK"
 
227
 
228
- if __name__ == "__main__":
229
- app.run(debug=True, host='0.0.0.0', port=5000)
 
 
1
import gradio as gr
import os
import tempfile
import json      # NOTE(review): json, requests, and base64 look unused in this file — confirm before removing
import requests
import base64
from pathlib import Path
from transformers import pipeline
from langchain_community.document_loaders import PyPDFLoader, WebBaseLoader
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Where uploaded PDFs and the website-URL registry are persisted.
DOCUMENTS_DIR = Path("documents")
DOCUMENTS_DIR.mkdir(exist_ok=True)
# On-disk location of the saved FAISS index.
VECTOR_DB_PATH = Path("vector_db")

# Sentence-embedding model used for both indexing and querying the FAISS store.
model_name = "sentence-transformers/all-MiniLM-L6-v2"
embeddings = HuggingFaceEmbeddings(model_name=model_name)

# Load a previously saved vector store if one exists; otherwise start with
# None and let load_pdf()/load_website() create it on first ingestion.
# NOTE(review): recent langchain releases require
# FAISS.load_local(..., allow_dangerous_deserialization=True); confirm the
# pinned langchain version if loading starts failing here.
if VECTOR_DB_PATH.exists():
    try:
        vector_db = FAISS.load_local(str(VECTOR_DB_PATH), embeddings)
        print("Loaded existing vector database.")
    except Exception as e:
        print(f"Error loading vector database: {e}")
        vector_db = None
else:
    vector_db = None

# Closed set of labels the zero-shot classifier chooses from.
POSSIBLE_INTENTS = [
    "product_inquiry",
    "technical_support",
    "billing_question",
    "general_information",
    "appointment_scheduling",
    "complaint",
    "other"
]

# Canned replies used when retrieval fails or no documents are indexed.
DEFAULT_RESPONSES = {
    "product_inquiry": "Thank you for your interest in our products. I'll gather the information and have someone contact you with more details.",
    "technical_support": "I understand you're experiencing technical issues. Let me find the right person to help you resolve this.",
    "billing_question": "Thank you for your billing inquiry. I'll connect you with our billing department for assistance.",
    "general_information": "Thank you for reaching out. I'll make sure you get the information you need.",
    "appointment_scheduling": "I'd be happy to help schedule an appointment for you. Let me find the next available slot.",
    "complaint": "I'm sorry to hear about your experience. Your feedback is important to us, and we'll address this promptly.",
    "other": "Thank you for your call. I'll make sure your message gets to the right person."
}

# Zero-shot intent classifier; classify_intent() degrades to ("other", 0.0)
# when this fails to load (e.g. model download error on Space startup).
try:
    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
except Exception as e:
    print(f"Error loading classifier: {e}")
    classifier = None
63
def classify_intent(text):
    """Return ``(intent_label, confidence)`` for *text* via zero-shot classification.

    Falls back to ``("other", 0.0)`` when the text is empty, the classifier
    failed to load at startup, or classification itself raises.
    """
    if not (text and classifier):
        return "other", 0.0

    try:
        result = classifier(
            text,
            candidate_labels=POSSIBLE_INTENTS,
            hypothesis_template="This is a {} request.",
        )
        # The pipeline sorts labels by score, so index 0 is the best match.
        best = (result["labels"][0], result["scores"][0])
    except Exception as e:
        print(f"Error classifying intent: {e}")
        return "other", 0.0

    return best
83
def load_pdf(file):
    """Ingest an uploaded PDF into the FAISS knowledge base.

    Parameters:
        file: upload object from ``gr.File`` (exposes ``.name`` and ``.read()``).
              NOTE(review): newer Gradio versions pass a filepath-like object
              whose ``.name`` is an absolute temp path — confirm the pinned
              gradio version still provides ``.read()``.

    Returns:
        A status string: success message with the chunk count, or the error.
    """
    global vector_db

    try:
        # Gradio puts an *absolute* temp path in .name; keep only the final
        # component so os.path.join below cannot resolve outside our dirs
        # (joining with an absolute path would discard the directory prefix
        # and clobber the original upload).
        filename = os.path.basename(file.name)

        # Read the upload once and reuse the bytes for both copies.
        data = file.read()

        # Scratch copy for the PDF loader.
        temp_dir = tempfile.mkdtemp()
        temp_path = os.path.join(temp_dir, filename)
        with open(temp_path, "wb") as f:
            f.write(data)

        # Permanent copy in the documents directory.
        target_path = os.path.join(DOCUMENTS_DIR, filename)
        with open(target_path, "wb") as f:
            f.write(data)

        # Parse and chunk the PDF.
        loader = PyPDFLoader(temp_path)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        chunks = text_splitter.split_documents(documents)

        # First ingestion creates the store; later ones extend it.
        # Persist to disk either way.
        if vector_db is None:
            vector_db = FAISS.from_documents(chunks, embeddings)
        else:
            vector_db.add_documents(chunks)
        vector_db.save_local(str(VECTOR_DB_PATH))

        return f"Successfully added {filename} to the knowledge base with {len(chunks)} chunks."

    except Exception as e:
        return f"Error processing PDF: {str(e)}"
125
def load_website(url):
    """Fetch *url*, chunk its text, and add the chunks to the FAISS knowledge base.

    The URL is also appended to ``documents/websites.txt`` so that
    ``list_documents()`` can report it later. Returns a status string.
    """
    global vector_db

    try:
        # Pull the page content via langchain's web loader.
        documents = WebBaseLoader(url).load()

        # Record the URL in the registry file for later listing.
        with open(os.path.join(DOCUMENTS_DIR, "websites.txt"), "a") as f:
            f.write(f"{url}\n")

        # Chunk with the same parameters used for PDFs.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200
        )
        chunks = splitter.split_documents(documents)

        # Create the store on first ingestion, extend it afterwards;
        # persist to disk in both cases.
        if vector_db is None:
            vector_db = FAISS.from_documents(chunks, embeddings)
        else:
            vector_db.add_documents(chunks)
        vector_db.save_local(str(VECTOR_DB_PATH))

        return f"Successfully added {url} to the knowledge base with {len(chunks)} chunks."

    except Exception as e:
        return f"Error processing website: {str(e)}"
158
def generate_response(query, intent=None):
    """Build a reply for *query*, grounded in retrieved documents when possible.

    Unknown or missing intents are coerced to "general_information". Whenever
    retrieval is unavailable, returns nothing, produces too little context, or
    raises, the canned DEFAULT_RESPONSES entry for the intent is returned.
    """
    global vector_db

    # Normalize the intent to a known label.
    if not intent or intent not in POSSIBLE_INTENTS:
        intent = "general_information"

    fallback = DEFAULT_RESPONSES.get(intent, DEFAULT_RESPONSES["other"])

    # No knowledge base yet -> canned reply.
    if vector_db is None:
        return fallback

    try:
        docs = vector_db.similarity_search(query, k=3)
        if not docs:
            return fallback

        # Merge the retrieved chunks into one context string.
        context = "\n\n".join(doc.page_content for doc in docs)
        if len(context) <= 10:
            return fallback

        # Intent-specific framing around the first 300 chars of context.
        snippet = context[:300]
        framing = {
            "product_inquiry": (
                "Based on the information I have: ",
                "... Would you like to know more specific details?",
            ),
            "technical_support": (
                "I found some information that might help with your issue: ",
                "... Is there a specific part you'd like me to explain further?",
            ),
            "billing_question": (
                "Regarding your billing question: ",
                "... Would you like me to connect you with our billing department for more details?",
            ),
        }
        prefix, suffix = framing.get(
            intent,
            (
                "Here's what I found that might help answer your question: ",
                "... Is there anything specific you'd like me to clarify?",
            ),
        )
        return prefix + snippet + suffix

    except Exception as e:
        print(f"Error generating response: {e}")
        return fallback
 
197
def list_documents():
    """Return the knowledge-base contents as ``{"PDFs": [...], "Websites": [...]}``."""
    # PDF copies live directly in the documents directory.
    pdf_names = [pdf.name for pdf in DOCUMENTS_DIR.glob("*.pdf")]

    # websites.txt holds one URL per line (appended by load_website).
    site_urls = []
    registry = DOCUMENTS_DIR / "websites.txt"
    if registry.exists():
        with open(registry, "r") as fh:
            for raw in fh:
                url = raw.strip()
                if url:
                    site_urls.append(url)

    return {
        "PDFs": pdf_names,
        "Websites": site_urls,
    }
213
# Special handler for Twilio
def handle_twilio_request(data):
    """Dispatch a Twilio webhook payload (dict of POST params) to a TwiML reply.

    Branch selection, in order:
      - "SpeechResult" present       -> answer the transcribed speech, then
                                        gather a follow-up
      - "TranscriptionText" present  -> transcription callback (SMS reply is
                                        stubbed: no Twilio credentials stored)
      - CallStatus == "ringing"      -> initial greeting + speech gather
      - anything else                -> generic acknowledgement
    Any exception is caught and an apologetic TwiML response is returned, so
    the caller always hears something.

    NOTE(review): the Gather action URLs point at the Space *page* URL, not an
    app endpoint — confirm Twilio can actually POST to these routes.
    """
    # Local import keeps the module's top-of-file import block untouched.
    from xml.sax.saxutils import escape

    try:
        if "SpeechResult" in data:
            # This is a speech transcription: classify and answer it.
            query = data.get("SpeechResult", "")
            intent, _ = classify_intent(query)
            response = generate_response(query, intent)

            # escape() prevents '&', '<', '>' coming from retrieved document
            # text from breaking the TwiML XML.
            twiml = f"""<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Say>{escape(response)}</Say>
                <Pause length="1"/>
                <Say>Is there anything else I can help you with today?</Say>
                <Gather input="speech" action="https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/followup" method="POST" speechTimeout="auto" speechModel="phone_call"/>
                <Say>Thank you for calling. Have a great day!</Say>
            </Response>
            """
            return twiml

        elif "TranscriptionText" in data:
            # Transcription callback: generate the reply text only.
            query = data.get("TranscriptionText", "")
            intent, _ = classify_intent(query)
            response = generate_response(query, intent)

            # Sending the SMS would require Twilio credentials, which this
            # deployment deliberately avoids storing.
            return f"Response would be sent via SMS: {response}"

        elif "CallStatus" in data and data.get("CallStatus") == "ringing":
            # Initial call handling: greet and gather the caller's speech.
            twiml = """<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Say>Hello! Thank you for calling. How can I help you today?</Say>
                <Gather input="speech" action="https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/speech" method="POST" speechTimeout="auto" speechModel="phone_call"/>
                <Say>I didn't hear anything. Please call back when you're ready.</Say>
            </Response>
            """
            return twiml

        else:
            # Follow-up or fallback acknowledgement.
            twiml = """<?xml version="1.0" encoding="UTF-8"?>
            <Response>
                <Say>Thank you for your call. I've recorded your message and will process it shortly.</Say>
            </Response>
            """
            return twiml

    except Exception as e:
        print(f"Error processing Twilio request: {e}")
        # Last-resort TwiML so the call does not end in silence.
        twiml = """<?xml version="1.0" encoding="UTF-8"?>
        <Response>
            <Say>I'm sorry, I encountered an error processing your request. Please try again later.</Say>
        </Response>
        """
        return twiml
276
# API endpoints for Twilio
def twilio_speech_handler(query):
    """Build the TwiML reply for a caller's transcribed speech.

    Classifies *query*, generates a knowledge-base answer, and wraps it in
    TwiML that also gathers the caller's follow-up response.
    """
    # Local import keeps the module's top-of-file import block untouched.
    from xml.sax.saxutils import escape

    # Process the query.
    intent, _ = classify_intent(query)
    response = generate_response(query, intent)

    # escape() prevents '&', '<', '>' coming from retrieved document text
    # from producing invalid TwiML XML.
    twiml = f"""<?xml version="1.0" encoding="UTF-8"?>
    <Response>
        <Say>{escape(response)}</Say>
        <Pause length="1"/>
        <Say>Is there anything else I can help you with today?</Say>
        <Gather input="speech" action="https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/followup" method="POST" speechTimeout="auto" speechModel="phone_call"/>
        <Say>Thank you for calling. Have a great day!</Say>
    </Response>
    """
    return twiml
295
def twilio_followup_handler(query):
    """Return TwiML for the caller's yes/no follow-up answer.

    A *query* containing an affirmation ("yes", "yeah", "sure", "please",
    "correct") loops back into another speech gather; anything else — including
    a missing/None query (Twilio sent no SpeechResult) — politely ends the call.
    """
    affirmations = ("yes", "yeah", "sure", "please", "correct")
    # Guard: query may be None when no speech was captured — the original
    # query.lower() would raise AttributeError here.
    if query and any(word in query.lower() for word in affirmations):
        twiml = """<?xml version="1.0" encoding="UTF-8"?>
        <Response>
            <Say>Great! How else can I help you today?</Say>
            <Gather input="speech" action="https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/speech" method="POST" speechTimeout="auto" speechModel="phone_call"/>
            <Say>I didn't hear anything. Thank you for calling. Goodbye!</Say>
        </Response>
        """
    else:
        twiml = """<?xml version="1.0" encoding="UTF-8"?>
        <Response>
            <Say>Thank you for calling. Have a great day!</Say>
        </Response>
        """
    return twiml
313
def twilio_call_handler():
    """Return the TwiML greeting played when an inbound call first connects."""
    # Greet the caller and gather speech; if nothing is heard, the final
    # <Say> plays and the call ends.
    greeting_twiml = """<?xml version="1.0" encoding="UTF-8"?>
    <Response>
        <Say>Hello! Thank you for calling. How can I help you today?</Say>
        <Gather input="speech" action="https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/speech" method="POST" speechTimeout="auto" speechModel="phone_call"/>
        <Say>I didn't hear anything. Please call back when you're ready.</Say>
    </Response>
    """
    return greeting_twiml
324
# Create Gradio interface
# Four tabs: ingestion (PDF/website), knowledge-base listing, a manual
# response-generation tester, and static Twilio setup instructions.
with gr.Blocks(title="Call Assistant RAG System") as demo:
    gr.Markdown("# Call Assistant RAG System")
    gr.Markdown("Add documents and websites to the knowledge base, and test the response generation.")

    with gr.Tab("Add Knowledge"):
        with gr.Row():
            with gr.Column():
                # PDF ingestion column — wired to load_pdf().
                pdf_input = gr.File(label="Upload PDF Document")
                pdf_button = gr.Button("Add PDF to Knowledge Base")
                pdf_output = gr.Textbox(label="PDF Upload Status")

                pdf_button.click(
                    load_pdf,
                    inputs=[pdf_input],
                    outputs=[pdf_output]
                )

            with gr.Column():
                # Website ingestion column — wired to load_website().
                url_input = gr.Textbox(label="Website URL")
                url_button = gr.Button("Add Website to Knowledge Base")
                url_output = gr.Textbox(label="Website Status")

                url_button.click(
                    load_website,
                    inputs=[url_input],
                    outputs=[url_output]
                )

    with gr.Tab("Knowledge Base"):
        # Shows the dict returned by list_documents() as JSON.
        list_button = gr.Button("List Documents in Knowledge Base")
        knowledge_output = gr.JSON(label="Documents")

        list_button.click(
            list_documents,
            inputs=[],
            outputs=[knowledge_output]
        )

    with gr.Tab("Test Response Generation"):
        # Manual harness for generate_response(query, intent).
        with gr.Row():
            query_input = gr.Textbox(label="Query / Transcription")
            intent_input = gr.Dropdown(
                choices=POSSIBLE_INTENTS,
                label="Intent",
                value="general_information"
            )

        test_button = gr.Button("Generate Response")
        response_output = gr.Textbox(label="Generated Response")

        test_button.click(
            generate_response,
            inputs=[query_input, intent_input],
            outputs=[response_output]
        )

    with gr.Tab("Twilio Integration"):
        # Static documentation only — no interactive components.
        # NOTE(review): the webhook URLs below are the Space *page* URL;
        # verify Twilio can actually POST to these endpoints.
        gr.Markdown("""
        ## Twilio Integration Instructions

        This Gradio app provides API endpoints for Twilio integration. Follow these steps to set up:

        1. Log into your Twilio account
        2. Go to Phone Numbers → Manage → Active numbers
        3. Select your number (+19704064410)
        4. For "A Call Comes In", select "Webhook" and enter:
           - URL: `https://huggingface.co/spaces/iajitpanday/vBot-1.5/api/twilio/call`
           - Method: HTTP POST

        The system will automatically:
        - Answer incoming calls
        - Process speech input
        - Generate responses using your knowledge base
        - Handle follow-up questions
        """)

        gr.Markdown("""
        ## API Documentation

        This app exposes several API endpoints for Twilio integration:

        1. `/api/twilio/call` - Initial call handling
        2. `/api/twilio/speech` - Processes speech input
        3. `/api/twilio/followup` - Handles follow-up responses

        All endpoints return TwiML responses that Twilio can understand.
        """)
413
# Define API functions (these are needed for Gradio API endpoints)
def api_response(query, intent=None):
    """Generate a response for *query*; wrapped in a one-element list for the API."""
    return [generate_response(query, intent)]

def api_twilio_call():
    """Delegate initial Twilio call handling to twilio_call_handler()."""
    return twilio_call_handler()

def api_twilio_speech(speech_result=None):
    """Delegate Twilio speech processing to twilio_speech_handler()."""
    return twilio_speech_handler(speech_result)

def api_twilio_followup(speech_result=None):
    """Delegate Twilio follow-up handling to twilio_followup_handler()."""
    return twilio_followup_handler(speech_result)

# Enable request queuing, then start the Gradio server.
demo.queue()
demo.launch()