iajitpanday committed on
Commit
371de18
·
verified ·
1 Parent(s): d1b7cba

Rename utils.py to twilio_webhook.py

Browse files
Files changed (2) hide show
  1. twilio_webhook.py +60 -0
  2. utils.py +0 -50
twilio_webhook.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # twilio_webhook.py
2
+ from flask import Flask, request
3
+ from twilio.rest import Client
4
+ from twilio.twiml.voice_response import VoiceResponse
5
+ import requests
6
+
7
+ app = Flask(__name__)
8
+
9
+ @app.route("/voice", methods=['GET', 'POST'])
10
+ def voice():
11
+ # Create TwiML response
12
+ resp = VoiceResponse()
13
+
14
+ # Greet the caller
15
+ resp.say("Hello! Welcome to our customer support. Please state your query.")
16
+
17
+ # Record the caller's message
18
+ resp.record(
19
+ action='/handle_recording',
20
+ method='POST',
21
+ max_length=30,
22
+ finish_on_key='#'
23
+ )
24
+
25
+ return str(resp)
26
+
27
+ @app.route("/handle_recording", methods=['POST'])
28
+ def handle_recording():
29
+ # Get the recording URL from Twilio
30
+ recording_url = request.values.get('RecordingUrl')
31
+
32
+ # Download the recording
33
+ audio_response = requests.get(recording_url)
34
+
35
+ # Send to Hugging Face Space for processing
36
+ hf_space_url = "YOUR_HUGGING_FACE_SPACE_URL/api/predict"
37
+
38
+ files = {"data": [audio_response.content]}
39
+ response = requests.post(hf_space_url, json=files)
40
+
41
+ # Get the AI response
42
+ ai_response = response.json()
43
+
44
+ # Create TwiML to speak the response
45
+ resp = VoiceResponse()
46
+ resp.say(ai_response['data'][0])
47
+
48
+ # Ask if they need more help
49
+ resp.say("Is there anything else I can help you with?")
50
+ resp.record(
51
+ action='/handle_recording',
52
+ method='POST',
53
+ max_length=30,
54
+ finish_on_key='#'
55
+ )
56
+
57
+ return str(resp)
58
+
59
+ if __name__ == "__main__":
60
+ app.run(debug=True)
utils.py DELETED
@@ -1,50 +0,0 @@
1
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, VitsModel
2
- import soundfile as sf
3
- import torch
4
- import io
5
- import os
6
-
7
# Speech-to-Text (Whisper)
def transcribe_audio(audio_path):
    """Transcribe the audio file at *audio_path* to text with Whisper.

    Returns the transcript string, or an apology string on any failure.
    """
    try:
        # Build the ASR pipeline once and cache it on the function object:
        # reloading the whisper checkpoint on every call is needlessly slow.
        if not hasattr(transcribe_audio, "_asr"):
            transcribe_audio._asr = pipeline(
                "automatic-speech-recognition", model="openai/whisper-tiny"
            )
        # The pipeline resamples its input itself, so no manual rate
        # conversion is needed. (The original
        # sf.read(audio_path, samplerate=8000) never resampled anything —
        # soundfile only honors `samplerate` for RAW files and raises for
        # WAV — so that path was a latent bug that corrupted nothing only
        # because it always threw into the except clause.)
        result = transcribe_audio._asr(audio_path)
        return result["text"]
    except Exception as e:
        print(f"STT Error: {e}")
        return "Sorry, I couldn't understand that."
20
-
21
# NLP (Falcon-7B-Instruct)
def generate_response(text):
    """Generate a customer-support reply to *text* with Falcon-7B-Instruct.

    Returns the agent's reply, or a fallback apology string on any failure.
    """
    try:
        # Load the tokenizer/model once and cache them on the function
        # object — reloading a 7B-parameter model per call is prohibitively
        # slow and memory-hungry.
        if not hasattr(generate_response, "_model"):
            generate_response._tokenizer = AutoTokenizer.from_pretrained(
                "tiiuae/falcon-7b-instruct"
            )
            generate_response._model = AutoModelForCausalLM.from_pretrained(
                "tiiuae/falcon-7b-instruct"
            )
        tokenizer = generate_response._tokenizer
        model = generate_response._model
        prompt = (
            "You are a polite and helpful customer support agent. Respond professionally.\n"
            f"User: {text}\nAgent:"
        )
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(**inputs, max_length=200, do_sample=True, top_p=0.9)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Guard the marker split: the original split("Agent:")[1] raised
        # IndexError when the decoded text lacked the marker, turning every
        # such generation into the generic fallback below.
        parts = response.split("Agent:")
        return parts[1].strip() if len(parts) > 1 else response.strip()
    except Exception as e:
        print(f"NLP Error: {e}")
        return "I'm having trouble processing your request. Please try again."
37
-
38
# Text-to-Speech (VITS)
def text_to_speech(text, output_path="output.wav"):
    """Synthesize *text* to a WAV file at *output_path* using VITS.

    Returns the output path on success, or None on any failure.
    """
    try:
        # Cache model/tokenizer on the function object so repeated calls
        # don't reload the checkpoint from disk every time.
        if not hasattr(text_to_speech, "_model"):
            text_to_speech._model = VitsModel.from_pretrained("facebook/mms-tts-eng")
            text_to_speech._tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
        inputs = text_to_speech._tokenizer(text, return_tensors="pt")
        # Inference only — no gradients needed.
        with torch.no_grad():
            waveform = text_to_speech._model(**inputs).waveform
        # NOTE(review): the file is stamped 8 kHz for Twilio playback, but
        # nothing here resamples the waveform — confirm the model's native
        # output rate actually is 8 kHz, otherwise the audio will play at
        # the wrong speed/pitch.
        sf.write(output_path, waveform.squeeze().numpy(), 8000)  # 8kHz for Twilio
        return output_path
    except Exception as e:
        print(f"TTS Error: {e}")
        return None