nagasurendra committed on
Commit
ddf45e9
·
verified ·
1 Parent(s): 402321e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -1
app.py CHANGED
@@ -94,7 +94,79 @@ html_code = """
94
  <div class="status" id="status">Press the mic button to start...</div>
95
  <div class="response" id="response">Response will appear here...</div>
96
  <script>
97
- // JS code remains unchanged
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  </script>
99
  </body>
100
  </html>
 
94
  <div class="status" id="status">Press the mic button to start...</div>
95
  <div class="response" id="response">Response will appear here...</div>
96
  <script>
97
+ const micButton = document.getElementById('mic-button');
98
+ const status = document.getElementById('status');
99
+ const response = document.getElementById('response');
100
+ let mediaRecorder;
101
+ let audioChunks = [];
102
+ let isConversationActive = false;
103
+ micButton.addEventListener('click', () => {
104
+ if (!isConversationActive) {
105
+ isConversationActive = true;
106
+ startConversation();
107
+ }
108
+ });
109
+ function startConversation() {
110
+ const utterance = new SpeechSynthesisUtterance('Please choose your preference: All, Vegetarian, Non-Vegetarian, or Guilt-Free.');
111
+ speechSynthesis.speak(utterance);
112
+ utterance.onend = () => {
113
+ status.textContent = 'Listening...';
114
+ startListening();
115
+ };
116
+ }
117
+ function startListening() {
118
+ navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
119
+ mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/webm;codecs=opus' });
120
+ mediaRecorder.start();
121
+ audioChunks = [];
122
+ mediaRecorder.ondataavailable = event => audioChunks.push(event.data);
123
+ mediaRecorder.onstop = async () => {
124
+ const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });
125
+ const formData = new FormData();
126
+ formData.append('audio', audioBlob);
127
+ status.textContent = 'Processing...';
128
+ try {
129
+ const result = await fetch('/process-audio', { method: 'POST', body: formData });
130
+ const data = await result.json();
131
+ response.textContent = data.response;
132
+ response.style.display = 'block';
133
+ const utterance = new SpeechSynthesisUtterance(data.response);
134
+ speechSynthesis.speak(utterance);
135
+ utterance.onend = () => {
136
+ console.log("Speech synthesis completed.");
137
+ if (data.response.includes("Goodbye")) {
138
+ status.textContent = 'Conversation ended. Press the mic button to start again.';
139
+ isConversationActive = false;
140
+ fetch('/reset-cart'); // Reset the cart dynamically on end
141
+ } else if (data.response.includes("Your order is complete")) {
142
+ status.textContent = 'Order complete. Thank you for using AI Dining Assistant.';
143
+ isConversationActive = false;
144
+ fetch('/reset-cart'); // Reset the cart after final order
145
+ } else {
146
+ status.textContent = 'Listening...';
147
+ setTimeout(() => {
148
+ startListening();
149
+ }, 500);
150
+ }
151
+ };
152
+ utterance.onerror = (e) => {
153
+ console.error("Speech synthesis error:", e.error);
154
+ status.textContent = 'Error with speech output.';
155
+ isConversationActive = false;
156
+ };
157
+ } catch (error) {
158
+ response.textContent = 'Sorry, I could not understand. Please try again.';
159
+ response.style.display = 'block';
160
+ status.textContent = 'Press the mic button to restart the conversation.';
161
+ isConversationActive = false;
162
+ }
163
+ };
164
+ setTimeout(() => mediaRecorder.stop(), 5000);
165
+ }).catch(() => {
166
+ status.textContent = 'Microphone access denied.';
167
+ isConversationActive = false;
168
+ });
169
+ }
170
  </script>
171
  </body>
172
  </html>