Hamxa1997 committed on
Commit
047777e
·
verified ·
1 Parent(s): dcc7060

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -3,13 +3,14 @@ import os
3
  import whisper
4
  import torch
5
  from gtts import gTTS
 
6
  from sentence_transformers import SentenceTransformer
7
  import faiss
8
  import pandas as pd
9
  from datasets import load_dataset
10
  from deep_translator import GoogleTranslator
11
  from langdetect import detect
12
- #import groq # Correct import for Groq API
13
 
14
  # Set up Whisper with a smaller model or on CPU
15
  model_name = "small" # Use "small", "base", or "medium" for smaller models
@@ -31,10 +32,10 @@ embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
31
  dataset_embeddings = embedder.encode(train_dataset['question'], convert_to_tensor=True)
32
  index = faiss.IndexFlatL2(dataset_embeddings.shape[1]) # Create an index based on L2 distance
33
  index.add(dataset_embeddings.cpu().numpy()) # Add the embeddings to the index
34
- api_key = os.getenv("api_key")
35
  # Set up Groq API with direct API key
36
 
37
- #client = groq(api_key=api_key)
38
  import torch
39
  from transformers import pipeline
40
  from langdetect import detect
@@ -263,7 +264,7 @@ with gr.Blocks(css=custom_css) as iface:
263
  gr.Markdown("<h1>Multilingual Customer Service Chatbot</h1>")
264
  gr.Markdown("<h2>Ask your questions</h2>")
265
  gr.Markdown("<p class='instructions'>If you type in Urdu, it will respond in Urdu. If in English, it will respond in English. Same with voice.</p>")
266
-
267
  with gr.Row():
268
  with gr.Column():
269
  text_input = gr.Textbox(lines=2, placeholder="Type your query here...", label="Text Input (Optional)")
@@ -272,11 +273,11 @@ with gr.Blocks(css=custom_css) as iface:
272
  transcription_output = gr.Textbox(label="Transcription") # Add transcription output
273
  response_text = gr.Textbox(label="Chatbot Response")
274
  response_audio = gr.Audio(label="Response Audio (if applicable)")
275
-
276
  with gr.Row():
277
  submit_btn = gr.Button("Submit", elem_id="submit-btn", variant="primary")
278
  clear_btn = gr.Button("Clear", elem_id="clear-btn", variant="secondary")
279
-
280
  submit_btn.click(chatbot, inputs=[text_input, audio_input], outputs=[transcription_output, response_text, response_audio])
281
  clear_btn.click(lambda: (None, None, None, None, None), inputs=[], outputs=[text_input, audio_input, transcription_output, response_text, response_audio])
282
 
 
3
  import whisper
4
  import torch
5
  from gtts import gTTS
6
+ import IPython.display as ipd
7
  from sentence_transformers import SentenceTransformer
8
  import faiss
9
  import pandas as pd
10
  from datasets import load_dataset
11
  from deep_translator import GoogleTranslator
12
  from langdetect import detect
13
+ from groq import Groq # Correct import for Groq API
14
 
15
  # Set up Whisper with a smaller model or on CPU
16
  model_name = "small" # Use "small", "base", or "medium" for smaller models
 
32
  dataset_embeddings = embedder.encode(train_dataset['question'], convert_to_tensor=True)
33
  index = faiss.IndexFlatL2(dataset_embeddings.shape[1]) # Create an index based on L2 distance
34
  index.add(dataset_embeddings.cpu().numpy()) # Add the embeddings to the index
35
+
36
  # Set up Groq API with direct API key
37
 
38
+ client = Groq(api_key=api_key)
39
  import torch
40
  from transformers import pipeline
41
  from langdetect import detect
 
264
  gr.Markdown("<h1>Multilingual Customer Service Chatbot</h1>")
265
  gr.Markdown("<h2>Ask your questions</h2>")
266
  gr.Markdown("<p class='instructions'>If you type in Urdu, it will respond in Urdu. If in English, it will respond in English. Same with voice.</p>")
267
+
268
  with gr.Row():
269
  with gr.Column():
270
  text_input = gr.Textbox(lines=2, placeholder="Type your query here...", label="Text Input (Optional)")
 
273
  transcription_output = gr.Textbox(label="Transcription") # Add transcription output
274
  response_text = gr.Textbox(label="Chatbot Response")
275
  response_audio = gr.Audio(label="Response Audio (if applicable)")
276
+
277
  with gr.Row():
278
  submit_btn = gr.Button("Submit", elem_id="submit-btn", variant="primary")
279
  clear_btn = gr.Button("Clear", elem_id="clear-btn", variant="secondary")
280
+
281
  submit_btn.click(chatbot, inputs=[text_input, audio_input], outputs=[transcription_output, response_text, response_audio])
282
  clear_btn.click(lambda: (None, None, None, None, None), inputs=[], outputs=[text_input, audio_input, transcription_output, response_text, response_audio])
283