mishiawan committed on
Commit
ce6080c
·
verified ·
1 Parent(s): e356c8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -54
app.py CHANGED
@@ -1,55 +1,54 @@
import streamlit as st
from transformers import pipeline

# Page header.
st.title("AI Code Bot")

# Short usage blurb shown above the prompt box.
st.write("""
### Generate Code Snippets with AI
Enter a programming-related prompt, and the AI will generate a code snippet with explanatory comments.
""")

# Free-form prompt from the user.
user_input = st.text_area("Enter your prompt (e.g., 'Write a Python function to reverse a list'):")


@st.cache_resource
def load_model():
    """Build and cache the text-generation pipeline (loaded once per session)."""
    # Authenticate automatically in Hugging Face Spaces.
    return pipeline(
        "text-generation",
        model="EleutherAI/gpt-neo-1.3B",  # The model to use
    )


model = load_model()

# Run generation only on an explicit button press.
if st.button("Generate Code"):
    if not user_input.strip():
        st.warning("Please enter a prompt.")
    else:
        with st.spinner("Generating code..."):
            try:
                # Single completion, capped at 200 tokens.
                generation = model(user_input, max_length=200, num_return_sequences=1)
                generated_text = generation[0]["generated_text"]

                # Partition the output: '#'-prefixed lines are shown as prose,
                # everything else as a highlighted code listing.
                comment_lines, code_lines = [], []
                for text_line in generated_text.split("\n"):
                    if text_line.strip().startswith("#"):
                        comment_lines.append(text_line)
                    else:
                        code_lines.append(text_line)

                if comment_lines:
                    st.subheader("Comments")
                    for comment in comment_lines:
                        st.markdown(comment)

                if code_lines:
                    st.subheader("Code")
                    st.code("\n".join(code_lines), language="python")
            except Exception as e:
                # Surface any model/runtime failure in the UI instead of crashing.
                st.error(f"An error occurred: {e}")
import os
import whisper
from gtts import gTTS
from groq import Groq
import gradio as gr
from tempfile import NamedTemporaryFile

# Groq API client; the key is read from the environment (unset -> None,
# and the client will fail on first request rather than at import time).
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Speech-to-text: the small "base" Whisper checkpoint, loaded once at startup.
whisper_model = whisper.load_model("base")
def voice_to_voice_chat(audio_file):
    """Transcribe spoken input, query the Groq LLM, and synthesize a spoken reply.

    Args:
        audio_file: Path to the recorded input audio (as supplied by
            ``gr.Audio(type="filepath")``).

    Returns:
        Tuple ``(llm_response, audio_path)``: the LLM's text answer and the
        path of an MP3 file containing the same answer spoken via gTTS.
    """
    # Step 1: speech -> text with Whisper.
    transcription = whisper_model.transcribe(audio_file)["text"]

    # Step 2: single-turn, non-streaming chat completion against Groq.
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "user", "content": transcription}
        ],
        model="llama3-8b-8192",
        stream=False,
    )
    llm_response = chat_completion.choices[0].message.content

    # Step 3: text -> speech with gTTS.
    tts = gTTS(llm_response)
    # Reserve a temp path and CLOSE the handle before gTTS writes to it:
    # the original kept the NamedTemporaryFile open while saving, which
    # leaks a file handle per call and fails outright on Windows (the path
    # cannot be opened a second time while held open). delete=False keeps
    # the file on disk so Gradio can serve it after this function returns.
    with NamedTemporaryFile(delete=False, suffix=".mp3") as tmp:
        audio_path = tmp.name
    tts.save(audio_path)

    return llm_response, audio_path
# Gradio Interface
def chatbot_interface(audio_input):
    """Gradio-facing wrapper: forward the recording and relay both outputs."""
    text_reply, spoken_reply_path = voice_to_voice_chat(audio_input)
    return text_reply, spoken_reply_path
# Assemble the Gradio app from named components.
# NOTE(review): `source=` is the Gradio 3.x spelling; Gradio 4.x renamed it
# to `sources=["microphone"]` — confirm against the pinned gradio version.
mic_input = gr.Audio(source="microphone", type="filepath", label="Speak into the Microphone")
text_output = gr.Textbox(label="Chatbot Response")
voice_output = gr.Audio(type="filepath", label="Voice Output")

interface = gr.Interface(
    fn=chatbot_interface,
    inputs=mic_input,
    outputs=[text_output, voice_output],
    title="Real-Time Voice-to-Voice Chatbot",
    description="Speak to the chatbot and get a spoken response!",
)

# Start the web UI only when run as a script (not on import).
if __name__ == "__main__":
    interface.launch()