meesamraza committed on
Commit
ad5e7c5
·
verified ·
1 Parent(s): 62adc9f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -0
app.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from groq import Groq
import streamlit as st
from dotenv import load_dotenv

# Load API key from .env file
load_dotenv()
# NOTE(review): os.getenv returns None when GROQ_API_KEY is unset; the Groq
# client is still constructed and will only fail at the first request — confirm
# that is the desired failure mode.
api_key = os.getenv("GROQ_API_KEY")

# Initialize the Groq client
client = Groq(api_key=api_key)

# Define the programming development topics for the chatbot
# (shown in the selectbox and used by main()'s relevance check).
developer_topics = [
    "best programming languages", "web development frameworks", "version control with Git",
    "debugging tips", "data structures and algorithms", "object-oriented programming",
    "functional programming", "software design patterns", "API design and development",
    "devops practices", "cloud computing", "front-end development", "back-end development",
    "machine learning", "deep learning", "software testing and QA", "agile methodologies",
    "CI/CD pipelines", "database design", "programming best practices", "security in development",
    "mobile app development", "project management for developers", "open source contribution",
    "developer tools and IDEs", "documentation and code commenting", "coding interview preparation"
]
24
+
25
def get_response(query):
    """Send *query* to the Groq chat API as a one-shot user message.

    Returns the model's reply text from the first (only) choice.
    """
    chat = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=[{"role": "user", "content": query}],
        temperature=0.7,
        max_completion_tokens=1024,
        top_p=1,
    )
    # Single completion requested, so the answer is always choice 0.
    return chat.choices[0].message.content
36
+
37
def transcribe_audio(file):
    """Transcribe an uploaded audio file with Groq's Whisper endpoint.

    *file* is expected to be a Streamlit UploadedFile (has .name and .read()).
    Returns the transcript text only, discarding the rest of the verbose JSON.
    """
    # The API takes a (filename, raw bytes) pair for the upload.
    payload = (file.name, file.read())
    result = client.audio.transcriptions.create(
        file=payload,
        model="whisper-large-v3-turbo",
        response_format="verbose_json",
    )
    return result.text
45
+
46
def main():
    """Streamlit entry point.

    Collects a query from a typed text area or an uploaded audio file
    (transcribed via Whisper), sends it to the chat model, and renders
    the response. The topic selectbox is informational only — it is not
    fed into the query (preserved from the original design).
    """
    st.title("Programming Developer Advisor Chatbot")

    # Let the user choose a developer-related topic or type a custom query.
    topic = st.selectbox("Choose a programming topic", developer_topics)
    user_input = st.text_area("Or ask a programming-related question:", "")

    # Audio file uploader.
    uploaded_file = st.file_uploader("Upload an audio file for transcription", type=["m4a", "mp3", "wav"])

    # Transcribe uploaded audio, if any, and show the transcript.
    transcription = ""
    if uploaded_file is not None:
        st.write("Transcribing the audio...")
        transcription = transcribe_audio(uploaded_file)
        st.write("Transcribed text:")
        st.write(transcription)

    # Typed text takes priority over the audio transcription
    # (single code path replaces the original's two duplicated branches).
    query = user_input or transcription

    # Naive relevance gate: only flags typed queries, and only when no full
    # topic phrase appears as a substring — so most free-form questions will
    # trigger it. Fixed here: the loop variable no longer shadows `topic`.
    # NOTE(review): consider a real classifier or keyword list if this gate
    # should actually block off-topic questions.
    if user_input and not any(t in user_input.lower() for t in developer_topics):
        st.write("Sorry, I can only answer programming-related questions.")

    # Get a single response for whichever query source was provided.
    if query:
        response = get_response(query)
        st.write("### Response:")
        st.write(response)
82
+
83
# Run the Streamlit app when executed as a script (not on import).
if __name__ == "__main__":
    main()