rashid01 committed on
Commit
cb6544e
·
verified ·
1 Parent(s): ddb6374

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -74
app.py CHANGED
@@ -1,23 +1,16 @@
1
  import streamlit as st
2
  import openai
3
  from langchain_google_genai import ChatGoogleGenerativeAI
4
- # Assuming you have a package or class for Groq integration. Check actual package/class.
5
- # from langchain_groq import ChatGroqGenerativeAI
6
  from datetime import datetime, timedelta
7
  import time
8
 
9
  # API keys
10
  GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
11
  OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
12
- GROQ_API_KEY = st.secrets["GROQ_API_KEY"] # Assume you have an API key for Groq
13
 
14
  # Initialize OpenAI
15
  openai.api_key = OPENAI_API_KEY
16
 
17
- # Initialize Groq
18
- # Uncomment and update if you find the correct class for Groq.
19
- # groq_llm = ChatGroqGenerativeAI(model="groq-pro", groq_api_key=GROQ_API_KEY)
20
-
21
  # In-memory storage for progress tracking
22
  progress_data = {
23
  "questions_solved": {
@@ -33,10 +26,6 @@ def get_llm(model_choice):
33
  return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
34
  elif model_choice == "OpenAI":
35
  return None
36
- elif model_choice == "Groq":
37
- # Replace with actual initialization if you find the correct class.
38
- # return groq_llm
39
- raise ValueError("Groq LLM is not initialized. Please check the correct class.")
40
  else:
41
  raise ValueError("Unsupported model choice.")
42
 
@@ -51,15 +40,11 @@ def generate_questions(model_choice, role, question_type, num_questions, difficu
51
  prompt=prompt,
52
  max_tokens=150
53
  )
54
- return response.choices[0].text.strip().split('\n')
55
  elif model_choice == "Gemini":
56
  llm = get_llm(model_choice)
57
  response = llm.invoke(prompt)
58
- return response.content.split('\n')
59
- elif model_choice == "Groq":
60
- llm = get_llm(model_choice)
61
- response = llm.invoke(prompt)
62
- return response.content.split('\n')
63
  else:
64
  raise ValueError("Unsupported model choice.")
65
 
@@ -76,10 +61,6 @@ def provide_feedback(model_choice, answer):
76
  llm = get_llm(model_choice)
77
  response = llm.invoke(prompt)
78
  return response.content
79
- elif model_choice == "Groq":
80
- llm = get_llm(model_choice)
81
- response = llm.invoke(prompt)
82
- return response.content
83
  else:
84
  raise ValueError("Unsupported model choice.")
85
 
@@ -96,10 +77,6 @@ def get_tips(model_choice, role):
96
  llm = get_llm(model_choice)
97
  response = llm.invoke(prompt)
98
  return response.content
99
- elif model_choice == "Groq":
100
- llm = get_llm(model_choice)
101
- response = llm.invoke(prompt)
102
- return response.content
103
  else:
104
  raise ValueError("Unsupported model choice.")
105
 
@@ -293,56 +270,64 @@ with st.sidebar:
293
 
294
  st.title("πŸ’Ό TechPrep")
295
 
296
- def main():
297
- st.title("Interview Preparation Assistant")
298
-
299
- st.sidebar.header("Menu")
300
- selection = st.sidebar.radio("Choose an option", ["Generate Questions", "Provide Feedback", "Get Tips", "Schedule Mock Interview", "Track Progress", "Connect with Resources"])
301
-
302
- if selection == "Generate Questions":
303
- st.subheader("Generate Interview Questions")
304
- model_choice = st.selectbox("Select Model", ["OpenAI", "Gemini", "Groq"])
305
- role = st.text_input("Enter Role", "Software Engineer")
306
- question_type = st.selectbox("Select Question Type", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"])
307
- num_questions = st.slider("Number of Questions", 1, 10, 5)
308
- difficulty = st.selectbox("Select Difficulty", ["Easy", "Medium", "Hard"])
309
-
310
- if st.button("Generate Questions"):
311
- with st.spinner("Generating questions..."):
312
- questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
313
- if isinstance(questions, list):
314
- for question in questions:
315
- st.write(question)
316
- time.sleep(1) # Display questions one by one
317
-
318
- elif selection == "Provide Feedback":
319
- st.subheader("Provide Feedback on an Answer")
320
- model_choice = st.selectbox("Select Model", ["OpenAI", "Gemini", "Groq"])
321
- answer = st.text_area("Enter Interview Answer")
322
-
323
- if st.button("Get Feedback"):
 
 
 
324
  feedback = provide_feedback(model_choice, answer)
325
- st.write(style_output(feedback, "blue"))
 
326
  progress_data["feedback_provided"] += 1
327
 
328
- elif selection == "Get Tips":
329
- st.subheader("Get Interview Tips")
330
- model_choice = st.selectbox("Select Model", ["OpenAI", "Gemini", "Groq"])
331
- role = st.text_input("Enter Role", "Software Engineer")
332
-
333
- if st.button("Get Tips"):
334
- tips = get_tips(model_choice, role)
335
- st.write(style_output(tips, "green"))
336
- progress_data["tips_retrieved"] += 1
337
-
338
- elif selection == "Schedule Mock Interview":
339
- schedule_mock_interview()
340
-
341
- elif selection == "Track Progress":
342
- track_progress()
343
-
344
- elif selection == "Connect with Resources":
345
- connect_resources()
 
 
 
 
 
 
346
 
347
- if __name__ == "__main__":
348
- main()
 
1
  import streamlit as st
2
  import openai
3
  from langchain_google_genai import ChatGoogleGenerativeAI
 
 
4
  from datetime import datetime, timedelta
5
  import time
6
 
7
  # API keys
8
  GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]
9
  OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]
 
10
 
11
  # Initialize OpenAI
12
  openai.api_key = OPENAI_API_KEY
13
 
 
 
 
 
14
  # In-memory storage for progress tracking
15
  progress_data = {
16
  "questions_solved": {
 
26
  return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
27
  elif model_choice == "OpenAI":
28
  return None
 
 
 
 
29
  else:
30
  raise ValueError("Unsupported model choice.")
31
 
 
40
  prompt=prompt,
41
  max_tokens=150
42
  )
43
+ return response.choices[0].text.strip()
44
  elif model_choice == "Gemini":
45
  llm = get_llm(model_choice)
46
  response = llm.invoke(prompt)
47
+ return response.content
 
 
 
 
48
  else:
49
  raise ValueError("Unsupported model choice.")
50
 
 
61
  llm = get_llm(model_choice)
62
  response = llm.invoke(prompt)
63
  return response.content
 
 
 
 
64
  else:
65
  raise ValueError("Unsupported model choice.")
66
 
 
77
  llm = get_llm(model_choice)
78
  response = llm.invoke(prompt)
79
  return response.content
 
 
 
 
80
  else:
81
  raise ValueError("Unsupported model choice.")
82
 
 
270
 
271
  st.title("πŸ’Ό TechPrep")
272
 
273
+ model_choice = st.selectbox("Select AI Model", ["Gemini", "OpenAI"], key="select_model")
274
+
275
+ # Updated role dropdown with 20 roles
276
+ roles = [
277
+ "Software Developer", "Data Analyst", "Marketing Manager", "Project Manager", "UX Designer",
278
+ "Machine Learning Engineer", "Data Scientist", "AI Researcher", "Product Manager", "Business Analyst",
279
+ "Web Developer", "Mobile App Developer", "Cloud Engineer", "DevOps Engineer", "System Analyst",
280
+ "Database Administrator", "Cybersecurity Specialist", "QA Engineer", "IT Support Specialist", "Network Engineer",
281
+ "Graphic Designer"
282
+ ]
283
+ role = st.selectbox("Select Role", roles, key="select_role")
284
+
285
+ question_type = st.selectbox("Select Question Type", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"], key="select_question_type")
286
+ num_questions = st.slider("Number of Questions", 1, 10, key="slider_num_questions")
287
+ difficulty = st.selectbox("Select Difficulty Level", ["Easy", "Medium", "Hard"], key="select_difficulty")
288
+
289
+ st.header("πŸ“ Generate Interview Questions")
290
+ if st.button("Generate Questions", key="generate_questions"):
291
+ with st.spinner("Generating questions..."):
292
+ questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
293
+ st.markdown(style_output("Questions Generated:", "#4CAF50"), unsafe_allow_html=True)
294
+ st.write(questions)
295
+ progress_data["questions_solved"][question_type] += num_questions
296
+
297
+ st.header("πŸ—£οΈ Provide Feedback")
298
+ answer = st.text_area("Submit Your Answer", key="text_area_answer")
299
+ if st.button("Submit Answer", key="submit_answer"):
300
+ if not answer:
301
+ st.error("Please enter an answer to receive feedback.")
302
+ else:
303
+ with st.spinner("Providing feedback..."):
304
  feedback = provide_feedback(model_choice, answer)
305
+ st.markdown(style_output("Feedback Received:", "#FF5722"), unsafe_allow_html=True)
306
+ st.write(feedback)
307
  progress_data["feedback_provided"] += 1
308
 
309
+ st.header("πŸ’‘ Interview Tips")
310
+ if st.button("Get Tips", key="get_tips"):
311
+ with st.spinner("Fetching tips..."):
312
+ tips = get_tips(model_choice, role)
313
+ st.markdown(style_output("Tips Received:", "#2196F3"), unsafe_allow_html=True)
314
+ st.write(tips)
315
+ progress_data["tips_retrieved"] += 1
316
+
317
+ st.header("πŸ“… Schedule Mock Interview")
318
+ schedule_mock_interview()
319
+
320
+ st.header("πŸ“ˆ Progress and Resources")
321
+ col1, col2 = st.columns(2)
322
+ with col1:
323
+ track_progress()
324
+ with col2:
325
+ connect_resources()
326
+
327
+ # Footer
328
+ st.markdown("""
329
+ <div class="footer">
330
+ <p>&copy; 2024 TechPrep. All rights reserved.</p>
331
+ </div>
332
+ """, unsafe_allow_html=True)
333