rashid01 committed on
Commit
8ec950e
·
verified ·
1 Parent(s): 662f06d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -31
app.py CHANGED
@@ -22,10 +22,8 @@ progress_data = {
22
  }
23
 
24
  def get_llm(model_choice):
25
- if model_choice == "Gemini":
26
  return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
27
- elif model_choice == "Groq":
28
- return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY) # Groq uses Gemini AI
29
  elif model_choice == "OpenAI":
30
  return None
31
  else:
@@ -43,7 +41,7 @@ def generate_questions(model_choice, role, question_type, num_questions, difficu
43
  max_tokens=150
44
  )
45
  return response.choices[0].text.strip().split("\n")
46
- elif model_choice in ["Gemini", "Groq"]:
47
  llm = get_llm(model_choice)
48
  response = llm.invoke(prompt)
49
  return response.content.split("\n")
@@ -59,7 +57,7 @@ def provide_feedback(model_choice, answer):
59
  max_tokens=150
60
  )
61
  return response.choices[0].text.strip()
62
- elif model_choice in ["Gemini", "Groq"]:
63
  llm = get_llm(model_choice)
64
  response = llm.invoke(prompt)
65
  return response.content
@@ -75,7 +73,7 @@ def get_tips(model_choice, role):
75
  max_tokens=150
76
  )
77
  return response.choices[0].text.strip()
78
- elif model_choice in ["Gemini", "Groq"]:
79
  llm = get_llm(model_choice)
80
  response = llm.invoke(prompt)
81
  return response.content
@@ -197,10 +195,6 @@ st.markdown(
197
  body {
198
  background-color: #e0f7fa; /* Light cyan background color */
199
  font-family: Arial, sans-serif;
200
- background-image: url('https://example.com/your-watermark-image.png'); /* URL to your watermark image */
201
- background-repeat: no-repeat;
202
- background-position: center;
203
- background-size: cover;
204
  }
205
  .stButton>button {
206
  width: 100%;
@@ -209,20 +203,14 @@ st.markdown(
209
  color: white;
210
  background-color: #4CAF50;
211
  border: none;
212
- border-radius: 5px;
213
  cursor: pointer;
 
214
  }
215
  .stButton>button:hover {
216
  background-color: #45a049;
217
  }
218
  .output-container {
219
- border: 2px solid #2196F3;
220
- border-radius: 8px;
221
- padding: 15px;
222
- margin: 15px 0;
223
- background-color: #f1f1f1;
224
- }
225
- .progress-container {
226
  border: 2px solid #2196F3;
227
  border-radius: 8px;
228
  padding: 15px;
@@ -266,11 +254,17 @@ with welcome_message.container():
266
  time.sleep(4) # Wait for 4 seconds
267
  welcome_message.empty() # Remove the welcome message
268
 
269
- # Initialize session state for questions and current index
270
  if 'questions' not in st.session_state:
271
  st.session_state.questions = []
 
 
 
 
272
  if 'question_index' not in st.session_state:
273
  st.session_state.question_index = 0
 
 
274
 
275
  # Sidebar Navigation
276
  st.sidebar.title("TechPrep Navigation")
@@ -282,11 +276,7 @@ if nav_option == "Generate Questions":
282
  st.header("📝 Generate Interview Questions")
283
 
284
  model_choice = st.selectbox("Choose Model:", ["OpenAI", "Gemini", "Groq"])
285
- role = st.selectbox("Role", [
286
- "Software Engineer", "Data Scientist", "ML Engineer", "GenAI Specialist",
287
- "Product Manager", "UX Designer", "DevOps Engineer", "Cloud Architect",
288
- "Business Analyst", "Database Administrator", "System Administrator", "Network Engineer"
289
- ])
290
  question_type = st.selectbox("Question Type:", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"])
291
  num_questions = st.number_input("Number of Questions:", min_value=1, max_value=20, value=5)
292
  difficulty = st.selectbox("Difficulty Level:", ["Basic", "Medium", "Complex"])
@@ -295,7 +285,10 @@ if nav_option == "Generate Questions":
295
  with st.spinner("Generating questions..."):
296
  questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
297
  st.session_state.questions = questions
 
 
298
  st.session_state.question_index = 0
 
299
  progress_data["questions_solved"][question_type] += num_questions
300
 
301
  # Display questions with navigation
@@ -303,28 +296,22 @@ if nav_option == "Generate Questions":
303
  question_list = st.session_state.questions
304
  index = st.session_state.question_index
305
 
306
- # Debugging information
307
- st.write(f"**Current Index:** {index}")
308
- st.write(f"**Total Questions:** {len(question_list)}")
309
-
310
  if index < len(question_list):
311
  st.write(f"**Question {index + 1}:** {question_list[index]}")
312
 
313
  # Answer input box
314
  answer = st.text_area("Your Answer", key="text_area_answer")
315
-
316
  col1, col2 = st.columns(2)
317
  with col1:
318
  if index > 0:
319
  if st.button("Previous"):
320
  st.session_state.question_index -= 1
321
- st.experimental_rerun() # Re-run to update display
322
 
323
  with col2:
324
  if index < len(question_list) - 1:
325
  if st.button("Next"):
326
  st.session_state.question_index += 1
327
- st.experimental_rerun() # Re-run to update display
328
 
329
  # Submit answer and provide feedback
330
  if st.button("Submit Answer"):
@@ -333,10 +320,35 @@ if nav_option == "Generate Questions":
333
  else:
334
  with st.spinner("Providing feedback..."):
335
  feedback = provide_feedback(model_choice, answer)
 
 
336
  st.markdown(style_output("Feedback Received:", "#FF5722"), unsafe_allow_html=True)
337
  st.write(feedback)
338
  progress_data["feedback_provided"] += 1
339
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
340
  elif nav_option == "Mock Interview":
341
  st.header("🎥 Mock Interview")
342
  schedule_mock_interview()
@@ -353,3 +365,4 @@ st.markdown("""
353
  <p>&copy; 2024 TechPrep. All rights reserved.</p>
354
  </div>
355
  """, unsafe_allow_html=True)
 
 
22
  }
23
 
24
  def get_llm(model_choice):
25
+ if model_choice == "Gemini" or model_choice == "Groq":
26
  return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
 
 
27
  elif model_choice == "OpenAI":
28
  return None
29
  else:
 
41
  max_tokens=150
42
  )
43
  return response.choices[0].text.strip().split("\n")
44
+ elif model_choice == "Gemini" or model_choice == "Groq":
45
  llm = get_llm(model_choice)
46
  response = llm.invoke(prompt)
47
  return response.content.split("\n")
 
57
  max_tokens=150
58
  )
59
  return response.choices[0].text.strip()
60
+ elif model_choice == "Gemini" or model_choice == "Groq":
61
  llm = get_llm(model_choice)
62
  response = llm.invoke(prompt)
63
  return response.content
 
73
  max_tokens=150
74
  )
75
  return response.choices[0].text.strip()
76
+ elif model_choice == "Gemini" or model_choice == "Groq":
77
  llm = get_llm(model_choice)
78
  response = llm.invoke(prompt)
79
  return response.content
 
195
  body {
196
  background-color: #e0f7fa; /* Light cyan background color */
197
  font-family: Arial, sans-serif;
 
 
 
 
198
  }
199
  .stButton>button {
200
  width: 100%;
 
203
  color: white;
204
  background-color: #4CAF50;
205
  border: none;
206
+ border-radius: 8px;
207
  cursor: pointer;
208
+ transition: background-color 0.3s ease;
209
  }
210
  .stButton>button:hover {
211
  background-color: #45a049;
212
  }
213
  .output-container {
 
 
 
 
 
 
 
214
  border: 2px solid #2196F3;
215
  border-radius: 8px;
216
  padding: 15px;
 
254
  time.sleep(4) # Wait for 4 seconds
255
  welcome_message.empty() # Remove the welcome message
256
 
257
+ # Initialize session state for questions, answers, and current index
258
  if 'questions' not in st.session_state:
259
  st.session_state.questions = []
260
+ if 'answers' not in st.session_state:
261
+ st.session_state.answers = []
262
+ if 'feedback' not in st.session_state:
263
+ st.session_state.feedback = []
264
  if 'question_index' not in st.session_state:
265
  st.session_state.question_index = 0
266
+ if 'show_results' not in st.session_state:
267
+ st.session_state.show_results = False
268
 
269
  # Sidebar Navigation
270
  st.sidebar.title("TechPrep Navigation")
 
276
  st.header("📝 Generate Interview Questions")
277
 
278
  model_choice = st.selectbox("Choose Model:", ["OpenAI", "Gemini", "Groq"])
279
+ role = st.selectbox("Role:", ["GenAI", "ML", "DevOps", "Software Engineer", "Data Scientist", "Product Manager", "Designer", "Business Analyst"])
 
 
 
 
280
  question_type = st.selectbox("Question Type:", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"])
281
  num_questions = st.number_input("Number of Questions:", min_value=1, max_value=20, value=5)
282
  difficulty = st.selectbox("Difficulty Level:", ["Basic", "Medium", "Complex"])
 
285
  with st.spinner("Generating questions..."):
286
  questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
287
  st.session_state.questions = questions
288
+ st.session_state.answers = ["" for _ in questions]
289
+ st.session_state.feedback = ["" for _ in questions]
290
  st.session_state.question_index = 0
291
+ st.session_state.show_results = False
292
  progress_data["questions_solved"][question_type] += num_questions
293
 
294
  # Display questions with navigation
 
296
  question_list = st.session_state.questions
297
  index = st.session_state.question_index
298
 
 
 
 
 
299
  if index < len(question_list):
300
  st.write(f"**Question {index + 1}:** {question_list[index]}")
301
 
302
  # Answer input box
303
  answer = st.text_area("Your Answer", key="text_area_answer")
304
+
305
  col1, col2 = st.columns(2)
306
  with col1:
307
  if index > 0:
308
  if st.button("Previous"):
309
  st.session_state.question_index -= 1
 
310
 
311
  with col2:
312
  if index < len(question_list) - 1:
313
  if st.button("Next"):
314
  st.session_state.question_index += 1
 
315
 
316
  # Submit answer and provide feedback
317
  if st.button("Submit Answer"):
 
320
  else:
321
  with st.spinner("Providing feedback..."):
322
  feedback = provide_feedback(model_choice, answer)
323
+ st.session_state.answers[index] = answer
324
+ st.session_state.feedback[index] = feedback
325
  st.markdown(style_output("Feedback Received:", "#FF5722"), unsafe_allow_html=True)
326
  st.write(feedback)
327
  progress_data["feedback_provided"] += 1
328
 
329
+ # Show results and score when all questions have been answered
330
+ if index == len(question_list) - 1:
331
+ st.session_state.show_results = True
332
+
333
+ if st.session_state.show_results:
334
+ st.write("### Your Results")
335
+ total_questions = len(st.session_state.questions)
336
+ answered_questions = sum(1 for ans in st.session_state.answers if ans)
337
+ score = (answered_questions / total_questions) * 100
338
+ st.write(f"**Score:** {score:.2f}%")
339
+
340
+ # Display feedback and tips
341
+ st.write("### Feedback Summary")
342
+ for i, (q, ans, fb) in enumerate(zip(st.session_state.questions, st.session_state.answers, st.session_state.feedback)):
343
+ st.write(f"**Question {i + 1}:** {q}")
344
+ st.write(f"**Your Answer:** {ans}")
345
+ st.write(f"**Feedback:** {fb}")
346
+
347
+ tips = get_tips(model_choice, role)
348
+ st.write("### Tips to Improve")
349
+ st.write(tips)
350
+ progress_data["tips_retrieved"] += 1
351
+
352
  elif nav_option == "Mock Interview":
353
  st.header("🎥 Mock Interview")
354
  schedule_mock_interview()
 
365
  <p>&copy; 2024 TechPrep. All rights reserved.</p>
366
  </div>
367
  """, unsafe_allow_html=True)
368
+