pk75 committed on
Commit
8f736d9
·
verified ·
1 Parent(s): 7c82687

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -5
app.py CHANGED
@@ -17,22 +17,32 @@ def start_interview(interview_type, doc_file, name, num_questions):
17
  chatbot: gr.update(value=[[None, "Please select an interview type and upload a document to begin."]]),
18
  audio_in: gr.update(interactive=False)
19
  }
 
20
  doc_text = extract_text_from_document(doc_file.name)
21
  if "Error" in doc_text or "Unsupported" in doc_text:
22
  return {
23
  chatbot: gr.update(value=[[None, f"Error: {doc_text}"]]),
24
  audio_in: gr.update(interactive=False)
25
  }
 
26
  initial_state = {
27
- "interview_type": interview_type, "doc_text": doc_text,
28
- "name": name if name else "User", "question_count": int(num_questions),
29
- "current_question_num": 1, "interview_log": []
 
 
 
 
30
  }
 
31
  first_question = generate_question(interview_type, doc_text)
32
  initial_state["current_question_text"] = first_question
 
33
  greeting = f"Hello {initial_state['name']}. We'll go through {int(num_questions)} questions today. Here is your first question:"
 
34
  tts_prompt = f"{greeting} {first_question}"
35
  ai_voice_path = text_to_speech_file(tts_prompt)
 
36
  return {
37
  state: initial_state,
38
  chatbot: gr.update(value=[[None, f"{greeting}\n\n{first_question}"]]),
@@ -52,11 +62,14 @@ def handle_interview_turn(user_audio, chatbot_history, current_state):
52
  "answer": user_answer_text,
53
  "evaluation": evaluation_text
54
  })
 
55
  if current_state["current_question_num"] >= current_state["question_count"]:
56
  end_message = "This concludes the interview. Generating your final report now."
57
  chatbot_history.append([None, end_message])
 
58
  pdf_path = generate_pdf_file(current_state)
59
  ai_voice_path = text_to_speech_file(end_message)
 
60
  yield {
61
  chatbot: chatbot_history,
62
  audio_out: gr.update(value=ai_voice_path, autoplay=True),
@@ -66,10 +79,13 @@ def handle_interview_turn(user_audio, chatbot_history, current_state):
66
  current_state["current_question_num"] += 1
67
  next_question = generate_question(current_state["interview_type"], current_state["doc_text"])
68
  current_state["current_question_text"] = next_question
 
69
  q_num = current_state["current_question_num"]
70
  transition_message = f"Thank you. Here is question {q_num}:\n\n{next_question}"
71
  chatbot_history.append([None, transition_message])
 
72
  ai_voice_path = text_to_speech_file(transition_message)
 
73
  yield {
74
  state: current_state,
75
  chatbot: chatbot_history,
@@ -78,8 +94,15 @@ def handle_interview_turn(user_audio, chatbot_history, current_state):
78
  }
79
 
80
  def generate_pdf_file(state):
81
- final_data = { "name": state["name"], "type": state["interview_type"], "q_and_a": state["interview_log"] }
82
- file_name = f"Report_{state['name']}_{datetime.datetime.now().strftime('%Y-%m-%d')}.pdf"
 
 
 
 
 
 
 
83
  file_path = os.path.join(config.REPORT_FOLDER, file_name)
84
  generate_pdf_report(final_data, file_path)
85
  return file_path
@@ -95,6 +118,7 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
95
  num_questions_slider = gr.Slider(minimum=2, maximum=10, value=5, step=1, label="Number of Questions")
96
  doc_uploader = gr.File(label="Upload Resume/CV")
97
  start_btn = gr.Button("Start Interview", variant="primary")
 
98
  with gr.Column(scale=2):
99
  chatbot = gr.Chatbot(label="Conversation", height=500)
100
  audio_in = gr.Audio(sources=["microphone"], type="filepath", label="Record Your Answer", interactive=False)
@@ -106,6 +130,7 @@ with gr.Blocks(theme=gr.themes.Default()) as app:
106
  inputs=[interview_type_dd, doc_uploader, user_name, num_questions_slider],
107
  outputs=[state, chatbot, audio_out, audio_in, start_btn]
108
  )
 
109
  audio_in.stop_recording(
110
  fn=handle_interview_turn,
111
  inputs=[audio_in, chatbot, state],
 
17
  chatbot: gr.update(value=[[None, "Please select an interview type and upload a document to begin."]]),
18
  audio_in: gr.update(interactive=False)
19
  }
20
+
21
  doc_text = extract_text_from_document(doc_file.name)
22
  if "Error" in doc_text or "Unsupported" in doc_text:
23
  return {
24
  chatbot: gr.update(value=[[None, f"Error: {doc_text}"]]),
25
  audio_in: gr.update(interactive=False)
26
  }
27
+
28
  initial_state = {
29
+ "interview_type": interview_type,
30
+ "doc_text": doc_text,
31
+ "name": name if name else "User",
32
+ "question_count": int(num_questions),
33
+ "current_question_num": 1,
34
+ "interview_log": [],
35
+ "start_time": time.time()
36
  }
37
+
38
  first_question = generate_question(interview_type, doc_text)
39
  initial_state["current_question_text"] = first_question
40
+
41
  greeting = f"Hello {initial_state['name']}. We'll go through {int(num_questions)} questions today. Here is your first question:"
42
+
43
  tts_prompt = f"{greeting} {first_question}"
44
  ai_voice_path = text_to_speech_file(tts_prompt)
45
+
46
  return {
47
  state: initial_state,
48
  chatbot: gr.update(value=[[None, f"{greeting}\n\n{first_question}"]]),
 
62
  "answer": user_answer_text,
63
  "evaluation": evaluation_text
64
  })
65
+
66
  if current_state["current_question_num"] >= current_state["question_count"]:
67
  end_message = "This concludes the interview. Generating your final report now."
68
  chatbot_history.append([None, end_message])
69
+
70
  pdf_path = generate_pdf_file(current_state)
71
  ai_voice_path = text_to_speech_file(end_message)
72
+
73
  yield {
74
  chatbot: chatbot_history,
75
  audio_out: gr.update(value=ai_voice_path, autoplay=True),
 
79
  current_state["current_question_num"] += 1
80
  next_question = generate_question(current_state["interview_type"], current_state["doc_text"])
81
  current_state["current_question_text"] = next_question
82
+
83
  q_num = current_state["current_question_num"]
84
  transition_message = f"Thank you. Here is question {q_num}:\n\n{next_question}"
85
  chatbot_history.append([None, transition_message])
86
+
87
  ai_voice_path = text_to_speech_file(transition_message)
88
+
89
  yield {
90
  state: current_state,
91
  chatbot: chatbot_history,
 
94
  }
95
 
96
def generate_pdf_file(state):
    """Assemble the interview report payload and render it to a PDF on disk.

    Pulls name, interview type, elapsed duration, and the Q&A log out of the
    session ``state`` dict (every read is defensive via ``.get`` so a partial
    state still produces a report), then delegates rendering to
    ``generate_pdf_report``.

    Returns:
        str: filesystem path of the generated PDF inside ``config.REPORT_FOLDER``.
    """
    # Duration in minutes; falls back to ~0 if start_time was never recorded.
    elapsed_minutes = (time.time() - state.get("start_time", time.time())) / 60
    report_payload = {
        "name": state.get("name", "N/A"),
        "type": state.get("interview_type", "N/A"),
        "duration": elapsed_minutes,
        "q_and_a": state.get("interview_log", []),
    }
    date_stamp = datetime.datetime.now().strftime('%Y-%m-%d')
    file_name = f"Report_{state.get('name', 'User')}_{date_stamp}.pdf"
    file_path = os.path.join(config.REPORT_FOLDER, file_name)
    generate_pdf_report(report_payload, file_path)
    return file_path
 
118
  num_questions_slider = gr.Slider(minimum=2, maximum=10, value=5, step=1, label="Number of Questions")
119
  doc_uploader = gr.File(label="Upload Resume/CV")
120
  start_btn = gr.Button("Start Interview", variant="primary")
121
+
122
  with gr.Column(scale=2):
123
  chatbot = gr.Chatbot(label="Conversation", height=500)
124
  audio_in = gr.Audio(sources=["microphone"], type="filepath", label="Record Your Answer", interactive=False)
 
130
  inputs=[interview_type_dd, doc_uploader, user_name, num_questions_slider],
131
  outputs=[state, chatbot, audio_out, audio_in, start_btn]
132
  )
133
+
134
  audio_in.stop_recording(
135
  fn=handle_interview_turn,
136
  inputs=[audio_in, chatbot, state],