karthigrj committed
Commit 66bcc77 · verified · 1 Parent(s): aebb7b9

Update pages/02_Take_Interview.py

Files changed (1):
  pages/02_Take_Interview.py (+91 -101)
pages/02_Take_Interview.py CHANGED
@@ -1,65 +1,25 @@
  import streamlit as st
- import sounddevice as sd
- import soundfile as sf
- import numpy as np
- import scipy.io.wavfile as wav
  import openai
  from langchain_openai import ChatOpenAI
  import os
  import io
- import time
- import soundfile as sf
- from openai import OpenAI
-
- client = OpenAI()

  # Set OpenAI API key
- os.environ["OPENAI_API_KEY"] = 'sk-REDACTED'

- # Session state variables to control recording status
- if "recording" not in st.session_state:
-     st.session_state.recording = False
- if "audio_data" not in st.session_state:
-     st.session_state.audio_data = None
  if "responses" not in st.session_state:
      st.session_state.responses = []

- # Function to record audio using sounddevice
- def start_recording(file_path, duration=10, fs=44100, device_index=0):
-     st.session_state.recording = True
-     st.write(f"Recording Started....Duration is {duration} secs.")
-     audio_data = sd.rec(int(duration * fs), samplerate=fs, channels=1, dtype='int16', device=device_index)
-     sd.wait()  # Wait until the recording is finished
-     st.session_state.recording = False
-     sf.write(file_path, audio_data, fs)
-     st.write(f"Recording complete. File saved at {file_path}")
-
-     # Convert to BytesIO object in WAV format
-     audio_buffer = io.BytesIO()
-     wav.write(audio_buffer, fs, audio_data)
-     audio_buffer.seek(0)  # Ensure the buffer starts at the beginning
-     st.session_state.audio_data = audio_buffer
-
- # Function to transcribe audio using OpenAI's Whisper API
- def transcribe_audio(file_path):
-     with open(file_path, 'rb') as audio_file:
-         transcription = client.audio.transcriptions.create(
-             model="whisper-1",
-             file=audio_file,
-             response_format="text"
-         )
-
-     # Print transcription to verify the format
-     st.write("Transcription response:", transcription)
-
-     # Check if transcription is a string or dictionary
-     if isinstance(transcription, dict) and 'text' in transcription:
-         return transcription['text']
-     elif isinstance(transcription, str):
-         return transcription  # Return the string if it's already in text format
-     else:
-         return "Error: Unexpected response format."

  # Function to generate questions based on insights and job description
  def generate_questions(insights, jd_text):
@@ -79,6 +39,27 @@ def generate_questions(insights, jd_text):
      questions = response.generations[0][0].text.strip().split('\n')
      return questions

  # Function to analyze the transcribed response
  def analyze_response(transcript):
      prompt = f"""
@@ -96,59 +77,68 @@ def analyze_response(transcript):
      analysis = response.generations[0][0].text.strip()
      return analysis

  st.title("Interview Preparation")

  # Display candidate insights and job description
- if "insights" in st.session_state and "jd_text" in st.session_state:
-     st.write("### Candidate Insights")
-     st.write(st.session_state.insights)
-
-     st.write("### Job Description")
-     st.write(st.session_state.jd_text)
-
-     # Generate interview questions
-     if "questions" not in st.session_state:
-         st.session_state.questions = generate_questions(st.session_state.insights, st.session_state.jd_text)
-
-     st.write("### Interview Questions")
-     responses = []
-     for i, question in enumerate(st.session_state.questions, 1):
-         st.write(f"**Question {i}:** {question}")
-
-         # Record or input text response
-         col1, col2 = st.columns(2)
-         with col1:
-             file_path = f"recording_question_{i}.wav"
-             # Start and Stop buttons for recording
-             if st.button(f"Start Answer for Question {i}"):
-                 st.write(f"Starting to record for Question {i}...")
-                 start_recording(file_path=file_path, duration=10)
-
-             if st.button(f"Analyze Answer for Question {i}") and st.session_state.audio_data:
-                 transcript = transcribe_audio(file_path)
-
-                 # Display the transcribed response
-                 st.write("### Transcription of Your Response")
-                 st.write(transcript)
-
-                 # Analyze the transcribed response
-                 analysis = analyze_response(transcript)
-                 st.write("### Analysis of Your Response")
-                 st.write(analysis)
-                 responses.append((transcript, analysis))
-
-         with col2:
-             text_response = st.text_area(f"Or type your answer for Question {i}")
-             if text_response:
-                 st.write("### Analysis of Your Response")
-                 analysis = analyze_response(text_response)
-                 st.write(analysis)
-                 responses.append((text_response, analysis))
-
-     # Save responses for interviewer insights
-     st.session_state.responses = responses
-
- # Submit interview and generate final interviewer insights
  if st.button("Submit Interview"):
      insights_summary = "\n".join([response[1] for response in st.session_state.responses if response[1]])
      prompt = f"""
 
Resulting pages/02_Take_Interview.py (new file contents; added lines marked +):

  import streamlit as st
  import openai
  from langchain_openai import ChatOpenAI
  import os
  import io
+ import datetime
+ from audio_recorder_streamlit import audio_recorder

  # Set OpenAI API key
+ os.environ["OPENAI_API_KEY"] = 'sk-REDACTED'
+ client = openai.OpenAI()

+ # Initialize session state variables
  if "responses" not in st.session_state:
      st.session_state.responses = []
+ if "questions" not in st.session_state:
+     st.session_state.questions = []
+ if "insights" not in st.session_state:
+     st.session_state.insights = "Candidate insights text here."
+ if "jd_text" not in st.session_state:
+     st.session_state.jd_text = "Job description text here."

  # Function to generate questions based on insights and job description
  def generate_questions(insights, jd_text):
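Committing a literal API key to source control exposes it to anyone who can read the repository history. A safer pattern is to load the key from Streamlit's secrets store; below is a minimal sketch, assuming a `.streamlit/secrets.toml` file that defines an `OPENAI_API_KEY` entry (the file and entry name are assumptions, not part of this commit):

```python
import os
import openai
import streamlit as st

# Read the key from .streamlit/secrets.toml rather than hardcoding it in the page.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
client = openai.OpenAI()  # the client picks up OPENAI_API_KEY from the environment
```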
 
      questions = response.generations[0][0].text.strip().split('\n')
      return questions

+
+ # Function to save the audio file
+ def save_audio_file(audio_bytes, file_extension="wav"):
+     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+     file_name = f"audio_{timestamp}.{file_extension}"
+     file_path = os.path.join("saved_audios", file_name)
+
+     os.makedirs("saved_audios", exist_ok=True)  # Ensure the directory exists
+
+     with open(file_path, "wb") as f:
+         f.write(audio_bytes)
+     return file_path
+
+
+ # Function to transcribe audio using Whisper API
+ def transcribe_audio(audio_bytes):
+     audio_file = io.BytesIO(audio_bytes)
+     audio_file.name = "audio.wav"  # the endpoint needs a filename to infer the format
+     response = client.audio.transcriptions.create(
+         model="whisper-1", file=audio_file, response_format="text"
+     )
+     return response
+
+
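For orientation, a minimal usage sketch of the two helpers above; `sample.wav` is a placeholder input file, not something this commit provides:

```python
# Hypothetical smoke test: run an existing WAV file through both helpers.
with open("sample.wav", "rb") as f:   # sample.wav is a placeholder path
    audio_bytes = f.read()

path = save_audio_file(audio_bytes, "wav")  # writes saved_audios/audio_<timestamp>.wav
text = transcribe_audio(audio_bytes)        # plain string when response_format="text"
print(path, text)
```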
  # Function to analyze the transcribed response
  def analyze_response(transcript):
      prompt = f"""
 
      analysis = response.generations[0][0].text.strip()
      return analysis

+
+ # Function to handle audio recording and transcription
+ def handle_audio_recording(question_index):
+     audio_bytes = audio_recorder(pause_threshold=2.0, sample_rate=41000, key=f"audio_recorder_{question_index}")
+     if audio_bytes:
+         st.audio(audio_bytes, format="audio/wav")  # Play back the recorded audio
+
+         # Save audio and report the file path
+         file_path = save_audio_file(audio_bytes, "wav")
+         st.write(f"Audio saved to: {file_path}")
+
+         # Transcribe the audio and analyze the response
+         transcript = transcribe_audio(audio_bytes)  # Transcribe using Whisper
+         st.write("### Transcription of Your Response")
+         st.write(transcript)
+
+         analysis = analyze_response(transcript)
+         st.write("### Analysis of Your Response")
+         st.write(analysis)
+
+         return transcript, analysis
+     return None, None
+
+
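The per-question `key` passed to `audio_recorder` matters: Streamlit widgets created inside a loop need unique keys, or the app stops with a duplicate-widget-ID error. A minimal sketch of the pattern in isolation (the loop bound is arbitrary):

```python
import streamlit as st
from audio_recorder_streamlit import audio_recorder

for i in range(1, 4):
    # One recorder per question; the unique key keeps the widget
    # identities distinct across loop iterations.
    audio_bytes = audio_recorder(key=f"audio_recorder_{i}")
    if audio_bytes:  # None until the user has finished a recording
        st.audio(audio_bytes, format="audio/wav")
```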
  st.title("Interview Preparation")

  # Display candidate insights and job description
+ st.write("### Candidate Insights")
+ st.write(st.session_state.insights)
+
+ st.write("### Job Description")
+ st.write(st.session_state.jd_text)
+
+ # Generate interview questions if they haven't been generated yet
+ if not st.session_state.questions:
+     st.session_state.questions = generate_questions(st.session_state.insights, st.session_state.jd_text)
+
+ # Display and interact with each question
+ responses = []
+ for i, question in enumerate(st.session_state.questions, 1):
+     st.write(f"**Question {i}:** {question}")
+
+     col1, col2 = st.columns(2)
+     with col1:
+         st.write("#### Record Your Answer")
+         transcript, analysis = handle_audio_recording(i)  # Pass the question index to avoid duplicate widget IDs
+         if transcript and analysis:
+             responses.append((transcript, analysis))
+
+     with col2:
+         st.write("#### Or Type Your Answer")
+         text_response = st.text_area(f"Type your answer for Question {i}", key=f"text_response_{i}")
+         if text_response:
+             analysis = analyze_response(text_response)
+             st.write("### Analysis of Your Response")
+             st.write(analysis)
+             responses.append((text_response, analysis))
+
+ # Save responses in session state
+ st.session_state.responses = responses
+
+ # Submit interview and generate final insights
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  if st.button("Submit Interview"):
143
  insights_summary = "\n".join([response[1] for response in st.session_state.responses if response[1]])
144
  prompt = f"""