Update app.py (#1)
Browse files — Update app.py (commit 67017fe94d1883c6779ba0802f8e63063d9d6e04)
app.py
CHANGED
|
@@ -131,52 +131,81 @@
|
|
| 131 |
|
| 132 |
|
| 133 |
|
| 134 |
-
|
| 135 |
-
|
| 136 |
import openai
|
| 137 |
import streamlit as st
|
| 138 |
-
from
|
| 139 |
|
| 140 |
-
# Set your OpenAI API key
|
| 141 |
-
openai.api_key = st.secrets["OPENAI_API_KEY"]
|
| 142 |
|
| 143 |
def generate_question(role, topic, difficulty_level):
|
| 144 |
prompt = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
return response
|
| 151 |
|
| 152 |
def evaluate_answer(question, user_answer):
|
| 153 |
prompt = f"Question: {question}\nUser's Answer: {user_answer}\nEvaluate the answer and provide feedback. Also, provide the best possible answer."
|
| 154 |
-
llm =
|
| 155 |
-
response = llm(prompt)
|
| 156 |
-
|
|
|
|
| 157 |
return response
|
| 158 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 159 |
st.title("Mock Interview Bot")
|
| 160 |
|
| 161 |
role = st.selectbox("Select the role:", ["Software Engineer", "Data Scientist", "Product Manager"])
|
| 162 |
topic = st.text_input("Enter the topic:")
|
| 163 |
difficulty_level = st.selectbox("Select difficulty level:", ["Easy", "Medium", "Hard"])
|
| 164 |
|
| 165 |
-
if st.button("
|
| 166 |
if role and topic and difficulty_level:
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
|
|
|
|
|
|
| 170 |
st.session_state['question_answered'] = False
|
| 171 |
|
| 172 |
-
if '
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
if
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
|
| 133 |
|
|
|
|
|
|
|
| 134 |
import openai
|
| 135 |
import streamlit as st
|
| 136 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 137 |
|
|
|
|
|
|
|
| 138 |
|
| 139 |
def generate_question(role, topic, difficulty_level):
    """Ask the Gemini model for one interview question tailored to the
    given role, topic, and difficulty level.

    Returns the question as a plain string.
    """
    prompt = f"Generate an interview question for the role of {role} on the topic of {topic} with difficulty level {difficulty_level}."
    model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
    # .invoke() returns a message object; callers expect only its text content.
    return model.invoke(prompt).content
|
| 146 |
|
| 147 |
def evaluate_answer(question, user_answer):
    """Have the Gemini model grade *user_answer* against *question*.

    Returns feedback text that also includes a model answer.
    """
    prompt = f"Question: {question}\nUser's Answer: {user_answer}\nEvaluate the answer and provide feedback. Also, provide the best possible answer."
    model = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
    # .invoke() returns a message object; callers expect only its text content.
    return model.invoke(prompt).content
|
| 154 |
|
| 155 |
+
# Initialize session state: seed each per-session key exactly once so that
# Streamlit reruns preserve the accumulated interview data.
_SESSION_DEFAULTS = {
    'questions': [],          # generated interview questions
    'answers': [],            # user's submitted answers
    'feedback': [],           # model feedback per answer
    'current_question': 0,    # index of the question being asked
    'total_questions': 10,    # length of one interview session
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
|
| 166 |
+
|
| 167 |
# Page title and interview-configuration widgets. These three values feed
# generate_question() when the interview starts.
st.title("Mock Interview Bot")

role = st.selectbox("Select the role:", ["Software Engineer", "Data Scientist", "Product Manager"])
topic = st.text_input("Enter the topic:")
difficulty_level = st.selectbox("Select difficulty level:", ["Easy", "Medium", "Hard"])
|
| 172 |
|
| 173 |
+
if st.button("Start Interview"):
    if role and topic and difficulty_level:
        # Reset any previous session first: the original code only appended,
        # so clicking "Start Interview" twice accumulated 20+ questions while
        # answers/feedback indices fell out of sync.
        st.session_state['questions'] = []
        st.session_state['answers'] = []
        st.session_state['feedback'] = []
        for _ in range(st.session_state['total_questions']):
            question = generate_question(role, topic, difficulty_level)
            st.session_state['questions'].append(question)
        st.session_state['current_question'] = 0
        st.write(f"Question 1: {st.session_state['questions'][0]}")
        st.session_state['question_answered'] = False
|
| 181 |
|
| 182 |
+
# Main question/answer loop: shown while the interview is in progress.
if 'questions' in st.session_state and st.session_state['current_question'] < st.session_state['total_questions']:
    if not st.session_state.get('question_answered', False):
        answer = st.text_area("Your Answer:")
        if st.button("Submit Answer"):
            if answer:
                current_question = st.session_state['current_question']
                st.session_state['answers'].append(answer)
                feedback = evaluate_answer(st.session_state['questions'][current_question], answer)
                st.session_state['feedback'].append(feedback)
                st.session_state['question_answered'] = True
                st.write(f"Feedback: {feedback}")

                if current_question + 1 < st.session_state['total_questions']:
                    # Advance to the next question and re-open the answer box.
                    st.session_state['current_question'] += 1
                    st.session_state['question_answered'] = False
                    st.write(f"Question {st.session_state['current_question'] + 1}: {st.session_state['questions'][st.session_state['current_question']]}")
                else:
                    # Mark the interview finished so the completion check at the
                    # bottom of the script renders the report. Calling
                    # generate_report() directly here raised NameError: the def
                    # appears later in this top-to-bottom Streamlit script, and
                    # the original never set current_question == total_questions,
                    # leaving the bottom check dead.
                    st.session_state['current_question'] = st.session_state['total_questions']
                    st.write("Interview Complete! Generating Report...")
|
| 201 |
+
|
| 202 |
+
def generate_report():
    """Render the final interview report: every question with the user's
    answer and the model's feedback.

    Reads `questions`, `answers`, and `feedback` from st.session_state.
    """
    st.write("### Interview Report")
    # zip stops at the shortest list, so a rerun that reaches this point with
    # mismatched lengths no longer raises IndexError the way the original
    # range(total_questions) loop did.
    rows = zip(st.session_state['questions'],
               st.session_state['answers'],
               st.session_state['feedback'])
    for i, (question, answer, feedback) in enumerate(rows, start=1):
        st.write(f"**Question {i}:** {question}")
        st.write(f"**Your Answer:** {answer}")
        st.write(f"**Feedback:** {feedback}")
        st.write("---")
|
| 209 |
+
|
| 210 |
+
# Once current_question has reached total_questions the interview is over;
# render the report on every rerun. This runs after generate_report() is
# defined, so the call resolves in Streamlit's top-to-bottom execution.
if 'current_question' in st.session_state and st.session_state['current_question'] == st.session_state['total_questions']:
    generate_report()
|