|
|
import streamlit as st |
|
|
import openai |
|
|
from langchain_google_genai import ChatGoogleGenerativeAI |
|
|
from datetime import datetime, timedelta |
|
|
import time |
|
|
|
|
|
|
|
|
# --- API credentials -------------------------------------------------------
# Keys come from Streamlit's secrets store (.streamlit/secrets.toml).
# st.secrets[...] raises KeyError if an entry is missing, stopping the app
# early with a clear error instead of failing later mid-request.
GOOGLE_API_KEY = st.secrets["GOOGLE_API_KEY"]

OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]

# The openai SDK picks up its credential from this module-level attribute.
openai.api_key = OPENAI_API_KEY
|
|
|
|
|
|
|
|
# Persist progress counters across Streamlit reruns. Streamlit re-executes
# this whole script on every user interaction, so a plain module-level dict
# would be reset to zeros each time and "Track Progress" would never move.
# Backing the dict with st.session_state keeps it alive for the session.
if "progress_data" not in st.session_state:
    st.session_state.progress_data = {
        # Per-category count of questions the user has worked through.
        "questions_solved": {
            "Behavioral": 0, "Technical": 0, "Situational": 0, "Case Study": 0, "Problem Solving": 0
        },
        "mock_interviews_taken": 0,
        "feedback_provided": 0,
        "tips_retrieved": 0,
    }

# Module-level alias so existing readers/mutators (functions below and the
# page sections) keep working unchanged; mutations go to the session copy.
progress_data = st.session_state.progress_data
|
|
|
|
|
def get_llm(model_choice):
    """Return a LangChain chat model for the requested provider.

    Args:
        model_choice: One of "Gemini", "Groq", or "OpenAI".

    Returns:
        A ``ChatGoogleGenerativeAI`` instance for "Gemini"/"Groq", or ``None``
        for "OpenAI" (OpenAI calls go through the openai SDK directly).

    Raises:
        ValueError: For any other provider name.
    """
    # "Groq" is currently served by the same Gemini backend.
    if model_choice in ("Gemini", "Groq"):
        return ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
    if model_choice == "OpenAI":
        return None
    raise ValueError("Unsupported model choice.")
|
|
|
|
|
def generate_questions(model_choice, role, question_type, num_questions, difficulty):
    """Generate interview questions with the selected LLM backend.

    Args:
        model_choice: "OpenAI", "Gemini", or "Groq".
        role: Target job role the questions are for.
        question_type: Category label, e.g. "Behavioral" or "Technical".
        num_questions: How many questions to request.
        difficulty: "Basic", "Medium", or "Complex".

    Returns:
        A list of question strings, one per non-blank line of model output.

    Raises:
        ValueError: If ``model_choice`` is not a supported backend.
    """
    prompt = (
        f"Generate {num_questions} {difficulty} {question_type.lower()} interview questions for the role of {role}. "
        f"Only include {question_type.lower()} questions."
    )
    if model_choice == "OpenAI":
        # The legacy Completion API and text-davinci-003 are retired; use the
        # chat completions endpoint (openai-python >= 1.0, module-level client).
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=150,
        )
        text = response.choices[0].message.content.strip()
    elif model_choice in ("Gemini", "Groq"):
        response = get_llm(model_choice).invoke(prompt)
        text = response.content
    else:
        raise ValueError("Unsupported model choice.")
    # Drop blank separator lines so they don't show up as empty "questions".
    return [line for line in text.split("\n") if line.strip()]
|
|
|
|
|
def provide_feedback(model_choice, answer):
    """Ask the selected LLM backend for feedback on an interview answer.

    Args:
        model_choice: "OpenAI", "Gemini", or "Groq".
        answer: The user's free-text interview answer.

    Returns:
        The model's feedback as a plain string.

    Raises:
        ValueError: If ``model_choice`` is not a supported backend.
    """
    prompt = f"Provide constructive feedback on the following interview answer: {answer}"
    if model_choice == "OpenAI":
        # The legacy Completion API and text-davinci-003 are retired; use the
        # chat completions endpoint (openai-python >= 1.0, module-level client).
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=150,
        )
        return response.choices[0].message.content.strip()
    if model_choice in ("Gemini", "Groq"):
        response = get_llm(model_choice).invoke(prompt)
        return response.content
    raise ValueError("Unsupported model choice.")
|
|
|
|
|
def get_tips(model_choice, role):
    """Fetch role-specific interview tips from the selected LLM backend.

    Args:
        model_choice: "OpenAI", "Gemini", or "Groq".
        role: Target job role to tailor the tips to.

    Returns:
        The tips as a plain string.

    Raises:
        ValueError: If ``model_choice`` is not a supported backend.
    """
    prompt = f"Provide useful interview tips for the role of {role}. Include body language, dress code, etiquette, and role-specific advice."
    if model_choice == "OpenAI":
        # The legacy Completion API and text-davinci-003 are retired; use the
        # chat completions endpoint (openai-python >= 1.0, module-level client).
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=150,
        )
        return response.choices[0].message.content.strip()
    if model_choice in ("Gemini", "Groq"):
        response = get_llm(model_choice).invoke(prompt)
        return response.content
    raise ValueError("Unsupported model choice.")
|
|
|
|
|
def start_mock_interview():
    """Run the simulated live mock-interview screen with a 60-second timer.

    Renders a static "video call" panel, blocks for one minute while showing
    the countdown in the final three seconds, then bumps the interview counter.
    """
    st.write("### Mock Interview Starting")
    st.write("The mock interview is starting now. Please connect with your interviewer.")

    # Placeholder call window rendered as raw HTML.
    st.markdown("""
    <div style="
        width: 100%;
        height: 400px;
        background-color: #000;
        color: #fff;
        display: flex;
        flex-direction: column;
        align-items: center;
        justify-content: center;
        font-size: 24px;
        font-weight: bold;
        border-radius: 8px;
        border: 2px solid #2196F3;
    ">
        <p style="margin: 10px;">
            <i class="fa fa-circle" aria-hidden="true" style="font-size: 50px; color: red;"></i><br>
            Call is being RECORDED
        </p>
        <p style="font-size: 18px;">Interview with: John Doe</p>
    </div>
    """, unsafe_allow_html=True)

    # One-minute session timer. The remaining time is only written out during
    # the final 3 seconds; the rest of the minute is a silent blocking wait.
    countdown_end = datetime.now() + timedelta(seconds=60)
    remaining = countdown_end - datetime.now()
    while remaining > timedelta(0):
        if remaining <= timedelta(seconds=3):
            # Trim sub-second precision from the displayed timedelta.
            st.write(f"**Time Remaining:** {str(remaining).split('.')[0]}")
        time.sleep(1)
        remaining = countdown_end - datetime.now()

    st.write("Mock Interview Session Ended.")
    progress_data["mock_interviews_taken"] += 1
|
|
|
|
|
def schedule_mock_interview():
    """Let the user start a mock interview now or schedule one for later.

    Renders date/time/duration widgets plus two actions: "Start Interview Now"
    runs the live session immediately; "Schedule Interview" validates that the
    chosen slot is in the future and records the booking.
    """
    st.subheader("Schedule a Mock Interview")
    date = st.date_input("Select Date", min_value=datetime.today())
    # Renamed from `time`: the original local shadowed the imported `time`
    # module, which would break any later time.* call in this scope.
    interview_time = st.time_input("Select Time", value=datetime.now().time())
    duration = st.selectbox("Duration (Minutes)", [30, 45, 60])
    now = datetime.now()
    selected_datetime = datetime.combine(date, interview_time)

    col1, col2 = st.columns(2)
    with col1:
        if st.button("Start Interview Now"):
            start_mock_interview()

    with col2:
        if st.button("Schedule Interview"):
            if selected_datetime < now:
                st.error("Selected time is in the past. Please choose a future time.")
            else:
                st.success(f"Mock interview scheduled for {selected_datetime.strftime('%Y-%m-%d %H:%M:%S')} with a duration of {duration} minutes.")
                progress_data["mock_interviews_taken"] += 1
|
|
|
|
|
def track_progress():
    """Render the progress dashboard from the `progress_data` counters."""
    st.subheader("Track Your Progress")
    st.write("Here's your detailed progress data:")
    # NOTE(review): these counters come from a module-level dict; unless that
    # dict is backed by st.session_state, Streamlit's rerun-per-interaction
    # model resets it and everything below always shows zero — confirm.
    st.markdown(f"""
    <div class="progress-container">
        <p><strong>Behavioral Questions Solved:</strong> {progress_data['questions_solved']['Behavioral']}</p>
        <p><strong>Technical Questions Solved:</strong> {progress_data['questions_solved']['Technical']}</p>
        <p><strong>Situational Questions Solved:</strong> {progress_data['questions_solved']['Situational']}</p>
        <p><strong>Case Study Questions Solved:</strong> {progress_data['questions_solved']['Case Study']}</p>
        <p><strong>Problem Solving Questions Solved:</strong> {progress_data['questions_solved']['Problem Solving']}</p>
        <p><strong>Mock Interviews Taken:</strong> {progress_data['mock_interviews_taken']}</p>
        <p><strong>Feedback Provided:</strong> {progress_data['feedback_provided']}</p>
        <p><strong>Tips Retrieved:</strong> {progress_data['tips_retrieved']}</p>
    </div>
    """, unsafe_allow_html=True)
|
|
|
|
|
def connect_resources():
    """Show curated interview-prep resources and a contact form for coaching."""
    st.subheader("Connect with Resources")

    # Static resource links, written out in order.
    for resource_line in (
        "### Articles & Books",
        "1. [The Complete Guide to Job Interviews](https://example.com)",
        "2. [Cracking the Coding Interview](https://example.com)",
        "### Videos",
        "1. [Top 10 Interview Tips](https://example.com)",
        "2. [Behavioral Interview Questions Explained](https://example.com)",
        "### Connect with Career Coaches",
        "If you need personalized help, please fill out the form below or contact us through [Career Coaches Contact](https://example.com).",
    ):
        st.write(resource_line)

    # Contact form: all widget values are delivered together on submit.
    with st.form("contact_form"):
        st.write("For personalized assistance, please fill out this form:")
        name = st.text_input("Name")
        email = st.text_input("Email")
        message = st.text_area("Message")
        submit_button = st.form_submit_button("Submit")

    if submit_button:
        # Every field is required before we acknowledge the submission.
        if name and email and message:
            st.success("Thank you for contacting us! We will get back to you soon.")
        else:
            st.error("Please fill out all fields.")
|
|
|
|
|
def style_output(text, color):
    """Wrap *text* in the app's bordered `.output-container` div.

    Args:
        text: The message to display (inserted into HTML verbatim).
        color: CSS color value applied to the text.

    Returns:
        An HTML snippet suitable for ``st.markdown(..., unsafe_allow_html=True)``.
    """
    styled_span = f'<span style="color: {color}; font-weight: bold;">{text}</span>'
    return f'<div class="output-container">{styled_span}</div>'
|
|
|
|
|
|
|
|
# --- Page setup ------------------------------------------------------------
st.set_page_config(page_title="TechPrep", layout="wide")

# Global CSS injected once: page background, button styling, the bordered
# output container used by style_output(), sidebar, and the fixed footer.
st.markdown(
    """
    <style>
    body {
        background-color: #e0f7fa; /* Light cyan background color */
        font-family: Arial, sans-serif;
    }
    .stButton>button {
        width: 100%;
        height: 3em;
        font-size: 1.2em;
        color: white;
        background-color: #4CAF50;
        border: none;
        border-radius: 8px;
        cursor: pointer;
        transition: background-color 0.3s ease;
    }
    .stButton>button:hover {
        background-color: #45a049;
    }
    .output-container {
        border: 2px solid #2196F3;
        border-radius: 8px;
        padding: 15px;
        margin: 15px 0;
        background-color: #f9f9f9;
    }
    .sidebar {
        background-color: #ffffff; /* Sidebar background color */
        padding: 1em;
    }
    .footer {
        background-color: #4CAF50;
        color: white;
        text-align: center;
        padding: 1em;
        position: fixed;
        bottom: 0;
        width: 100%;
        border-top: 2px solid #ffffff;
    }
    </style>
    """,
    unsafe_allow_html=True
)
|
|
|
|
|
|
|
|
# Transient welcome banner: rendered into a placeholder, held on screen for
# 4 seconds, then cleared.
welcome_message = st.empty()
with welcome_message.container():
    st.markdown("""
    <div style="
        text-align: center;
        padding: 20px;
        background-color: #4CAF50;
        color: white;
        border-radius: 8px;
        font-size: 24px;
    ">
        <i class="fa fa-smile-o" aria-hidden="true" style="font-size: 40px;"></i> Welcome to TechPrep!
    </div>
    """, unsafe_allow_html=True)
# NOTE(review): time.sleep blocks the whole script (and rerun) for 4 seconds
# whenever this top-level code executes — confirm this is intentional.
time.sleep(4)
welcome_message.empty()
|
|
|
|
|
|
|
|
# Seed per-session quiz state on first run; later reruns keep existing values.
_session_defaults = {
    "questions": [],
    "answers": [],
    "feedback": [],
    "question_index": 0,
    "show_results": False,
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
|
|
|
|
|
|
|
|
# Sidebar navigation: the selected option decides which view renders below.
st.sidebar.title("TechPrep Navigation")
nav_option = st.sidebar.radio("Choose an option:",
                              ["Generate Questions", "Mock Interview", "Track Progress", "Connect with Resources"])
|
|
|
|
|
|
|
|
# --- "Generate Questions" view ---------------------------------------------
if nav_option == "Generate Questions":
    st.header("📝 Generate Interview Questions")

    # Question-generation parameters.
    model_choice = st.selectbox("Choose Model:", ["OpenAI", "Gemini", "Groq"])
    role = st.selectbox("Role:", ["GenAI", "ML", "DevOps", "Software Engineer", "Data Scientist", "Product Manager", "Designer", "Business Analyst"])
    question_type = st.selectbox("Question Type:", ["Behavioral", "Technical", "Situational", "Case Study", "Problem Solving"])
    num_questions = st.number_input("Number of Questions:", min_value=1, max_value=20, value=5)
    difficulty = st.selectbox("Difficulty Level:", ["Basic", "Medium", "Complex"])

    if st.button("Generate Questions", key="generate_questions"):
        with st.spinner("Generating questions..."):
            # Fetch a fresh question set and reset all per-quiz state.
            questions = generate_questions(model_choice, role, question_type, num_questions, difficulty)
            st.session_state.questions = questions
            st.session_state.answers = ["" for _ in questions]
            st.session_state.feedback = ["" for _ in questions]
            st.session_state.question_index = 0
            st.session_state.show_results = False
            # NOTE(review): questions are counted as "solved" at generation
            # time, not when actually answered — confirm intent.
            progress_data["questions_solved"][question_type] += num_questions

    if st.session_state.questions:
        question_list = st.session_state.questions
        index = st.session_state.question_index

        if index < len(question_list):
            st.write(f"**Question {index + 1}:** {question_list[index]}")

            # NOTE(review): a single shared widget key means the typed text
            # carries over when navigating between questions — confirm intent.
            answer = st.text_area("Your Answer", key="text_area_answer")

            # Previous/Next navigation. Streamlit reruns the script on click,
            # so the updated index renders on the following run.
            col1, col2 = st.columns(2)
            with col1:
                if index > 0:
                    if st.button("Previous"):
                        st.session_state.question_index -= 1

            with col2:
                if index < len(question_list) - 1:
                    if st.button("Next"):
                        st.session_state.question_index += 1

            if st.button("Submit Answer"):
                if not answer:
                    st.error("Please enter an answer to receive feedback.")
                else:
                    with st.spinner("Providing feedback..."):
                        # Store answer + LLM feedback for the summary view.
                        feedback = provide_feedback(model_choice, answer)
                        st.session_state.answers[index] = answer
                        st.session_state.feedback[index] = feedback
                        st.markdown(style_output("Feedback Received:", "#FF5722"), unsafe_allow_html=True)
                        st.write(feedback)
                        progress_data["feedback_provided"] += 1

            # Reaching the last question unlocks the results section below.
            if index == len(question_list) - 1:
                st.session_state.show_results = True

        if st.session_state.show_results:
            st.write("### Your Results")
            # Score = share of questions with a non-empty stored answer.
            total_questions = len(st.session_state.questions)
            answered_questions = sum(1 for ans in st.session_state.answers if ans)
            score = (answered_questions / total_questions) * 100
            st.write(f"**Score:** {score:.2f}%")

            st.write("### Feedback Summary")
            for i, (q, ans, fb) in enumerate(zip(st.session_state.questions, st.session_state.answers, st.session_state.feedback)):
                st.write(f"**Question {i + 1}:** {q}")
                st.write(f"**Your Answer:** {ans}")
                st.write(f"**Feedback:** {fb}")

            # Role-specific improvement tips from the same model backend.
            tips = get_tips(model_choice, role)
            st.write("### Tips to Improve")
            st.write(tips)
            progress_data["tips_retrieved"] += 1
|
|
|
|
|
# Remaining navigation targets (continuation of the if-chain above).
elif nav_option == "Mock Interview":
    st.header("🎥 Mock Interview")
    schedule_mock_interview()

elif nav_option == "Track Progress":
    track_progress()

elif nav_option == "Connect with Resources":
    connect_resources()
|
|
|
|
|
|
|
|
# Fixed page footer, pinned to the viewport bottom by the `.footer` CSS rule.
st.markdown("""
<div class="footer">
    <p>© 2024 TechPrep. All rights reserved.</p>
</div>
""", unsafe_allow_html=True)