Update app.py
app.py CHANGED

@@ -1,20 +1,120 @@
 import streamlit as st
+from openai import OpenAI
+import os
 import pandas as pd
 import numpy as np
-from
-from
-
-
+from sentence_transformers import SentenceTransformer
+from sklearn.metrics.pairwise import cosine_similarity
+import torch
+import re
+
+# Set up OpenAI client
+client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+# Check if GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
+# Load metadata and embeddings (ensure these files are in your working directory or update paths)
+metadata_path = 'question_metadata.csv'  # Update this path if needed
+embeddings_path = 'question_dataset_embeddings.npy'  # Update this path if needed
 
-# Load metadata and embeddings once at app startup
 metadata = pd.read_csv(metadata_path)
 embeddings = np.load(embeddings_path)
 
+# Load the SentenceTransformer model
+model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
+
+# Load prompts from files
+with open("technical_interviewer_prompt.txt", "r") as file:
+    technical_interviewer_prompt = file.read()
+
+with open("question_generation_prompt.txt", "r") as file:
+    question_generation_prompt = file.read()
+
+st.title("Mock Interview: Real-World Programming")
+
+# Initialize session state variables
+if "messages" not in st.session_state:
+    st.session_state.messages = []
 
-
-st.
+if "follow_up_mode" not in st.session_state:
+    st.session_state.follow_up_mode = False  # Tracks whether we're in follow-up mode
+
+if "generated_question" not in st.session_state:
+    st.session_state.generated_question = None  # Stores the generated question for persistence
+
+if "code_template" not in st.session_state:
+    st.session_state.code_template = ""  # Stores the code template
+
+if "sample_test_case" not in st.session_state:
+    st.session_state.sample_test_case = ""  # Stores the sample test case
+
+if "expected_output" not in st.session_state:
+    st.session_state.expected_output = ""  # Stores the expected output
+
+if "debug_logs" not in st.session_state:
+    st.session_state.debug_logs = None  # Stores debug logs for toggling
+
+# Function to find the top 1 most similar question based on user input
+def find_top_question(query):
+    # Generate embedding for the query
+    query_embedding = model.encode(query, convert_to_tensor=True, device=device).cpu().numpy()
+
+    # Reshape query_embedding to ensure it is a 2D array
+    query_embedding = query_embedding.reshape(1, -1)  # Reshape to (1, n_features)
 
-#
+    # Compute cosine similarity between query embedding and dataset embeddings
+    similarities = cosine_similarity(query_embedding, embeddings).flatten()  # Flatten to get a 1D array of similarities
+
+    # Get the index of the most similar result (top 1)
+    top_index = similarities.argsort()[-1]  # Index of highest similarity
+
+    # Retrieve metadata for the top result
+    top_result = metadata.iloc[top_index].copy()
+    top_result['similarity_score'] = similarities[top_index]
+
+    return top_result
+
+# Function to generate response using OpenAI API with debugging logs
+def generate_response(messages):
+    # For debug logs, store only the follow-up conversation history
+    st.session_state.debug_logs = st.session_state.messages  # Update debug logs with current conversation
+
+    response = client.chat.completions.create(
+        model="o1-mini",
+        messages=messages,
+    )
+
+    return response.choices[0].message.content
+
+# Function to extract code template and sample test case from the generated question
+def extract_code_and_test_case(generated_question):
+    code_template = ""
+    sample_test_case = ""
+    expected_output = ""
+
+    # Extract code template
+    code_match = re.search(r'```python(.*?)```', generated_question, re.DOTALL)
+    if code_match:
+        code_template = code_match.group(1).strip()
+    else:
+        # Default code template if none is found
+        code_template = "# Write your code here\n"
+
+    # Extract sample test case and expected output
+    test_case_match = re.search(r'Sample Input:\s*(.*?)\n', generated_question, re.DOTALL)
+    expected_output_match = re.search(r'Expected Output:\s*(.*?)\n', generated_question, re.DOTALL)
+    if test_case_match and expected_output_match:
+        sample_test_case = test_case_match.group(1).strip()
+        expected_output = expected_output_match.group(1).strip()
+    else:
+        sample_test_case = ""
+        expected_output = ""
+
+    return code_template, sample_test_case, expected_output
+
+# Move the input form to the sidebar to make it always visible and more compact
 with st.sidebar.form(key="input_form"):
     st.markdown("## Generate a New Question")
     company = st.text_input("Company", value="Google")  # Default value: Google
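Note on the retrieval helper added above: find_top_question is a plain nearest-neighbor lookup. It embeds the free-text query with all-MiniLM-L6-v2, scores it against every precomputed question embedding by cosine similarity (argsort()[-1] is simply an argmax), and returns the best-matching metadata row with a similarity_score column attached. A hypothetical call, mirroring the f"{company} {difficulty} {topic}" query built in the next hunk (the literal values here are examples; available columns depend on question_metadata.csv):

    top = find_top_question("Google Medium arrays")
    print(top["questionName"], top["similarity_score"])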
@@ -29,14 +129,26 @@ if generate_button:
 
     # Create a query from user inputs and find the most relevant question
     query = f"{company} {difficulty} {topic}"
-    top_question = find_top_question(query
+    top_question = find_top_question(query)
 
     # Prepare a detailed prompt for GPT using the top question's details
-    detailed_prompt =
+    detailed_prompt = (
+        f"Transform this LeetCode question into a real-world interview scenario.\n\n"
+        f"**Company**: {top_question['company']}\n"
+        f"**Question Name**: {top_question['questionName']}\n"
+        f"**Difficulty Level**: {top_question['difficulty level']}\n"
+        f"**Tags**: {top_question['Tags']}\n"
+        f"**Content**: {top_question['Content']}\n"
+        f"\nPlease create a real-world interview question based on this information. "
+        f"Include the following sections:\n\n"
+        f"- Problem Description\n"
+        f"- Code Template (in a Python code block)\n"
+        f"- Sample Input and Expected Output (clearly separated)\n"
+    )
 
     # Generate response using OpenAI API with detailed prompt and debugging logs
-    response = generate_response([{"role": "user", "content": detailed_prompt}])
-
+    response = generate_response([{"role": "user", "content": detailed_prompt}])  # Question generation prompt excluded here
+
    # Store generated question in session state for persistence in sidebar and follow-up conversation state
     st.session_state.generated_question = response
 
@@ -64,14 +176,17 @@ if st.session_state.follow_up_mode:
     st.session_state.messages.append({"role": "user", "content": user_input})
 
     # Prepare messages to send to the assistant
+    # Include the technical interviewer prompt and generated question, but do not display them
+    # Add an instruction for the assistant to reply as a real-world interviewer would
     assistant_instruction = (
         "As a real-world interviewer, please reply to the candidate's follow-up questions "
         "specific to the generated interview question, to the point, and in a natural, human-sounding way."
     )
 
     messages_to_send = [
-        {"role": "user", "content":
-        {"role": "assistant", "content": st.session_state.generated_question}
+        {"role": "user", "content": technical_interviewer_prompt},
+        {"role": "assistant", "content": st.session_state.generated_question},
+        {"role": "user", "content": assistant_instruction}
     ] + st.session_state.messages
 
     assistant_response = generate_response(messages_to_send)
@@ -98,9 +213,41 @@ else:
     code_input = st.sidebar.text_area("Write your Python code here:", height=300)
 
     if st.sidebar.button("Run Code"):
-
-
-
+        try:
+            # Prepare the code for execution
+            exec_globals = {}
+            # Create a function wrapper to execute the user's code
+            exec(f"def user_solution():\n{code_input}", exec_globals)
+            user_solution = exec_globals.get('user_solution', None)
+
+            # Prepare sample test case execution
+            if st.session_state.sample_test_case:
+                # Assume the sample test case is in the format of arguments to the function
+                test_case = st.session_state.sample_test_case
+                # Evaluate the test case safely
+                test_args = eval(test_case)
+                if not isinstance(test_args, tuple):
+                    test_args = (test_args,)
+                # Capture the output
+                returned_output = user_solution(*test_args)
+            else:
+                returned_output = user_solution()
+
+            # Display the expected output and returned output
+            st.sidebar.markdown("### Sample Test Case Result:")
+            st.sidebar.markdown(f"**Sample Input:** {st.session_state.sample_test_case}")
+            st.sidebar.markdown(f"**Expected Output:** {st.session_state.expected_output}")
+            st.sidebar.markdown(f"**Your Output:** {returned_output}")
+
+            # Compare outputs
+            if str(returned_output) == st.session_state.expected_output:
+                st.sidebar.success("Your output matches the expected output!")
+            else:
+                st.sidebar.error("Your output does not match the expected output.")
+        except Exception as e:
+            st.sidebar.error(f"Error: {e}")
+
+# Right sidebar toggleable debug logs and code interpreter section
 with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
-    if
-    st.write(st.session_state.debug_logs)
+    if st.session_state.debug_logs:
+        st.write(st.session_state.debug_logs)
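One caveat worth flagging in the "Run Code" handler: exec(f"def user_solution():\n{code_input}", exec_globals) only yields a valid function when the pasted code is already indented one level, because the user's text is spliced verbatim under the def header; flush-left input raises an IndentationError, which the try/except then surfaces as a generic error. A minimal sketch of a more forgiving wrapper, assuming the same code_input string from the sidebar (build_user_solution is a hypothetical helper, not part of this commit):

    import textwrap

    def build_user_solution(code_input):
        # Indent every pasted line so it forms a valid function body;
        # fall back to "pass" when the text area is empty.
        body = textwrap.indent(code_input, "    ") or "    pass"
        exec_globals = {}
        # Same exec approach as the commit, just with normalized indentation
        exec("def user_solution():\n" + body, exec_globals)
        return exec_globals.get("user_solution")

The rest of the handler (evaluating the sample test case and comparing outputs) would work unchanged on top of a wrapper like this.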
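A side note on the module-level loads in this commit: Streamlit re-executes the whole script on every widget interaction, so pd.read_csv, np.load, the two prompt files, and the SentenceTransformer constructor all run again on each rerun. A sketch of how the same loads could be memoized with Streamlit's caching decorators, assuming the file names used above:

    import pandas as pd
    import numpy as np
    import streamlit as st
    from sentence_transformers import SentenceTransformer

    @st.cache_data
    def load_question_bank():
        # Cached across reruns; recomputed only if this function changes.
        metadata = pd.read_csv("question_metadata.csv")
        embeddings = np.load("question_dataset_embeddings.npy")
        return metadata, embeddings

    @st.cache_resource
    def load_encoder():
        # cache_resource suits unserializable objects such as models.
        return SentenceTransformer("all-MiniLM-L6-v2")

    metadata, embeddings = load_question_bank()
    model = load_encoder()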