Spaces:
Sleeping
Sleeping
File size: 1,398 Bytes
13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 13a270c 7dff9c2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
"""Streamlit app: mock-interview UI that retrieves a real-world programming
question (via embedding similarity), generates an LLM response for it, and
offers a sidebar Python code interpreter."""

import numpy as np
import pandas as pd
import streamlit as st

from utils.constants import metadata_path, embeddings_path
from question_handler import find_top_question, generate_detailed_prompt
from code_executor import execute_code
from utils.openai_client import generate_response


@st.cache_data
def _load_question_data():
    """Load question metadata (CSV) and precomputed embeddings (npy) once.

    Cached so Streamlit's top-to-bottom rerun on every widget interaction
    does not re-read both files from disk each time.
    """
    return pd.read_csv(metadata_path), np.load(embeddings_path)


metadata, embeddings = _load_question_data()

st.title("Real-World Programming Question Mock Interview")

# Sidebar form for generating questions; the form batches the three inputs
# so the app only reruns the retrieval when "Generate" is pressed.
with st.sidebar.form(key="input_form"):
    company = st.text_input("Company", value="Google")
    difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1)
    topic = st.text_input("Topic", value="Binary Search")
    generate_button = st.form_submit_button(label="Generate")

if generate_button:
    # Retrieve the closest stored question to the free-text query, expand it
    # into a detailed prompt, and ask the LLM for the final question text.
    query = f"{company} {difficulty} {topic}"
    top_question = find_top_question(query, metadata, embeddings)
    detailed_prompt = generate_detailed_prompt(top_question)
    response = generate_response(detailed_prompt)
    # Persist across reruns so later UI sections can display it.
    st.session_state.generated_question = response

# Code execution section in the sidebar.
st.sidebar.markdown("## Python Code Interpreter")
code_input = st.sidebar.text_area("Write your Python code here:", height=300)
if st.sidebar.button("Run Code"):
    # NOTE(review): executing user-supplied code is inherently unsafe;
    # confirm execute_code() sandboxes its input.
    execute_code(code_input)

# Display generated questions and follow-up chat logic here...
|