nsgupta1 committed on
Commit
16930b2
·
verified ·
1 Parent(s): d6b72d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -14
app.py CHANGED
@@ -1,36 +1,110 @@
1
  import streamlit as st
2
- from utils.constants import metadata_path, embeddings_path
3
- from question_handler import find_top_question, generate_detailed_prompt
4
- from code_executor import execute_code
5
- from utils.openai_client import generate_response
6
  import pandas as pd
7
  import numpy as np
 
 
 
 
 
8
 
9
- # Load metadata and embeddings
10
  metadata = pd.read_csv(metadata_path)
11
  embeddings = np.load(embeddings_path)
12
 
13
- # Streamlit UI components (e.g., sidebar, chat interface)
 
 
 
 
14
  st.title("Real-World Programming Question Mock Interview")
15
 
16
  # Sidebar form for generating questions
17
  with st.sidebar.form(key="input_form"):
18
- company = st.text_input("Company", value="Google")
19
- difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1)
20
- topic = st.text_input("Topic", value="Binary Search")
 
21
  generate_button = st.form_submit_button(label="Generate")
22
 
23
  if generate_button:
 
 
 
 
 
24
  query = f"{company} {difficulty} {topic}"
25
- top_question = find_top_question(query, metadata, embeddings)
 
 
26
  detailed_prompt = generate_detailed_prompt(top_question)
27
- response = generate_response(detailed_prompt)
 
 
 
 
28
  st.session_state.generated_question = response
29
 
30
- # Code execution section in the sidebar
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  st.sidebar.markdown("## Python Code Interpreter")
32
- code_input = st.sidebar.text_area("Write your Python code here:", height=300)
 
 
 
 
 
 
33
  if st.sidebar.button("Run Code"):
34
  execute_code(code_input)
35
 
36
- # Display generated questions and follow-up chat logic here...
 
 
 
 
1
  import streamlit as st
 
 
 
 
2
  import pandas as pd
3
  import numpy as np
4
+ from utils.constants import metadata_path, embeddings_path
5
+ from utils.embeddings_utils import load_model
6
+ from question_handler import find_top_question, generate_detailed_prompt, extract_code_and_test_case
7
+ from utils.openai_client import generate_response
8
+ from code_executor import execute_code
9
 
10
+ # Load metadata and embeddings once at app startup
11
  metadata = pd.read_csv(metadata_path)
12
  embeddings = np.load(embeddings_path)
13
 
14
+ # Load the SentenceTransformer model once at app startup
15
+ device = "cuda" if torch.cuda.is_available() else "cpu"
16
+ model = load_model(device)
17
+
18
+ # Streamlit UI components
19
  st.title("Real-World Programming Question Mock Interview")
20
 
21
  # Sidebar form for generating questions
22
  with st.sidebar.form(key="input_form"):
23
+ st.markdown("## Generate a New Question")
24
+ company = st.text_input("Company", value="Google") # Default value: Google
25
+ difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1) # Default: Medium
26
+ topic = st.text_input("Topic", value="Binary Search") # Default: Binary Search
27
  generate_button = st.form_submit_button(label="Generate")
28
 
29
  if generate_button:
30
+ # Clear session state and start fresh with follow-up mode disabled
31
+ st.session_state.messages = []
32
+ st.session_state.follow_up_mode = False
33
+
34
+ # Create a query from user inputs and find the most relevant question
35
  query = f"{company} {difficulty} {topic}"
36
+ top_question = find_top_question(query, metadata, embeddings, model)
37
+
38
+ # Prepare a detailed prompt for GPT using the top question's details
39
  detailed_prompt = generate_detailed_prompt(top_question)
40
+
41
+ # Generate response using OpenAI API with detailed prompt and debugging logs
42
+ response = generate_response([{"role": "user", "content": detailed_prompt}])
43
+
44
+ # Store generated question in session state for persistence in sidebar and follow-up conversation state
45
  st.session_state.generated_question = response
46
 
47
+ # Extract code template and sample test case
48
+ code_template, sample_test_case, expected_output = extract_code_and_test_case(response)
49
+ st.session_state.code_template = code_template
50
+ st.session_state.sample_test_case = sample_test_case
51
+ st.session_state.expected_output = expected_output
52
+
53
+ # Enable follow-up mode after generating the initial question
54
+ st.session_state.follow_up_mode = True
55
+
56
+ # Display chat messages from history on app rerun (for subsequent conversation)
57
+ for message in st.session_state.messages:
58
+ with st.chat_message(message["role"]):
59
+ st.markdown(message["content"])
60
+
61
+ # Chatbox for subsequent conversations with assistant (follow-up mode)
62
+ if st.session_state.follow_up_mode:
63
+ if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
64
+ # Display user message in chat message container and add to session history
65
+ with st.chat_message("user"):
66
+ st.markdown(user_input)
67
+
68
+ st.session_state.messages.append({"role": "user", "content": user_input})
69
+
70
+ # Prepare messages to send to the assistant
71
+ assistant_instruction = (
72
+ "As a real-world interviewer, please reply to the candidate's follow-up questions "
73
+ "specific to the generated interview question, to the point, and in a natural, human-sounding way."
74
+ )
75
+
76
+ messages_to_send = [
77
+ {"role": "user", "content": assistant_instruction},
78
+ {"role": "assistant", "content": st.session_state.generated_question}
79
+ ] + st.session_state.messages
80
+
81
+ assistant_response = generate_response(messages_to_send)
82
+
83
+ with st.chat_message("assistant"):
84
+ st.markdown(assistant_response)
85
+
86
+ st.session_state.messages.append({"role": "assistant", "content": assistant_response})
87
+
88
+ st.sidebar.markdown("---")
89
+ st.sidebar.markdown("## Generated Question")
90
+ if st.session_state.generated_question:
91
+ st.sidebar.markdown(st.session_state.generated_question)
92
+ else:
93
+ st.sidebar.markdown("_No question generated yet._")
94
+
95
+ st.sidebar.markdown("---")
96
  st.sidebar.markdown("## Python Code Interpreter")
97
+
98
+ # Pre-fill code interpreter with code template after question generation
99
+ if st.session_state.code_template:
100
+ code_input = st.sidebar.text_area("Write your Python code here:", value=st.session_state.code_template, height=300)
101
+ else:
102
+ code_input = st.sidebar.text_area("Write your Python code here:", height=300)
103
+
104
  if st.sidebar.button("Run Code"):
105
  execute_code(code_input)
106
 
107
+ # Right sidebar toggleable debug logs section
108
+ with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
109
+ if "debug_logs" in st.session_state:
110
+ st.write(st.session_state.debug_logs)