Seth0330 committed on
Commit
69aafdb
·
verified ·
1 Parent(s): a84926c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -143,7 +143,7 @@ class SQLiteVectorRetriever(BaseRetriever):
143
  def _get_relevant_documents(self, query, run_manager=None, **kwargs):
144
  return query_vector_db(query, self.top_k)
145
 
146
- # --- FINETUNED SYSTEM PROMPT FOR DIRECT ANSWERS ---
147
  system_prompt = (
148
  "You are a JSON data assistant. Always give a direct, concise answer based only on the context provided. "
149
  "If you do not see the answer in the context, reply: 'I don’t have that information.' "
@@ -152,10 +152,10 @@ system_prompt = (
152
 
153
  prompt = ChatPromptTemplate.from_messages([
154
  ("system", system_prompt),
155
- ("human", "{question}")
156
  ])
157
 
158
- llm = ChatOpenAI(model="gpt-4.1", openai_api_key=OPENAI_API_KEY, temperature=0)
159
 
160
  retriever = SQLiteVectorRetriever(top_k=5)
161
  qa_chain = RetrievalQA.from_chain_type(
@@ -176,7 +176,6 @@ for msg in st.session_state.messages:
176
  st.markdown(f"<details><summary><b>Function Output:</b></summary><pre>{msg['content']}</pre></details>", unsafe_allow_html=True)
177
 
178
  def show_json_links_and_modal():
179
- # Look for last function message (top results) and display view buttons
180
  for msg in reversed(st.session_state.messages):
181
  if msg.get("role") == "function" and msg.get("content"):
182
  try:
@@ -205,7 +204,7 @@ def send_message():
205
  return
206
  st.session_state.messages.append({"role": "user", "content": user_input})
207
  with st.spinner("Thinking..."):
208
- # Use the chain with { "question": ... } to match prompt format
209
  result = qa_chain({"question": user_input})
210
  answer = result['result']
211
  st.session_state.messages.append({"role": "assistant", "content": answer})
 
143
  def _get_relevant_documents(self, query, run_manager=None, **kwargs):
144
  return query_vector_db(query, self.top_k)
145
 
146
+ # --- SYSTEM PROMPT & CORRECT PROMPT FORMAT ---
147
  system_prompt = (
148
  "You are a JSON data assistant. Always give a direct, concise answer based only on the context provided. "
149
  "If you do not see the answer in the context, reply: 'I don’t have that information.' "
 
152
 
153
  prompt = ChatPromptTemplate.from_messages([
154
  ("system", system_prompt),
155
+ ("human", "Context:\n{context}\n\nQuestion: {question}")
156
  ])
157
 
158
+ llm = ChatOpenAI(model="gpt-4o", openai_api_key=OPENAI_API_KEY, temperature=0)
159
 
160
  retriever = SQLiteVectorRetriever(top_k=5)
161
  qa_chain = RetrievalQA.from_chain_type(
 
176
  st.markdown(f"<details><summary><b>Function Output:</b></summary><pre>{msg['content']}</pre></details>", unsafe_allow_html=True)
177
 
178
  def show_json_links_and_modal():
 
179
  for msg in reversed(st.session_state.messages):
180
  if msg.get("role") == "function" and msg.get("content"):
181
  try:
 
204
  return
205
  st.session_state.messages.append({"role": "user", "content": user_input})
206
  with st.spinner("Thinking..."):
207
+ # Use the chain with {"question": ...} to match prompt format
208
  result = qa_chain({"question": user_input})
209
  answer = result['result']
210
  st.session_state.messages.append({"role": "assistant", "content": answer})