omarkashif committed on
Commit
113896e
·
verified ·
1 Parent(s): fbb0072

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +14 -14
src/streamlit_app.py CHANGED
@@ -62,10 +62,16 @@ def retrieve_documents(query, top_k=10):
62
 
63
  def generate_response(user_query, docs):
64
  context = "\n\n---\n\n".join(d['metadata']['text'] for d in docs)
65
- messages = [{"role": "system", "content":
66
- "You are a helpful legal assistant. Use provided context from documents. Answer only using the context."}]
 
 
 
 
 
67
  messages.extend(st.session_state.history)
68
  messages.append({"role": "user", "content": f"Context:\n{context}\n\nQuestion:\n{user_query}"})
 
69
  try:
70
  resp = client.chat.completions.create(
71
  model="gpt-4o-mini",
@@ -78,23 +84,17 @@ def generate_response(user_query, docs):
78
  st.error(f"Response error: {e}")
79
  reply = "Sorry, I encountered an error generating the answer."
80
 
81
-
82
- # unique_sources = sorted({d['metadata']['source'] for d in docs})
83
- # if unique_sources:
84
- # reply += "\n\n---\n\n**Sources used:**\n"
85
- # for src in unique_sources:
86
- # reply += f"> “{src}”\n"
87
-
88
- # unique_sources_txt = sorted({d['metadata']['text'] for d in docs})
89
- # if unique_sources_txt:
90
- # reply += "\n\n---\n\n**Document(s) retrieved:**\n"
91
- # for src in unique_sources_txt:
92
- # reply += f"> “{src}”\n"
93
 
94
  st.session_state.history.append({"role": "assistant", "content": reply})
95
  return reply
96
 
97
 
 
98
  # Chat UI
99
  with st.form("chat_input", clear_on_submit=True):
100
  user_input = st.text_input("You:", "")
 
62
 
63
  def generate_response(user_query, docs):
64
  context = "\n\n---\n\n".join(d['metadata']['text'] for d in docs)
65
+ sources = sorted({d['metadata']['source'] for d in docs if 'source' in d['metadata']})
66
+ messages = [
67
+ {"role": "system", "content":
68
+ "You are a helpful legal assistant. Use the provided context from the documents to answer the user's question. "
69
+ "At the end of your answer, write a single line starting with 'Source: ' and list the sources of the documents you used. "
70
+ "If multiple sources are used, separate them with commas. The user should be able to clearly understand where the information came from."}
71
+ ]
72
  messages.extend(st.session_state.history)
73
  messages.append({"role": "user", "content": f"Context:\n{context}\n\nQuestion:\n{user_query}"})
74
+
75
  try:
76
  resp = client.chat.completions.create(
77
  model="gpt-4o-mini",
 
84
  st.error(f"Response error: {e}")
85
  reply = "Sorry, I encountered an error generating the answer."
86
 
87
+ # Optional: force clean source line if LLM misses it
88
+ if sources:
89
+ clean_sources = ", ".join(sources)
90
+ if "Source:" not in reply:
91
+ reply += f"\n\nSource: {clean_sources}"
 
 
 
 
 
 
 
92
 
93
  st.session_state.history.append({"role": "assistant", "content": reply})
94
  return reply
95
 
96
 
97
+
98
  # Chat UI
99
  with st.form("chat_input", clear_on_submit=True):
100
  user_input = st.text_input("You:", "")