bsmith3715 committed
Commit b3a8976 · verified · 1 Parent(s): ac85da0

Update app.py

Files changed (1)
  1. app.py +23 -15
app.py CHANGED
@@ -121,18 +121,26 @@ async def handle_message(message: cl.Message):
         await cl.Message(content=f"⚠️ Error generating image: {str(e)}").send()
 
     else:
-        # Handle regular QA queries with streaming
-        chain = cl.user_session.get("qa_chain")
-        if chain:
-            try:
-                # Create a message placeholder
-                msg = cl.Message(content="")
-                await msg.send()
-
-                # Stream the response
-                async for chunk in chain.astream({"query": message.content}):
-                    if "result" in chunk:
-                        await msg.stream_token(chunk["result"])
-
-            except Exception as e:
-                await cl.Message(content=f"⚠️ Error: {str(e)}").send()
+        retriever = cl.user_session.get("qa_chain").retriever
+        prompt_template = rag_prompt
+        llm = cl.user_session.get("qa_chain").llm
+
+        try:
+            # Create a message placeholder
+            msg = cl.Message(content="")
+            await msg.send()
+
+            # Step 1: Retrieve relevant documents
+            docs = retriever.get_relevant_documents(message.content)
+            context = "\n\n".join([doc.page_content for doc in docs])
+
+            # Step 2: Format prompt
+            formatted_prompt = prompt_template.format(context=context, question=message.content)
+
+            # Step 3: Stream the response directly from the LLM
+            async for chunk in llm.astream(formatted_prompt):
+                if hasattr(chunk, "content") and chunk.content:
+                    await msg.stream_token(chunk.content)
+
+        except Exception as e:
+            await cl.Message(content=f"⚠️ Error: {str(e)}").send()
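
For reference, the new branch reads `rag_prompt`, which is not defined in this hunk and presumably lives elsewhere in app.py. A minimal sketch of what it might look like, assuming a LangChain PromptTemplate with `context` and `question` slots (the template text below is illustrative, not the app's actual prompt):

# Hypothetical definition of `rag_prompt`; the real one is outside this hunk.
# Assumed to be a PromptTemplate (or anything whose .format() accepts
# `context` and `question`), matching how the diff calls it in Step 2.
from langchain_core.prompts import PromptTemplate

rag_prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=(
        "Use the following context to answer the question.\n\n"
        "Context:\n{context}\n\n"
        "Question: {question}\n"
        "Answer:"
    ),
)

# Mirrors Step 2 of the diff: fill the template before streaming from the LLM.
formatted = rag_prompt.format(
    context="Chainlit is a toolkit for building chat UIs.",
    question="What is Chainlit?",
)
print(formatted)

Streaming straight from `llm.astream(...)` yields token-level chunks with a `.content` attribute, whereas the legacy RetrievalQA chain's `astream` typically emits the full result dict in one piece, which is presumably why this commit unbundles the chain into its `retriever` and `llm` components.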