cryogenic22 committed on
Commit
a35c160
·
verified ·
1 Parent(s): 0b9345a

Update backend.py

Browse files
Files changed (1) hide show
  1. backend.py +30 -31
backend.py CHANGED
@@ -209,45 +209,44 @@ def get_embeddings_model():
209
 
210
 
211
@st.cache_resource
def initialize_qa_system(_vector_store):
    """Build an OpenAI tools agent that answers questions over uploaded documents.

    The leading underscore on ``_vector_store`` tells Streamlit's
    ``st.cache_resource`` not to hash the (unhashable) vector store argument.

    Args:
        _vector_store: a vector store exposing ``as_retriever()`` (e.g. FAISS/Chroma).

    Returns:
        An ``AgentExecutor`` wired with a retrieval tool and conversation
        memory, or ``None`` if initialization fails (the error is surfaced
        in the Streamlit UI via ``st.error``).
    """
    try:
        llm = ChatOpenAI(
            temperature=0,
            model_name="gpt-4",  # Or another OpenAI model like "gpt-3.5-turbo"
            api_key=os.environ.get("OPENAI_API_KEY"),
        )

        # Prompt for an OpenAI tools agent: agent_scratchpad is required by
        # create_openai_tools_agent to record intermediate tool calls.
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful assistant"),
                MessagesPlaceholder(variable_name="chat_history"),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )

        # Single retrieval tool over the uploaded documents.
        tools = [
            Tool(
                name="Search",
                func=_vector_store.as_retriever(
                    search_kwargs={"k": 2}
                ).get_relevant_documents,
                description="useful for when you need to answer questions about the documents you have been uploaded. Input should be a fully formed question.",
            )
        ]

        # Create the agent and executor.
        agent = create_openai_tools_agent(llm=llm, tools=tools, prompt=prompt)
        agent_executor = AgentExecutor(
            agent=agent,
            tools=tools,
            verbose=True,
            # BUG FIX: MessagesPlaceholder("chat_history") expects a *list of
            # messages*; without return_messages=True ConversationBufferMemory
            # returns a single formatted string and the prompt raises at runtime.
            memory=ConversationBufferMemory(
                memory_key="chat_history", return_messages=True
            ),
        )

        return agent_executor  # Return the agent executor

    except Exception as e:
        # Best-effort init: surface the error in the UI and signal failure
        # with None rather than crashing the Streamlit app.
        st.error(f"Error initializing QA system: {e}")
        return None
 
209
 
210
 
211
@st.cache_resource
def initialize_qa_system(vector_store):
    """Initialize QA system with proper chat handling.

    Builds an LCEL chain: query -> retrieve top-3 similar documents ->
    inject them as context into an RFP-analysis prompt -> GPT-4.

    Args:
        vector_store: a vector store exposing ``as_retriever()``.

    Returns:
        A runnable chain mapping a query string to an LLM message, or
        ``None`` on failure (error shown in the Streamlit UI).
    """
    try:
        llm = ChatOpenAI(
            temperature=0.5,
            model_name="gpt-4",
            api_key=os.environ.get("OPENAI_API_KEY")
        )

        # BUG FIX: the system message promises answers "based on the provided
        # context", but the template had no {context} placeholder, so the
        # retrieved documents were silently discarded. Add it here.
        prompt = ChatPromptTemplate.from_messages([
            ("system", """You are an expert consultant specializing in analyzing Request for Proposal (RFP) documents.
            Your goal is to provide clear, accurate responses based on the provided context.
            Start with a direct answer and organize additional details under relevant headers.

            Context:
            {context}"""),
            ("human", "{input}")
        ])

        # Retriever over the uploaded documents (top-3 by similarity).
        retriever = vector_store.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 3}
        )

        chain = (
            {"input": RunnablePassthrough()}
            # BUG FIX: the retriever must receive the query *string*; the
            # previous `"docs": retriever` invoked it with the whole
            # {"input": ...} dict.
            | {
                "input": lambda x: x["input"],
                "docs": lambda x: retriever.invoke(x["input"]),
            }
            | {
                "input": lambda x: x["input"],
                "context": lambda x: "\n\n".join(
                    doc.page_content for doc in x["docs"]
                ),
            }
            | prompt
            | llm
        )

        return chain

    except Exception as e:
        # Best-effort init: report in the UI and return None instead of raising.
        st.error(f"Error initializing QA system: {e}")
        return None