cryogenic22 committed on
Commit
8a72544
·
verified ·
1 Parent(s): 6891ce1

Update utils/database.py

Browse files
Files changed (1) hide show
  1. utils/database.py +19 -21
utils/database.py CHANGED
@@ -3,13 +3,14 @@ import streamlit as st
3
  import sqlite3
4
  from sqlite3 import Error
5
  from datetime import datetime
6
- from langchain.chains.conversational_retrieval.base import (
7
- ConversationalRetrievalChain,
8
- )
9
- from langchain.memory import ConversationBufferMemory
 
 
10
  from langchain.chat_models import ChatOpenAI
11
  import os
12
-
13
 
14
  def create_connection(db_file):
15
  try:
@@ -90,38 +91,35 @@ def insert_document(conn, doc_name, doc_content):
90
  return False
91
 
92
 
 
 
 
 
 
 
 
93
  def initialize_qa_system(vector_store):
94
  """Initialize QA system with proper chat handling"""
95
  try:
96
- from langchain.prompts import ChatPromptTemplate
97
- from langchain.prompts import MessagesPlaceholder
98
-
99
  llm = ChatOpenAI(
100
  temperature=0,
101
  model_name="gpt-4",
102
  api_key=os.environ.get("OPENAI_API_KEY"),
103
  )
104
 
105
- # Create prompt template
106
- prompt = ChatPromptTemplate.from_messages([
107
- ("system", "You are a helpful assistant analyzing RFP documents."),
108
- MessagesPlaceholder(variable_name="chat_history"),
109
- ("human", "{input}"),
110
- MessagesPlaceholder(variable_name="agent_scratchpad"),
111
- ])
112
-
113
- memory = ConversationBufferMemory(
114
  memory_key="chat_history",
115
- return_messages=True
 
116
  )
117
 
118
  qa_chain = ConversationalRetrievalChain.from_llm(
119
  llm=llm,
120
  retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
121
  memory=memory,
122
- combine_docs_chain_kwargs={"prompt": prompt},
123
- return_source_documents=True,
124
- verbose=True
125
  )
126
 
127
  return qa_chain
 
3
  import sqlite3
4
  from sqlite3 import Error
5
  from datetime import datetime
6
+ #from langchain.memory import ConversationBufferMemory
7
+ from langchain.chat_models import ChatOpenAI
8
+ import os
9
+ from langchain.memory import ConversationBufferWindowMemory
10
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
11
+ from langchain.chains import ConversationalRetrievalChain
12
  from langchain.chat_models import ChatOpenAI
13
  import os
 
14
 
15
  def create_connection(db_file):
16
  try:
 
91
  return False
92
 
93
 
94
+ # utils/database.py
95
+ from langchain.memory import ConversationBufferWindowMemory
96
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
97
+ from langchain.chains import ConversationalRetrievalChain
98
+ from langchain.chat_models import ChatOpenAI
99
+ import os
100
+
101
  def initialize_qa_system(vector_store):
102
  """Initialize QA system with proper chat handling"""
103
  try:
 
 
 
104
  llm = ChatOpenAI(
105
  temperature=0,
106
  model_name="gpt-4",
107
  api_key=os.environ.get("OPENAI_API_KEY"),
108
  )
109
 
110
+ # Initialize memory with proper configuration
111
+ memory = ConversationBufferWindowMemory(
 
 
 
 
 
 
 
112
  memory_key="chat_history",
113
+ return_messages=True,
114
+ k=5 # Keep last 5 interactions
115
  )
116
 
117
  qa_chain = ConversationalRetrievalChain.from_llm(
118
  llm=llm,
119
  retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
120
  memory=memory,
121
+ verbose=True,
122
+ return_source_documents=True
 
123
  )
124
 
125
  return qa_chain