Spaces:
Paused
Paused
Update utils/database.py
Browse files- utils/database.py +14 -3
utils/database.py
CHANGED
|
@@ -93,23 +93,35 @@ def insert_document(conn, doc_name, doc_content):
|
|
| 93 |
def initialize_qa_system(vector_store):
    """Initialize QA system with proper chat handling"""
    try:
        # Chat model for answering; deterministic output (temperature=0).
        llm = ChatOpenAI(
            temperature=0,
            model_name="gpt-4",
            api_key=os.environ.get("OPENAI_API_KEY"),
        )

        # Conversation history buffer; return_messages=True yields message
        # objects (not a flat string), as chat models expect.
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True
        )

        # Retrieval-augmented conversational chain over the supplied
        # vector store; k=2 limits each query to the two closest chunks.
        qa_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
            memory=memory,
            return_source_documents=True,
            verbose=True
        )

        return qa_chain
    except Exception as e:
        # Surface the failure in the Streamlit UI and signal it to the
        # caller with None rather than raising.
        st.error(f"Error initializing QA system: {e}")
        return None
|
| 120 |
|
| 121 |
-
|
| 122 |
def initialize_faiss(embeddings, documents, document_names):
|
| 123 |
"""Initialize FAISS vector store"""
|
| 124 |
try:
|
|
|
|
| 93 |
def initialize_qa_system(vector_store):
    """Initialize QA system with proper chat handling.

    Builds a ConversationalRetrievalChain over *vector_store* with a custom
    combine-docs prompt and conversation memory.

    Parameters:
        vector_store: a vector store exposing ``as_retriever`` (e.g. FAISS).

    Returns:
        The configured chain, or ``None`` on failure (error shown via
        ``st.error``).
    """
    try:
        from langchain.prompts import ChatPromptTemplate

        llm = ChatOpenAI(
            temperature=0,
            model_name="gpt-4",
            api_key=os.environ.get("OPENAI_API_KEY"),
        )

        # Prompt for the combine-docs ("stuff") chain. It MUST use the
        # {context} and {question} variables — those are what
        # ConversationalRetrievalChain feeds into its document chain.
        # ({input} and an "agent_scratchpad" MessagesPlaceholder belong to
        # agent executors and would raise a missing-input-variable error
        # here, and the retrieved documents would never reach the prompt.)
        prompt = ChatPromptTemplate.from_messages([
            (
                "system",
                "You are a helpful assistant analyzing RFP documents.\n"
                "Use the following context to answer the question:\n"
                "{context}",
            ),
            ("human", "{question}"),
        ])

        # output_key="answer" is required: with return_source_documents=True
        # the chain returns multiple output keys, and ConversationBufferMemory
        # raises "One output key expected" unless told which one to store.
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
            output_key="answer",
        )

        qa_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
            memory=memory,
            combine_docs_chain_kwargs={"prompt": prompt},
            return_source_documents=True,
            verbose=True,
        )

        return qa_chain
    except Exception as e:
        # Report in the Streamlit UI; caller checks for None.
        st.error(f"Error initializing QA system: {e}")
        return None
|
| 132 |
|
|
|
|
| 133 |
def initialize_faiss(embeddings, documents, document_names):
|
| 134 |
"""Initialize FAISS vector store"""
|
| 135 |
try:
|