Update app.py
app.py CHANGED
```diff
@@ -41,7 +41,8 @@ from langchain.document_loaders import UnstructuredFileLoader, TextLoader
 from langchain import PromptTemplate
 
 from langchain.chains import RetrievalQA
-from langchain.memory import ConversationBufferWindowMemory
+#from langchain.memory import ConversationBufferWindowMemory
+from langchain.memory import ConversationBufferMemory
 
 from transformers import LlamaTokenizer, AutoTokenizer
 
```
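The import swap above replaces the windowed buffer with the full-history buffer. As a hedged sketch (assuming the classic `langchain.memory` API; this is not code from the commit), the practical difference is how much of the conversation each class retains:

```python
# Sketch, not from the commit: contrast the two memory classes.
from langchain.memory import ConversationBufferMemory, ConversationBufferWindowMemory

window = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question")
full = ConversationBufferMemory(memory_key="history", input_key="question")

for i in range(5):
    turn_in, turn_out = {"question": f"q{i}"}, {"output": f"a{i}"}
    window.save_context(turn_in, turn_out)  # keeps only the last k=3 exchanges
    full.save_context(turn_in, turn_out)    # keeps the entire transcript

print(window.load_memory_variables({})["history"])  # q2..q4 only
print(full.load_memory_variables({})["history"])    # q0..q4
```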
```diff
@@ -119,7 +120,7 @@ def getLLMModel(LLMID):
             model_kwargs={"temperature": 0.2,"max_new_tokens":2500})
         print("Mistral AI LLM Selected")
     else:
-        llm = OpenAI(model_name="gpt-3.5-turbo-
+        llm = OpenAI(model_name="gpt-3.5-turbo-0125",temperature=0.0)
         print("Open AI LLM Selected")
     return llm
 
```
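The removed line appears truncated (an unterminated string literal, which would fail at import time and is consistent with the Space's runtime-error status); the replacement pins a full model id and a deterministic temperature. A minimal sketch of the fixed fallback branch, assuming `OpenAI` is LangChain's completion-style LLM wrapper as the surrounding code suggests:

```python
# Sketch of the fixed fallback branch (assumes langchain's OpenAI wrapper
# and an OPENAI_API_KEY available in the environment).
from langchain.llms import OpenAI

llm = OpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.0)
print("Open AI LLM Selected")
```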
```diff
@@ -208,7 +209,8 @@ def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llm
     # Retrieve conversation history if available
     #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question")
     global memory
-    memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
+    #memory = ConversationBufferWindowMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
+    memory = ConversationBufferMemory(k=3, memory_key="history", input_key="question", initial_memory=conversation_history)
 
     # chain = RetrievalQA.from_chain_type(
     #     llm=getLLMModel(llmID),
```
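One caution here: in classic LangChain, `ConversationBufferMemory` documents neither an `initial_memory` nor a `k` parameter (`k` belongs to the window variant), so depending on the installed version those extra arguments may be ignored or rejected. A hedged sketch of the documented way to pre-seed the buffer, assuming for illustration that `conversation_history` is a list of (user, assistant) string pairs rather than the app's actual structure:

```python
# Sketch with assumptions flagged: `conversation_history` as (user, ai)
# pairs is illustrative only.
from langchain.memory import ConversationBufferMemory

conversation_history = [("Hi, I moved districts.", "Thanks, noted.")]

memory = ConversationBufferMemory(memory_key="history", input_key="question")
for user_msg, ai_msg in conversation_history:
    memory.chat_memory.add_user_message(user_msg)  # pre-seed prior turns
    memory.chat_memory.add_ai_message(ai_msg)
```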
```diff
@@ -229,6 +231,7 @@ def getRAGChain(customerName, customerDistrict, custDetailsPresent, vectordb,llm
         llm=getLLMModel(llmID),
         chain_type='stuff',
         retriever=getRetriever(vectordb),
+        memory=memory,
         #retriever=vectordb.as_retriever(),
         verbose=False,
         chain_type_kwargs={
```
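With `memory=memory` passed through, the chain saves and re-injects each question/answer pair automatically. A condensed sketch of the assembled call; `getLLMModel`, `getRetriever`, `vectordb`, `llmID`, and the prompt are the app's own objects, assumed in scope here, and the `prompt` key inside `chain_type_kwargs` is an assumption since the hunk is cut off at that line:

```python
# Sketch of the assembled chain; helper names and `prompt` come from the app.
from langchain.chains import RetrievalQA

chain = RetrievalQA.from_chain_type(
    llm=getLLMModel(llmID),
    chain_type="stuff",
    retriever=getRetriever(vectordb),
    memory=memory,  # added in this commit: history persists across calls
    verbose=False,
    chain_type_kwargs={"prompt": prompt},  # prompt should expose {history} and {question}
)
```

For the memory to take effect, the `memory_key="history"` and `input_key="question"` chosen above have to match the variables the prompt template actually declares.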