zoya-hammad committed on
Commit
fd1d333
·
1 Parent(s): 720da31

Updated app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -12,6 +12,7 @@ from langchain.text_splitter import CharacterTextSplitter
12
  from langchain.schema import Document
13
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
14
  from langchain_chroma import Chroma
 
15
  from langchain.memory import ConversationBufferMemory
16
  from langchain.chains import ConversationalRetrievalChain
17
  from langchain_ollama import ChatOllama
@@ -82,7 +83,7 @@ def process_files(files):
82
  # HF Pipeline
83
  hf_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
84
 
85
- llm = HuggingFacePipeline(pipeline=hf_pipeline)
86
  memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
87
  retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
88
  global conversation_chain
 
12
  from langchain.schema import Document
13
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
14
  from langchain_chroma import Chroma
15
+ from langchain.chat_models import ChatHuggingFace
16
  from langchain.memory import ConversationBufferMemory
17
  from langchain.chains import ConversationalRetrievalChain
18
  from langchain_ollama import ChatOllama
 
83
  # HF Pipeline
84
  hf_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
85
 
86
+ llm = ChatHuggingFace(model_name="HuggingFaceH4/zephyr-7b-alpha")
87
  memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
88
  retriever = vectorstore.as_retriever(search_kwargs={"k": 10})
89
  global conversation_chain