Update app.py
app.py CHANGED
@@ -10,35 +10,43 @@ from langchain_community.vectorstores import Qdrant
 from langchain.chains import RetrievalQA
 from langchain.llms import HuggingFaceHub
 hf_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]
-with open("brookline_data.txt", "r") as f:
-    data = f.read()
-text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
-splitted_data = text_splitter.split_text(data)
-embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-mpnet-base-v2')
-#retriever = Qdrant.as_retriever()
-client = QdrantClient(":memory:")
-qdrant_vectorstore = Qdrant(
-    client,
-    embeddings.embed_query,
-    collection_name="my_documents"
-)
-retriever = qdrant_vectorstore.as_retriever()
-llm = HuggingFaceHub(repo_id="ahmadmac/Trained-T5-large", model_kwargs={"temperature": 0.5, "max_length": 512},huggingfacehub_api_token=hf_token)
-qna = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
 prompt_template = """ you are a highly knowledgeable AI assistant. Engage in a conversation with the user. Your main goal is to provide clear and informative answers to the user's questions.
 User: {question}
 Assistant:"""
 prompt = PromptTemplate(template=prompt_template, input_variables=["question"])
 chain = LLMChain(llm=llm, prompt=prompt)
 
-
-
-
-
+# Load and split the document for the retrieval-based QA system
+with open("brookline_data.txt", "r") as f:
+    data = f.read()
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
+splitted_data = text_splitter.split_text(data)
 
-
-
+# Generate embeddings for the split text
+embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-mpnet-base-v2')
+metadatas = [{"source": f"source_{i}"} for i in range(len(splitted_data))]
+documents = [Document(page_content=text, metadata=metadata) for text, metadata in zip(splitted_data, metadatas)]
 
+# Initialize Qdrant vector store
+qdrant = Qdrant.from_documents(
+    documents,
+    embeddings,
+    location=":memory:",
+    collection_name="my_documents",
+)
+retriever = qdrant.as_retriever()
+qna = RetrievalQA.from_chain_type(
+    llm=HuggingFaceHub(repo_id="ahmadmac/Trained-T5-large", model_kwargs={"temperature": 0.5, "max_length": 512},
+                       huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]),
+    chain_type="stuff",
+    retriever=retriever
+)
+def chatbot(question, chat_history):
+    response = chain.run(question)
+    retrieval_result = qna(question)
+    retrieval_answer = retrieval_result['result']
+    combined_response = f"{response}\n\nBased on the information available:\n{retrieval_answer}"
+    return combined_response
 demo = gr.ChatInterface(
    fn=chatbot,
    title="Chatbot",
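Two correctness flags on the added code. First, the new lines construct `Document(page_content=..., metadata=...)` objects, but no `Document` import is visible in the hunk; unless it is already imported above line 10, the Space will crash with a NameError on startup. A minimal sketch of the likely missing import (the exact module path depends on the LangChain version pinned here):

```python
# Probable missing import for the added Document(...) lines; in LangChain of
# this era it lives in langchain.docstore.document (langchain.schema re-exports it).
from langchain.docstore.document import Document
```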
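Second, the unchanged context line `chain = LLMChain(llm=llm, prompt=prompt)` still references a module-level `llm`, but this revision deletes the `llm = HuggingFaceHub(...)` assignment and instead builds the endpoint inline inside `RetrievalQA.from_chain_type`. Unless `llm` is defined somewhere above the hunk, the module raises a NameError at import time. One possible fix, sketched under the assumption that both chains are meant to share the same model:

```python
# Sketch: define the HuggingFaceHub endpoint once and reuse it for both chains.
# Assumes LLMChain and RetrievalQA should both use ahmadmac/Trained-T5-large.
llm = HuggingFaceHub(
    repo_id="ahmadmac/Trained-T5-large",
    model_kwargs={"temperature": 0.5, "max_length": 512},
    huggingfacehub_api_token=hf_token,
)
chain = LLMChain(llm=llm, prompt=prompt)
qna = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
```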
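On the retrieval settings: `chunk_size=100` with `chunk_overlap=20` yields very small chunks, roughly one sentence each, so each retrieved passage carries little context. A quick standalone way to sanity-check what the splitter produces before committing to these numbers:

```python
# Standalone check of the splitter configuration used in the diff.
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
chunks = splitter.split_text("word " * 80)  # ~400 characters of dummy text
print(len(chunks), max(len(c) for c in chunks))  # several chunks, each <= 100 chars
```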
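Finally, the hunk cuts off inside the `gr.ChatInterface(` call, so the rest of the file is not shown. The added `chatbot(question, chat_history)` signature does match what `ChatInterface` passes to `fn` (the new message, then the history). A typical tail for a Space like this, purely as a sketch since the actual remainder is not in the diff:

```python
# Hypothetical completion; the real file may pass other ChatInterface options.
demo = gr.ChatInterface(
    fn=chatbot,
    title="Chatbot",
)
demo.launch()
```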