Update app.py
app.py CHANGED
@@ -10,21 +10,24 @@ from langchain.schema import Document
 from langchain_community.vectorstores import Qdrant
 from langchain.chains import RetrievalQA
 from langchain.llms import HuggingFaceHub
+from langchain.schema import StrOutputParser
+from langchain.schema.runnable import RunnablePassthrough, RunnableMap
+from langchain_google_genai import GoogleGenerativeAI
 hf_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]
 model_name_or_path = "ahmadmac/Trained-T5-large"
-pipe = pipeline(
-    'text2text-generation',
-    model=model_name_or_path,
-    max_length=512,
-    do_sample=True,
-    temperature=1.0
-)
-llm = HuggingFacePipeline(pipeline=pipe)
-prompt_template = """ you are a highly knowledgeable AI assistant. Engage in a conversation with the user. Your main goal is to provide clear and informative answers to the user's questions.
-User: {question}
-Assistant:"""
-prompt = PromptTemplate(template=prompt_template, input_variables=["question"])
-chain = LLMChain(llm=llm, prompt=prompt)
+# pipe = pipeline(
+#     'text2text-generation',
+#     model=model_name_or_path,
+#     max_length=512,
+#     do_sample=True,
+#     temperature=1.0
+# )
+# llm = HuggingFacePipeline(pipeline=pipe)
+# prompt_template = """ you are a highly knowledgeable AI assistant. Engage in a conversation with the user. Your main goal is to provide clear and informative answers to the user's questions.
+# User: {question}
+# Assistant:"""
+# prompt = PromptTemplate(template=prompt_template, input_variables=["question"])
+# chain = LLMChain(llm=llm, prompt=prompt)
 with open("brookline_data.txt", "r") as f:
     data = f.read()
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)

@@ -39,17 +42,22 @@ qdrant = Qdrant.from_documents(
     collection_name="my_documents",
 )
 retriever = qdrant.as_retriever()
+# qna = RetrievalQA.from_chain_type(
+#     llm=HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.9, "max_length": 512},
+#     huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]),
+#     chain_type="stuff",
+#     retriever=retriever
+# )
 qna = RetrievalQA.from_chain_type(
-    llm=HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature": 0.9, "max_length": 512},
-    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]),
+    llm=GoogleGenerativeAI(model="gemini-1.5-flash", google_api_key=os.environ["google_api_key"]),
     chain_type="stuff",
     retriever=retriever
 )
 def chatbot(question, chat_history):
-    response = chain.run(question)
+    #response = chain.run(question)
     retrieval_result = qna(question)
     retrieval_answer = retrieval_result['result']
-    combined_response = f"Based on the information available:\n{retrieval_answer}
+    combined_response = f"Based on the information available:\n{retrieval_answer}"
     return combined_response
 demo = gr.ChatInterface(
     fn=chatbot,
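For context, a minimal end-to-end sketch of the flow after this change is below. It is reconstructed only from the hunks visible above; the embedding backend, the in-memory Qdrant location, and the example question are assumptions that do not appear in this diff.

# Sketch only: mirrors app.py after this commit. HuggingFaceEmbeddings,
# location=":memory:", and the example question are assumptions (not in the diff).
import os

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
from langchain_community.embeddings import HuggingFaceEmbeddings  # assumed embedding backend
from langchain_community.vectorstores import Qdrant
from langchain.chains import RetrievalQA
from langchain_google_genai import GoogleGenerativeAI

# Load and chunk the source text, as in app.py.
with open("brookline_data.txt", "r") as f:
    data = f.read()
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
docs = [Document(page_content=chunk) for chunk in splitter.split_text(data)]

# Build the vector store and retriever (embedding model and location are assumed).
qdrant = Qdrant.from_documents(
    docs,
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
    location=":memory:",
    collection_name="my_documents",
)
retriever = qdrant.as_retriever()

# The Gemini-backed RetrievalQA chain introduced by this commit.
qna = RetrievalQA.from_chain_type(
    llm=GoogleGenerativeAI(model="gemini-1.5-flash", google_api_key=os.environ["google_api_key"]),
    chain_type="stuff",
    retriever=retriever,
)

result = qna("What is Brookline known for?")  # hypothetical question; returns a dict
print(f"Based on the information available:\n{result['result']}")

Run this way, it should print the same string the Gradio chatbot callback returns for that question.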