from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain.retrievers.document_compressors import EmbeddingsFilter
#from langchain_text_splitters import CharacterTextSplitter
from langchain.retrievers import ContextualCompressionRetriever
from langchain_groq import ChatGroq
#from langchain.document_loaders import HuggingFaceDatasetLoader
# from langchain_community.document_loaders import UnstructuredExcelLoader
# from langchain.document_loaders import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
# from transformers import AutoTokenizer, AutoModelForQuestionAnswering
# from transformers import AutoTokenizer, pipeline
# from langchain import HuggingFacePipeline
import re
import os
import streamlit as st
import requests
# --- Embedding model ----------------------------------------------------------
# Sentence-transformer used to embed both the indexed documents and queries.
# NOTE(review): the original initialized HuggingFaceEmbeddings twice with
# identical settings; the duplicate has been removed.
model_path = "sentence-transformers/all-MiniLM-l6-v2"
model_kwargs = {'device': 'cpu'}                  # run inference on CPU
encode_kwargs = {'normalize_embeddings': False}   # keep raw (unnormalized) vectors
embeddings = HuggingFaceEmbeddings(
    model_name=model_path,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# --- Vector store / retriever -------------------------------------------------
# SECURITY NOTE: allow_dangerous_deserialization=True uses pickle under the
# hood — only load FAISS indexes you created yourself.
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
retriever = db.as_retriever(search_kwargs={"k": 2})

# Compression pipeline applied to retrieved documents: split into chunks, drop
# near-duplicate chunks, then keep only chunks relevant to the query (the
# relevance filter attaches 'query_similarity_score' to each document's state).
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings)
pipeline_compressor = DocumentCompressorPipeline(
    transformers=[text_splitter, redundant_filter, relevant_filter]
)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=pipeline_compressor, base_retriever=retriever
)

# --- Chat model ---------------------------------------------------------------
# SECURITY: never hard-code API keys in source. Prefer the GROQ_API_KEY
# environment variable; the literal fallback preserves the original behavior
# but the exposed key should be rotated and the fallback removed.
chat = ChatGroq(
    temperature=0,
    groq_api_key=os.environ.get(
        "GROQ_API_KEY", "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"
    ),
    model_name="llama3-70b-8192",
)
# Prompt for retrieval-augmented generation; {context} and {query} are filled
# in at invocation time.
rag_template_str = ("""
Answer the following query based on the context given.
Stylization:
1)Don't say "According to context provided" or "Here is the answer to the query"
2)Include the source URLs
3)Include the Category it belongs to
Formatting:
1)Start answer with "ANSR:"
2)Use bullet points
Restriction:
1)Only use context to answer the question
2)If you don't know the answer,reply with "No answer found, you can contact us on https://www.i2econsulting.com/contact-us/"
context: {context}
query:{query}
""")
# NOTE(review): rag_prompt/rag_chain were originally defined twice with
# identical values; the duplicate definitions have been removed.
rag_prompt = ChatPromptTemplate.from_template(rag_template_str)
rag_chain = rag_prompt | chat | StrOutputParser()

# NOTE(review): this second model is never used anywhere visible in the file —
# candidate for removal. Key read from the environment (see `chat` above).
llm = ChatGroq(
    groq_api_key=os.environ.get(
        "GROQ_API_KEY", "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"
    ),
    model_name="mixtral-8x7b-32768",
)
# --- Page header --------------------------------------------------------------
col1, col2 = st.columns([1, 7])
with col1:
    st.image(image="image.png", width=80)  # robot/logo image
with col2:
    st.title("ANSR Chatbot")

# --- Chat history -------------------------------------------------------------
# BUG FIX: the intro message was previously appended on EVERY Streamlit rerun
# (the append sat outside this guard), so the greeting duplicated in the
# history on each interaction. It now runs only when the history is created.
if "messages" not in st.session_state:
    st.session_state.messages = []
    intro = {"role": "assistant",
             "content": """Hello there! Welcome to i2e, your intelligent guide to unlocking productivity and efficiency! Whether you're seeking quick answers, expert assistance, or simply exploring our services, I'm here to assist you every step of the way. Let's dive in and discover how i2e can empower you to achieve your goals effortlessly. How can I assist you today?"""}
    st.session_state.messages.append(intro)

# Replay the stored conversation on each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# st.title("i2e Enterprise Chatbot")
# prompt = st.text_input("Ask Question")
def api_py_function(query):
    """Answer *query* via RAG and yield the response one word at a time.

    Retrieves compressed context documents, keeps at most the first five whose
    'query_similarity_score' (attached by the EmbeddingsFilter in the
    compression pipeline) clears a minimal relevance threshold, and feeds the
    concatenated chunks to the LLM chain. Yields a canned "no answer" message
    when no sufficiently relevant context is found.

    The word-by-word generator output is consumed by ``st.write_stream``.
    """
    context = compression_retriever.get_relevant_documents(query)
    # Chunk text plus its metadata (which carries the source URL / category
    # the prompt asks the model to cite).
    relevant_chunks = [
        doc.page_content + str(doc.metadata)
        for doc in context[:5]
        if doc.state['query_similarity_score'] > 0.1
    ]
    if relevant_chunks:
        response = rag_chain.invoke(
            {"query": query, "context": ''.join(relevant_chunks)}
        )
    else:
        response = "No answer found, Please rephrase your question or you can contact us on https://www.i2econsulting.com/contact-us/"
    # Stream word-by-word; trailing space keeps words separated when joined.
    for word in response.split():
        yield word + " "
# def response_generator():
# response=response.replace("\\n\\n"," \\n")
# for word in response.split():
# yield word + " "
# time.sleep(0.05)
# print(response)
# if prompt:
# print("processing request")
# full_response=api_py_function(prompt)
# # full_response = response.text.replace(u"\u2000", "")
# # full_response=response.text.replace("\\n\\n"," \\n")
# # full_response = full_response.replace("\\n", " \\n")
# st.write(full_response)
# Accept user input
# --- Chat input / response loop ------------------------------------------------
if prompt := st.chat_input("ANSR is ready to answer your question"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    # Stream the assistant's answer; write_stream consumes the generator and
    # returns the full concatenated text.
    with st.chat_message("assistant"):
        response = st.write_stream(api_py_function(prompt))
    # Persist the assistant turn. (Removed a leftover debug print that dumped
    # the entire chat history to stdout on every turn.)
    st.session_state.messages.append({"role": "assistant", "content": response})
|