Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import requests | |
| import datetime | |
| import os | |
| from langchain.llms import HuggingFaceHub | |
| from langchain.chains import RetrievalQA | |
# Embed and store
| from langchain.vectorstores import Chroma | |
| from langchain.embeddings import HuggingFaceHubEmbeddings | |
| from langchain.memory import ConversationBufferMemory | |
| from langchain.prompts import PromptTemplate | |
| from langchain.text_splitter import RecursiveCharacterTextSplitter | |
| from langchain.document_loaders import RecursiveUrlLoader | |
| from langchain.chains import RetrievalQA | |
| from langchain.document_loaders import PyPDFLoader | |
| from langchain.document_loaders import PyPDFDirectoryLoader | |
# --- One-time setup: credentials, document ingestion, and the QA chain -----

# SECURITY: the original hard-coded a HuggingFace API token in source control.
# Never commit secrets — read the token from the environment instead (and the
# previously committed token should be revoked on huggingface.co).
if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
    raise RuntimeError(
        "HUGGINGFACEHUB_API_TOKEN is not set; export it before running the app."
    )

embedding = HuggingFaceHubEmbeddings()

# Load every source PDF and split it into per-page documents.
loaderdocs = []
for pdf_path in ("1.pdf", "2.pdf"):
    loaderdocs.extend(PyPDFLoader(pdf_path).load_and_split())

# Re-chunk into ~500-character pieces suitable for embedding and retrieval.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(loaderdocs)

# In-memory Chroma vector store built over the chunks.
vectorstore = Chroma.from_documents(documents=docs, embedding=embedding)

# Conversation memory. NOTE(review): this is created but never wired into the
# RetrievalQA chain below — chat history is not actually used; confirm intent.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# Prompt that grounds the model in retrieved context and discourages
# fabricated answers.
prompt_template = """Use the following pieces of context to answer the question at the end and provide the reasons. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}"""
PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}

# flan-ul2 via the HuggingFace Hub inference API; low temperature keeps the
# answers mostly deterministic.
llm = HuggingFaceHub(
    repo_id="google/flan-ul2",
    model_kwargs={"temperature": 0.4, "max_new_tokens": 250},
)

# Retrieval-augmented QA chain: retriever pulls chunks from the vector store,
# PROMPT stitches them into the model call.
qachain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(),
    chain_type_kwargs=chain_type_kwargs,
)
# --- Streamlit chat UI -----------------------------------------------------
st.header("#CodeWars localGPT", divider='rainbow')

# Role selector only changes the chat-message avatar/label, not the chain.
role = st.selectbox('What is your role?', ('Support', 'Sales'))
st.write('You selected', role)

user_message = st.chat_input("Say something to our #CodeWars bot...")
if user_message:
    with st.chat_message(role):
        # Echo the user's message with a timestamp, then run retrieval-QA
        # and render the raw chain result.
        st.write(f"{datetime.datetime.now()} :red[{role}:] ", user_message)
        answer = qachain({"query": user_message})
        st.write(f"{datetime.datetime.now()}", answer)