File size: 2,333 Bytes
c43eadc
 
 
ab3d00d
c43eadc
24c2548
fba5bd1
ab3d00d
b49c9e0
ab3d00d
b49c9e0
5f51bfd
ab3d00d
 
 
 
 
 
 
 
 
357ba5f
ab3d00d
 
 
 
5bdb023
 
7b0038a
d2a7723
 
 
 
 
 
 
 
 
 
ab3d00d
7cd66f5
bde086b
ab3d00d
 
c43eadc
b2272c2
ab3d00d
 
 
 
 
 
 
 
67452f6
ab3d00d
4d9b259
ab3d00d
0156b0d
c43eadc
 
 
 
 
ab3d00d
fc6fa0a
 
ab3d00d
fc6fa0a
a7c42d3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import streamlit as st
import requests
import datetime
import os



from langchain.llms import HuggingFaceHub

from langchain.chains import RetrievalQA

# Embed and store
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import RecursiveUrlLoader
from langchain.chains import RetrievalQA
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import PyPDFDirectoryLoader

# SECURITY: never hard-code API tokens in source control — the original line
# embedded a live HuggingFace token here. Read it from the environment instead
# (export HUGGINGFACEHUB_API_TOKEN before launching the app) and fail fast
# with a clear message if it is missing.
hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    raise RuntimeError(
        "HUGGINGFACEHUB_API_TOKEN is not set; export it before running this app."
    )
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

# Remote embedding model hosted on the HuggingFace Hub (uses the token above).
embedding = HuggingFaceHubEmbeddings()

# --- Document ingestion ------------------------------------------------------
# Load the source PDFs, split them into small chunks, and index the chunks in
# an in-memory Chroma vector store for retrieval.
pdf_paths = ["1.pdf", "2.pdf"]
loaders = [PyPDFLoader(path) for path in pdf_paths]

# Each loader yields page-level documents; collect them all in one list.
loaderdocs = []
for pdf_loader in loaders:
    loaderdocs.extend(pdf_loader.load_and_split())

# Re-chunk into ~500-character pieces with no overlap so each retrieved
# passage stays small enough for the prompt context window.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(loaderdocs)

# Embed every chunk and build the searchable vector store.
vectorstore = Chroma.from_documents(documents=docs, embedding=embedding)

# --- Retrieval-QA chain ------------------------------------------------------

# Conversation memory buffer. NOTE(review): this object is instantiated but
# never passed to the chain below, so chat history is not actually retained
# between turns — confirm whether it should be wired in (e.g. via `memory=`).
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True) # to remember chat history include this

# Prompt that grounds the model in retrieved context and tells it not to
# fabricate answers when the context is insufficient.
prompt_template = """Use the following pieces of context to answer the question at the end and provide the reasons. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}"""

qa_prompt = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question"],
)

# Hosted flan-ul2 model; low temperature keeps answers close to the context.
llm = HuggingFaceHub(
    repo_id="google/flan-ul2",
    model_kwargs={"temperature": 0.4, "max_new_tokens": 250},
)

# Retrieval-augmented QA chain over the Chroma vector store, using the
# grounding prompt above.
qachain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(),
    chain_type_kwargs={"prompt": qa_prompt},
)
# --- Streamlit chat UI -------------------------------------------------------
st.header("#CodeWars localGPT", divider='rainbow')

# Role selector; the chosen role labels the user's chat bubbles below.
option = st.selectbox('What is your role?', ('Support', 'Sales'))

st.write('You selected', option)

prompt = st.chat_input("Say something to our #CodeWars bot...")
if prompt:  # idiomatic truthiness check (was the non-idiomatic `if(prompt):`)
    # Echo the user's message in a chat bubble tagged with their role.
    with st.chat_message(option):
        st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)

    # Run the retrieval-QA chain for this question.
    answer = qachain({"query": prompt})

    # NOTE(review): this renders the chain's raw output dict (including the
    # echoed query); consider answer.get("result") to show only the answer
    # text — confirm the desired UX before changing.
    st.write(f"{datetime.datetime.now()}", answer)