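"""#CodeWars localGPT: a minimal Streamlit RAG chat app.

Loads a PDF, indexes it in Chroma with HuggingFace Hub embeddings, and answers
chat questions with a flan-ul2 RetrievalQA chain grounded in the document.
"""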
import datetime
import os

import streamlit as st

from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA

# Embed and store
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader

# Redacted placeholder: never hard-code a real API token; load it from the environment or st.secrets.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR_HUGGINGFACEHUB_API_TOKEN>"
embedding = HuggingFaceHubEmbeddings()

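# Load the PDF; load_and_split() returns it as a list of Documents.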
loader = PyPDFLoader("sample.pdf")
pages = loader.load_and_split()

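# Re-chunk into ~500-character pieces with 50 characters of overlap for finer-grained retrieval.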
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(pages)

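# Embed each chunk and index it in an (in-memory) Chroma vector store.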
vectorstore = Chroma.from_documents(documents=docs, embedding=embedding)

# Chat-history memory; defined here but not yet wired into the RetrievalQA chain below.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

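# Prompt that grounds the model in retrieved context and tells it to admit when it doesn't know.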
prompt_template = """Use the following pieces of context to answer the question at the end, and explain your reasoning. If you don't know the answer, just say that you don't know; don't try to make up an answer.
{context}
Question: {question}"""

PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}

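# flan-ul2 served via the hosted HuggingFace Hub Inference API; temperature 0.4 keeps generations fairly deterministic.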
llm = HuggingFaceHub(repo_id="google/flan-ul2", model_kwargs={"temperature": 0.4, "max_new_tokens": 250})

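# RetrievalQA defaults to the "stuff" chain type: retrieved chunks are inserted into the prompt's {context}.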
qachain = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever(), chain_type_kwargs=chain_type_kwargs)

st.header("#CodeWars localGPT", divider='rainbow')

option = st.selectbox('What is your role?', ('Support', 'Sales'))

st.write('You selected', option)

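# Chat loop: echo the user's message under their chosen role, then show the chain's answer.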
prompt = st.chat_input("Say something to our #CodeWars bot...")
if prompt:
    with st.chat_message(option):
        st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)
    result = qachain({"query": prompt})
    # Show just the generated answer rather than the full result dict.
    st.write(f"{datetime.datetime.now()}", result["result"])
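
# To try this locally (assuming the file is saved as app.py):
#   streamlit run app.py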