import streamlit as st
import datetime
import os

from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate

# Embed and store
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.memory import ConversationBufferMemory

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import RecursiveUrlLoader, WebBaseLoader

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
embedding = HuggingFaceHubEmbeddings()
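# Hedged note: Streamlit re-runs this entire script on every user interaction,
# so the crawl, embedding, and index build below repeat each time. A common
# remedy (a sketch, not this app's current structure) is to move the setup
# into a function decorated with Streamlit's @st.cache_resource and call it
# once:
#
# @st.cache_resource
# def build_chain():
#     ...  # loader, splitter, vectorstore, llm, chain construction as below
#     return qachain
#
# qachain = build_chain()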

url = "https://nanoreview.net/en/phone-compare/samsung-galaxy-s9-vs-samsung-galaxy-s10"
loader = RecursiveUrlLoader(url=url, max_depth=1)  # max_depth=1: fetch only this page
# loader = WebBaseLoader(url)  # alternative single-page loader

loaders = [loader]

docs = []
for doc_loader in loaders:
    docs.extend(doc_loader.load())

# Split the page into ~500-character chunks; chunk_overlap=0 means adjacent
# chunks share no text.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(docs)

# Build an in-memory Chroma index over the chunks.
vectorstore = Chroma.from_documents(documents=docs, embedding=embedding)
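# Optional (hedged sketch): persist the index to disk so it survives reruns;
# "chroma_db" is an arbitrary example path, not part of this app.
# vectorstore = Chroma.from_documents(
#     documents=docs, embedding=embedding, persist_directory="chroma_db"
# )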


# Conversation memory; created here but not yet wired into the chain (see the
# ConversationalRetrievalChain sketch below the chain construction).
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)


prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}"""

PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}
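# Hedged example of what the filled template looks like (values illustrative):
# PROMPT.format(context="Galaxy S10: 3400 mAh battery. Galaxy S9: 3000 mAh.",
#               question="Which phone has the bigger battery?")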

llm = HuggingFaceHub(repo_id="google/flan-ul2", model_kwargs={"temperature": 0.1, "max_new_tokens": 250})

qachain = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever(), chain_type_kwargs=chain_type_kwargs)
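# Hedged sketch: to make `memory` actually take effect across turns, a
# ConversationalRetrievalChain could replace RetrievalQA. This is one possible
# wiring, not the app's current behavior:
# from langchain.chains import ConversationalRetrievalChain
# qachain = ConversationalRetrievalChain.from_llm(
#     llm=llm, retriever=vectorstore.as_retriever(), memory=memory
# )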


st.header("#CodeWars localGPT", divider='rainbow')

# The selected role is only used to label the user's chat bubble below.
option = st.selectbox('What is your role?', ('Support', 'Sales'))

st.write('You selected', option)

prompt = st.chat_input("Say something to our #CodeWars bot...")
if prompt:
    with st.chat_message(option):
        st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)
    # Run the retrieval chain; it returns a dict with "query" and "result".
    response = qachain({"query": prompt})
    with st.chat_message("assistant"):
        st.write(f"{datetime.datetime.now()} ", response["result"])