Spaces: Runtime error
Commit 5b6b04b · Parent(s): b5dd679 — "Update app.py" (Browse files)
app.py CHANGED
|
@@ -2,6 +2,64 @@ import streamlit as st
|
|
| 2 |
import requests
|
| 3 |
import datetime
|
| 4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
st.header("#CodeWars localGPT", divider='rainbow')
|
| 6 |
|
| 7 |
option = st.selectbox('What is your role?', ('Support', 'Sales'))
|
|
@@ -14,4 +72,13 @@ if prompt:
|
|
| 14 |
|
| 15 |
response = requests.get("https://dummyjson.com/products/1").text
|
| 16 |
|
| 17 |
-
st.write(f"{datetime.datetime.now()} :red[Bot:] ", response)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import requests
|
| 3 |
import datetime
|
| 4 |
|
| 5 |
+
from langchain.llms import HuggingFaceHub
|
| 6 |
+
from langchain.callbacks.manager import CallbackManager
|
| 7 |
+
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
| 8 |
+
|
| 9 |
+
from langchain.chains import RetrievalQA
|
| 10 |
+
from langchain.schema import LLMResult
|
| 11 |
+
from langchain.callbacks.base import BaseCallbackHandler
|
| 12 |
+
|
| 13 |
+
# Embed and store
|
| 14 |
+
from langchain.vectorstores import Chroma
|
| 15 |
+
from langchain.embeddings import HuggingFaceHubEmbeddings
|
| 16 |
+
from langchain.memory import ConversationBufferMemory
|
| 17 |
+
from langchain.prompts import PromptTemplate
|
| 18 |
+
|
| 19 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 20 |
+
from langchain.document_loaders import WebBaseLoader, RecursiveUrlLoader
|
| 21 |
+
from langchain.chains import RetrievalQA
|
| 22 |
+
|
| 23 |
+
# --- Knowledge-base setup: crawl a support forum, embed it, build a QA chain ---

# Hugging Face Hub embeddings (reads HUGGINGFACEHUB_API_TOKEN from the env).
embedding = HuggingFaceHubEmbeddings()

# Crawl the Samsung community "Get Help" board; max_depth=1 keeps it to this page.
url = "https://us.community.samsung.com/t5/Get-Help/ct-p/get-help?page=1&tab=recent_topics"
loaders = [RecursiveUrlLoader(url=url, max_depth=1)]

# Load every page from every loader into a single document list.
docs = []
for doc_loader in loaders:
    docs.extend(doc_loader.load())

# Split into large chunks so each retrieved passage carries enough context.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)

# In-memory Chroma vector store over the crawled documents.
vectorstore = Chroma.from_documents(documents=docs, embedding=embedding)

# Conversation memory so the chain can refer back to earlier turns.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}"""

PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}

# BUG FIX: `repo_id` was used below but never defined, raising NameError at
# startup (the Space's "Runtime error"). Pin an explicit instruct model.
repo_id = "mistralai/Mistral-7B-Instruct-v0.1"
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 250})

# Retrieval-augmented QA chain: retriever over the vector store + chat memory.
qachain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectorstore.as_retriever(),
    memory=memory,
    chain_type_kwargs=chain_type_kwargs,
)
|
| 63 |
# Page title with a rainbow divider underneath.
st.header("#CodeWars localGPT", divider='rainbow')

# Ask which persona the user is chatting as; drives the chat-line label below.
option = st.selectbox('What is your role?', ('Support', 'Sales'))
|
|
|
|
| 72 |
|
| 73 |
response = requests.get("https://dummyjson.com/products/1").text
|
| 74 |
|
| 75 |
+
st.write(f"{datetime.datetime.now()} :red[Bot:] ", response)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# Conversation history: one {user, bot} record per turn, kept so the app
# (and, later, the model prompt) can stay context aware.
context = []

while True:
    # BUG FIX: st.chat_input returns None until the user submits text; the
    # original fed that None straight back into qachain forever. Stop instead.
    if not prompt:
        break

    # Echo the user's turn, labelled with the role chosen above.
    st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)

    # RetrievalQA returns a dict; the answer text lives under 'result'.
    response = qachain({"query": prompt})

    # BUG FIX: the original wrote context['result'], but `context` is a list —
    # indexing it with a string raised TypeError on the first turn.
    answer = response['result']
    st.write(f"{datetime.datetime.now()} :red[Bot:] ", answer)

    # Record the turn; the original declared the history but never filled it.
    context.append({"user": prompt, "bot": answer})

    prompt = st.chat_input("Say something to our #CodeWars bot...")
|