KyleIsaacs committed on
Commit
ab3d00d
·
1 Parent(s): 2d7ef8e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -14
app.py CHANGED
@@ -1,30 +1,58 @@
"""#CodeWars localGPT — Streamlit chat front-end for the HF Inference API.

Sends the user's chat prompt to a hosted Mistral-7B-Instruct model and
renders the raw JSON response in the chat transcript.
"""
import datetime
import os

import requests
import streamlit as st

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
# SECURITY FIX: the original hard-coded a live API token in source control.
# Read it from the environment instead; never commit secrets.
headers = {"Authorization": f"Bearer {os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')}"}


def query(payload):
    """POST *payload* to the inference endpoint and return the parsed JSON.

    Args:
        payload: dict serialized as the request's JSON body
                 (typically ``{"inputs": <prompt text>}``).

    Returns:
        The decoded JSON response from the API (shape depends on the model).
    """
    # SECURITY FIX: the original passed verify=False, silently disabling TLS
    # certificate verification (man-in-the-middle risk). Default verification
    # is restored here.
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()


st.header("#CodeWars localGPT", divider='rainbow')

st.markdown("*#CodeWars* is **really** ***cool***... :sunglasses: our focus is on quality reasoning.")

option = st.selectbox('What is your role?', ('Support', 'Sales'))

st.write('You selected', option)

prompt = st.chat_input("Say something to our #CodeWars bot...")
if prompt:
    # Echo the user's message into the transcript, timestamped and tagged
    # with the selected role.
    with st.chat_message(option):
        st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)

    response = query({
        "inputs": prompt,
    })

    st.write(f"{datetime.datetime.now()}", response)
 
"""#CodeWars localGPT — Streamlit RAG chat over a local PDF.

Loads ``sample.pdf``, embeds its pages into a Chroma vector store, and
answers chat questions with a RetrievalQA chain backed by Mistral-7B
on the Hugging Face Hub.
"""
import datetime
import os

import requests  # kept from the original file (not used below)
import streamlit as st

from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA

# Embed and store
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

from langchain.text_splitter import RecursiveCharacterTextSplitter  # kept from original (not used below)
from langchain.document_loaders import RecursiveUrlLoader  # kept from original (not used below)
from langchain.document_loaders import PyPDFLoader

# SECURITY FIX: the original assigned a live token to
# os.environ["HUGGINGFACEHUB_API_TOKEN"] directly in source control.
# The token must now be provided via the environment before launch;
# HuggingFaceHubEmbeddings/HuggingFaceHub read it from there.
if "HUGGINGFACEHUB_API_TOKEN" not in os.environ:
    st.error("Set the HUGGINGFACEHUB_API_TOKEN environment variable before launching the app.")
    st.stop()

embedding = HuggingFaceHubEmbeddings()

# Ingest the reference document and split it into per-page chunks.
loader = PyPDFLoader("sample.pdf")
pages = loader.load_and_split()

# BUG FIX: the original passed the undefined name `docs` here (NameError at
# startup); the loaded/split pages are bound to `pages`.
vectorstore = Chroma.from_documents(documents=pages, embedding=embedding)

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)  # to remember chat history include this

prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}"""

PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)
chain_type_kwargs = {"prompt": PROMPT}

# Low temperature keeps answers close to the retrieved context.
llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-v0.1", model_kwargs={"temperature": 0.1, "max_new_tokens": 250})

qachain = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever(), memory=memory, chain_type_kwargs=chain_type_kwargs)

st.header("#CodeWars localGPT", divider='rainbow')

option = st.selectbox('What is your role?', ('Support', 'Sales'))

st.write('You selected', option)

prompt = st.chat_input("Say something to our #CodeWars bot...")
context = []  # the context stores a conversation history, you can use this to make the model more context aware
if prompt:
    # Echo the user's message into the transcript, timestamped and tagged
    # with the selected role.
    with st.chat_message(option):
        st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)
    context = qachain({"query": prompt})

    st.write(f"{datetime.datetime.now()}", context['result'])