IsmaeelPandey committed on
Commit
62d8e7b
·
1 Parent(s): abff4d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -2
app.py CHANGED
@@ -6,6 +6,7 @@ import os
6
  from langchain.llms import HuggingFaceHub
7
 
8
  from langchain.chains import RetrievalQA
 
9
 
10
  # Embed and store
11
  from langchain.vectorstores import Chroma
@@ -51,7 +52,10 @@ chain_type_kwargs = {"prompt": PROMPT}
51
 
52
  llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-v0.1", model_kwargs={"temperature":0.1, "max_new_tokens":250})
53
 
54
- qachain=RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever(), memory=memory, chain_type_kwargs=chain_type_kwargs)
 
 
 
55
  st.header("#CodeWars localGPT", divider='rainbow')
56
 
57
  option = st.selectbox('What is your role?', ('Support', 'Sales'))
@@ -63,6 +67,6 @@ context = [] # the context stores a conversation history, you can use this to ma
63
  if(prompt):
64
  with st.chat_message(option):
65
  st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)
66
- context = qachain({"query": prompt})
67
 
68
  st.write(f"{datetime.datetime.now()}", context)
 
6
  from langchain.llms import HuggingFaceHub
7
 
8
  from langchain.chains import RetrievalQA
9
+ from langchain import PromptTemplate, LLMChain
10
 
11
  # Embed and store
12
  from langchain.vectorstores import Chroma
 
52
 
53
  llm = HuggingFaceHub(repo_id="mistralai/Mistral-7B-v0.1", model_kwargs={"temperature":0.1, "max_new_tokens":250})
54
 
55
+ # qachain=RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever(), memory=memory, chain_type_kwargs=chain_type_kwargs)
56
+
57
+ llm_chain = LLMChain(prompt=prompt, llm=llm)
58
+
59
  st.header("#CodeWars localGPT", divider='rainbow')
60
 
61
  option = st.selectbox('What is your role?', ('Support', 'Sales'))
 
67
  if(prompt):
68
  with st.chat_message(option):
69
  st.write(f"{datetime.datetime.now()} :red[{option}:] ", prompt)
70
+ context = llm_chain.run(prompt)
71
 
72
  st.write(f"{datetime.datetime.now()}", context)