Spaces:
Sleeping
Sleeping
Commit
·
7d8a70b
1
Parent(s):
82b38e9
refactoring code
Browse files
- __pycache__/utils.cpython-310.pyc +0 -0
- pages/3_🤖_Chatbot.py +1 -9
- requirements.txt +1 -1
- utils.py +2 -1
- 👋_Introduction.py +0 -3
__pycache__/utils.cpython-310.pyc
CHANGED
|
Binary files a/__pycache__/utils.cpython-310.pyc and b/__pycache__/utils.cpython-310.pyc differ
|
|
|
pages/3_🤖_Chatbot.py
CHANGED
|
@@ -1,4 +1,3 @@
|
|
| 1 |
-
# 🤖
|
| 2 |
import streamlit as st
|
| 3 |
|
| 4 |
st.set_page_config(
|
|
@@ -67,9 +66,6 @@ if empty_openai_api_key:
|
|
| 67 |
|
| 68 |
else:
|
| 69 |
try:
|
| 70 |
-
# if st.button('Say hello'):
|
| 71 |
-
# st.write('Why hello there')
|
| 72 |
-
|
| 73 |
# container for chat history
|
| 74 |
response_container = st.container()
|
| 75 |
# container for text box
|
|
@@ -81,16 +77,12 @@ else:
|
|
| 81 |
if query:
|
| 82 |
with st.spinner("typing..."):
|
| 83 |
conversation_string = get_conversation_string()
|
| 84 |
-
# st.code(conversation_string)
|
| 85 |
refined_query = query_refiner(conversation_string, query)
|
| 86 |
-
# st.subheader("Refined Query:")
|
| 87 |
-
# st.write(refined_query)
|
| 88 |
context = find_match(refined_query,
|
| 89 |
pinecone_api_key=st.session_state['pinecone_api_key'],
|
| 90 |
pinecone_env=st.session_state['pinecone_env'],
|
| 91 |
pinecone_index_namespace=st.session_state['pinecone_index_namespace']
|
| 92 |
-
)
|
| 93 |
-
# print(context)
|
| 94 |
response = conversation.predict(input=f"Context:\n {context} \n\n Query:\n{query}")
|
| 95 |
st.session_state.requests.append(query)
|
| 96 |
st.session_state.responses.append(response)
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
|
| 3 |
st.set_page_config(
|
|
|
|
| 66 |
|
| 67 |
else:
|
| 68 |
try:
|
|
|
|
|
|
|
|
|
|
| 69 |
# container for chat history
|
| 70 |
response_container = st.container()
|
| 71 |
# container for text box
|
|
|
|
| 77 |
if query:
|
| 78 |
with st.spinner("typing..."):
|
| 79 |
conversation_string = get_conversation_string()
|
|
|
|
| 80 |
refined_query = query_refiner(conversation_string, query)
|
|
|
|
|
|
|
| 81 |
context = find_match(refined_query,
|
| 82 |
pinecone_api_key=st.session_state['pinecone_api_key'],
|
| 83 |
pinecone_env=st.session_state['pinecone_env'],
|
| 84 |
pinecone_index_namespace=st.session_state['pinecone_index_namespace']
|
| 85 |
+
)
|
|
|
|
| 86 |
response = conversation.predict(input=f"Context:\n {context} \n\n Query:\n{query}")
|
| 87 |
st.session_state.requests.append(query)
|
| 88 |
st.session_state.responses.append(response)
|
requirements.txt
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
altair<5
|
| 2 |
-
streamlit==1.
|
| 3 |
streamlit-chat
|
| 4 |
langchain
|
| 5 |
openai
|
|
|
|
| 1 |
altair<5
|
| 2 |
+
streamlit==1.25.0
|
| 3 |
streamlit-chat
|
| 4 |
langchain
|
| 5 |
openai
|
utils.py
CHANGED
|
@@ -9,7 +9,8 @@ import tempfile
|
|
| 9 |
import streamlit as st
|
| 10 |
import openai
|
| 11 |
|
| 12 |
-
|
|
|
|
| 13 |
def get_embeddings_model():
|
| 14 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
| 15 |
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
|
|
|
|
| 9 |
import streamlit as st
|
| 10 |
import openai
|
| 11 |
|
| 12 |
+
# To create embeddings on hard disk
|
| 13 |
+
@st.cache_resource(allow_output_mutation=True)
|
| 14 |
def get_embeddings_model():
|
| 15 |
model = SentenceTransformer('all-MiniLM-L6-v2')
|
| 16 |
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
|
👋_Introduction.py
CHANGED
|
@@ -28,9 +28,6 @@ if 'responses' not in st.session_state:
|
|
| 28 |
if 'requests' not in st.session_state:
|
| 29 |
st.session_state['requests'] = []
|
| 30 |
|
| 31 |
-
if 'responses' not in st.session_state:
|
| 32 |
-
st.session_state['responses'] = ["How can I assist you?"]
|
| 33 |
-
|
| 34 |
if 'buffer_memory' not in st.session_state:
|
| 35 |
st.session_state.buffer_memory=ConversationBufferWindowMemory(k=3,return_messages=True)
|
| 36 |
|
|
|
|
| 28 |
if 'requests' not in st.session_state:
|
| 29 |
st.session_state['requests'] = []
|
| 30 |
|
|
|
|
|
|
|
|
|
|
| 31 |
if 'buffer_memory' not in st.session_state:
|
| 32 |
st.session_state.buffer_memory=ConversationBufferWindowMemory(k=3,return_messages=True)
|
| 33 |
|