John Graham Reynolds committed on
Commit
883ba18
·
1 Parent(s): 1898c5b

build inference app

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +102 -4
src/streamlit_app.py CHANGED
@@ -1,11 +1,109 @@
1
  import streamlit as st
2
  from chain import GlossaryChain
3
 
4
- st.title("MistralAI + VUMC Glossary Chatbot")
 
 
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  chain = GlossaryChain()
7
 
8
- input_text = st.text_input("Enter your question:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- if st.button("Submit"):
11
- st.write(chain.stream(input_text))
 
 
 
 
import streamlit as st
from chain import GlossaryChain

# --- Configuration constants ----------------------------------------------

MODEL_AVATAR_URL = "./mistral.jpeg"  # avatar image shown next to assistant messages
MAX_CHAT_TURNS = 10 # limit this for preliminary testing
MSG_MAX_TURNS_EXCEEDED = f"Sorry! The Mistral AI🦜🇫🇷🚀 playground is limited to {MAX_CHAT_TURNS} turns in a single history. Click the 'Clear Chat' button or refresh the page to start a new conversation."

EXAMPLE_PROMPTS = [
    "How is a data lake used at Vanderbilt University Medical Center?",
    "In a table, what are some of the greatest hurdles to healthcare in the United States?",
    "What does EDW stand for?",
    "Give me a Python code snippet that reads a dataframe from a Databricks Unity Catalog table.",
    "Write a short story about a country concert in Nashville, Tennessee.",
    "Tell me about maximum out-of-pocket costs for healthcare in the United States.",
]

TITLE = "Mistral AI🇫🇷 + LangChain🦜 + FAISS📘: VUMC Glossary Chatbot"
DESCRIPTION = """Welcome to the Mistral AI🇫🇷 + LangChain🦜 + FAISS📘: VUMC Glossary Chatbot! \n

**Overview and Usage**: This Hugging Face 🤗 Space demos a retrieval-augmented chat model built with Mistral AI.

This AI assistant is built atop two Mistral AI🇫🇷 models: the **mistral-embed** model for the embedding and retrieval of information from the VUMC Glossary
and the open-weights **open-mistral-7b** model for the generation of the response. The external information is embedded to and retrieved from a FAISS📘 vector store. LangChain🦜 is used to chain the models together into a working chatbot.

The model has been augmented with a glossary of terms specific to Vanderbilt University Medical Center.
The chat model has knowledge of terms like **EDW**, **HCERA**, **NRHA** and **thousands more**. (Ask the assistant if you don't know what any of these terms mean!)

On the left is a sidebar of **Examples**; click any of these examples to issue the corresponding query to the chat model.

**Disclaimer**: The model has **no access to PHI**. \n

Please provide any additional, larger feedback, ideas, or issues to the email: **johngrahamreynolds@gmail.com**. Happy chatting!"""

GENERAL_ERROR_MSG = "An error occurred. Please refresh the page to start a new conversation."

# --- Page setup ------------------------------------------------------------

# set_page_config must stay the first Streamlit call in the script
st.set_page_config(layout="wide")

st.title(TITLE)
# reuse MODEL_AVATAR_URL so the hero image and the chat avatar always point
# at the same file (previously hard-coded as "mistral.jpeg")
st.image(MODEL_AVATAR_URL, caption="Mistral AI for Retrieval Augmented Generation", width=400)
st.markdown(DESCRIPTION)
st.markdown("\n")

# inject the app's custom CSS; explicit encoding avoids platform-dependent decoding
with open("./style.css", encoding="utf-8") as css:
    st.markdown(f'<style>{css.read()}</style>', unsafe_allow_html=True)

# --- Session state ---------------------------------------------------------

if "messages" not in st.session_state:
    st.session_state["messages"] = []

# NOTE(review): "feedback" is initialized but never read anywhere in this
# file — confirm it is consumed elsewhere before removing it.
if "feedback" not in st.session_state:
    st.session_state["feedback"] = [None]

def clear_chat_history():
    """Reset the stored conversation (wired to the 'Clear Chat' button)."""
    st.session_state["messages"] = []

st.button('Clear Chat', on_click=clear_chat_history)
# Functionality
# Build the retrieval-augmented chain once per script run; every chat turn
# below streams its answer through this single instance.
chain = GlossaryChain()
def last_role_is_user():
    """Return True when the most recent stored chat message came from the user."""
    messages = st.session_state["messages"]
    return bool(messages) and messages[-1]["role"] == "user"
def get_last_question():
    """Return the text content of the most recent stored chat message."""
    last_message = st.session_state["messages"][-1]
    return last_message["content"]
# if assistant is the last message, we need to prompt the user
# if user is the last message, we need to retry the assistant.
def handle_user_input(user_input):
    """Render one chat turn inside the history pane.

    If the last stored message is from the user (a prior turn died before the
    assistant answered), only the assistant response is (re)generated; otherwise
    the user message is recorded and echoed first. The assistant reply is always
    appended to session state.
    """
    with history:
        # FIX: MAX_CHAT_TURNS was declared but never enforced — the advertised
        # turn limit was dead code. Count completed user turns and refuse new
        # work once the cap is reached.
        user_turns = sum(1 for m in st.session_state["messages"] if m["role"] == "user")
        if user_turns >= MAX_CHAT_TURNS:
            with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
                st.markdown(MSG_MAX_TURNS_EXCEEDED)
            return

        if last_role_is_user():
            # retry the assistant if the user tries to send a new message
            # NOTE(review): this retries with the NEW input, not
            # get_last_question() — confirm that is the intended semantics.
            response_content = _stream_assistant_reply(user_input)
        else:
            st.session_state["messages"].append({"role": "user", "content": user_input})
            with st.chat_message("user", avatar="🧑‍💻"):
                st.markdown(user_input)
            response_content = _stream_assistant_reply(user_input)

        st.session_state["messages"].append({"role": "assistant", "content": response_content})

def _stream_assistant_reply(question):
    """Stream the chain's answer into an assistant bubble; return the full text."""
    with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
        # NOTE chaining does not currently process chat history for context
        return st.write_stream(chain.stream(question))
# Main chat area: a fixed-height, scrollable history pane above the input box.
main = st.container()
with main:
    history = st.container(height=400)
    with history:
        # Replay the stored conversation so it survives Streamlit reruns.
        for msg in st.session_state["messages"]:
            is_assistant = msg["role"] == "assistant"
            bubble_avatar = MODEL_AVATAR_URL if is_assistant else "🧑‍💻"
            with st.chat_message(msg["role"], avatar=bubble_avatar):
                if msg["content"] is not None:
                    st.markdown(msg["content"])

    if prompt := st.chat_input("Type a message!", max_chars=5000):
        handle_user_input(prompt)
    st.markdown("\n")  # add some space for iphone users
# Sidebar of canned example queries; clicking one submits it as user input.
with st.sidebar:
    with st.container():
        st.title("Examples")
        for example in EXAMPLE_PROMPTS:
            # args=(example,) binds the prompt per button, so each click
            # dispatches its own text to handle_user_input.
            st.button(example, args=(example,), on_click=handle_user_input)