import streamlit as st
from chain import GlossaryChain

MODEL_AVATAR_URL = "./mistral.jpeg"

EXAMPLE_PROMPTS = [
    "How is a data lake used at Vanderbilt University Medical Center?",
    "In a table, what are some of the greatest hurdles to healthcare in the United States?",
    "What does EDW stand for?",
    "Give me a Python code snippet that reads a dataframe from a Databricks Unity Catalog table.",
    "Write a short story about a country concert in Nashville, Tennessee.",
    "Tell me about maximum out-of-pocket costs for healthcare in the United States.",
]

TITLE = "Mistral AI🇫🇷 + LangChain🦜 + FAISS📘: VUMC Glossary Chatbot"
DESCRIPTION = """Welcome to the Mistral AI🇫🇷 + LangChain🦜 + FAISS📘: VUMC Glossary Chatbot! \n
**Overview and Usage**: This Hugging Face 🤗 Space demos a retrieval-augmented chat model built with Mistral AI.
The AI assistant is built atop two Mistral AI🇫🇷 models: the **mistral-embed** model, which embeds and retrieves information from the VUMC Glossary,
and the open-weights **open-mistral-7b** model, which generates the response. The external information is embedded into and retrieved from a FAISS📘 vector store. LangChain🦜 chains the models together into a working chatbot.
The model has been augmented with a glossary of terms specific to Vanderbilt University Medical Center.
The chat model has knowledge of terms like **EDW**, **HCERA**, **NRHA**, and **thousands more**. (Ask the assistant if you don't know what any of these terms mean!)
On the left is a sidebar of **Examples**; click any of these examples to issue the corresponding query to the chat model.
**Disclaimer**: The model has **no access to PHI**. \n
Please send any additional feedback, ideas, or issues to **johngrahamreynolds@gmail.com**. Happy chatting!"""
st.set_page_config(layout="wide")

st.title(TITLE)
st.image("mistral.jpeg", caption="Mistral AI for Retrieval Augmented Generation", width=400)
st.markdown(DESCRIPTION)
st.markdown("\n")

with open("./style.css") as css:
    st.markdown(f"<style>{css.read()}</style>", unsafe_allow_html=True)
| if "messages" not in st.session_state: | |
| st.session_state["messages"] = [] | |
| if "feedback" not in st.session_state: | |
| st.session_state["feedback"] = [None] | |
| def clear_chat_history(): | |
| st.session_state["messages"] = [] | |
| st.button('Clear Chat', on_click=clear_chat_history) | |
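# GlossaryChain (imported from chain.py, which is not shown here) wraps the
# retrieval-augmented pipeline described above: per the description, queries are
# embedded with mistral-embed, matching VUMC Glossary entries are retrieved from
# a FAISS vector store, and open-mistral-7b generates the response, all chained
# together with LangChain.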
# construct the chain
chain = GlossaryChain()
def last_role_is_user():
    return len(st.session_state["messages"]) > 0 and st.session_state["messages"][-1]["role"] == "user"

def get_last_question():
    return st.session_state["messages"][-1]["content"]

# If the assistant sent the last message, we need to prompt the user.
# If the user sent the last message, we need to retry the assistant.
def handle_user_input(user_input):
    with history:
        response_content = ""
        if last_role_is_user():
            # retry the assistant if the user tries to send a new message
            with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
                response_stream = chain.stream(user_input)  # NOTE chaining does not currently process chat history for context
                response_content = st.write_stream(response_stream)
        else:
            st.session_state["messages"].append({"role": "user", "content": user_input})
            with st.chat_message("user", avatar="🧑‍💻"):
                st.markdown(user_input)
            with st.chat_message("assistant", avatar=MODEL_AVATAR_URL):
                response_stream = chain.stream(user_input)  # NOTE chaining does not currently process chat history for context
                response_content = st.write_stream(response_stream)
        st.session_state["messages"].append({"role": "assistant", "content": response_content})
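# main chat area: render the conversation history and accept new user input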
main = st.container()
with main:
    history = st.container(height=400)
    with history:
        for message in st.session_state["messages"]:
            avatar = "🧑‍💻"
            if message["role"] == "assistant":
                avatar = MODEL_AVATAR_URL
            with st.chat_message(message["role"], avatar=avatar):
                if message["content"] is not None:
                    st.markdown(message["content"])
    if prompt := st.chat_input("Type a message!", max_chars=5000):
        handle_user_input(prompt)
    st.markdown("\n")  # add some space for iPhone users
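# sidebar of example prompts; clicking a button sends that prompt to the chat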
with st.sidebar:
    with st.container():
        st.title("Examples")
        for prompt in EXAMPLE_PROMPTS:
            st.button(prompt, args=(prompt,), on_click=handle_user_input)
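# ---------------------------------------------------------------------------
# For reference only: GlossaryChain lives in chain.py, which is not shown in
# this file. Based on the description above (mistral-embed embeddings, a FAISS
# vector store of VUMC Glossary entries, and open-mistral-7b for generation,
# wired together with LangChain), a minimal sketch of chain.py might look
# roughly like the commented code below. The index path, prompt text, and
# class internals are assumptions, not the actual implementation.
#
# from langchain_community.vectorstores import FAISS
# from langchain_core.output_parsers import StrOutputParser
# from langchain_core.prompts import ChatPromptTemplate
# from langchain_core.runnables import RunnablePassthrough
# from langchain_mistralai import ChatMistralAI, MistralAIEmbeddings
#
#
# class GlossaryChain:
#     def __init__(self):
#         # embed and retrieve glossary entries with mistral-embed + FAISS
#         embeddings = MistralAIEmbeddings(model="mistral-embed")
#         vector_store = FAISS.load_local(
#             "faiss_index", embeddings, allow_dangerous_deserialization=True
#         )
#         retriever = vector_store.as_retriever()
#
#         # generate the answer with open-mistral-7b using the retrieved context
#         prompt = ChatPromptTemplate.from_template(
#             "Use the following VUMC Glossary context to answer the question.\n\n"
#             "Context:\n{context}\n\nQuestion: {question}"
#         )
#         llm = ChatMistralAI(model="open-mistral-7b")
#         self.chain = (
#             {"context": retriever, "question": RunnablePassthrough()}
#             | prompt
#             | llm
#             | StrOutputParser()
#         )
#
#     def stream(self, question):
#         # stream the generated answer chunk by chunk for st.write_stream
#         return self.chain.stream(question)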