Spaces:
Sleeping
Sleeping
Update app.py
#1
by
Mykes
- opened
app.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
from llama_cpp import Llama
|
| 3 |
|
| 4 |
-
st.set_page_config(page_title="Chat with AI", page_icon="🤖")
|
| 5 |
|
| 6 |
# Custom CSS for better styling
|
| 7 |
st.markdown("""
|
|
@@ -57,6 +57,36 @@ def format_context(messages):
|
|
| 57 |
context += f"Assistant: {message['content']}\n"
|
| 58 |
return context
|
| 59 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
# Initialize chat history
|
| 61 |
if "messages" not in st.session_state:
|
| 62 |
st.session_state.messages = []
|
|
@@ -101,5 +131,14 @@ if prompt := st.chat_input("What is your question?"):
|
|
| 101 |
# Add assistant response to chat history
|
| 102 |
st.session_state.messages.append({"role": "assistant", "content": assistant_response})
|
| 103 |
|
| 104 |
-
|
| 105 |
-
st.sidebar.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
from llama_cpp import Llama
|
| 3 |
|
| 4 |
+
st.set_page_config(page_title="Chat with AI", page_icon="🤖", layout="wide")
|
| 5 |
|
| 6 |
# Custom CSS for better styling
|
| 7 |
st.markdown("""
|
|
|
|
| 57 |
context += f"Assistant: {message['content']}\n"
|
| 58 |
return context
|
| 59 |
|
| 60 |
+
# Sidebar
|
| 61 |
+
st.sidebar.title("Chat with AI")
|
| 62 |
+
st.sidebar.markdown("This is a simple chat interface using Streamlit and an AI model.")
|
| 63 |
+
|
| 64 |
+
# Add useful information to the sidebar
|
| 65 |
+
st.sidebar.header("How to use")
|
| 66 |
+
st.sidebar.markdown("""
|
| 67 |
+
1. Type your question in the chat input box at the bottom of the screen.
|
| 68 |
+
2. Press Enter or click the Send button to submit your question.
|
| 69 |
+
3. The AI will generate a response based on your input.
|
| 70 |
+
4. You can have a continuous conversation by asking follow-up questions.
|
| 71 |
+
""")
|
| 72 |
+
|
| 73 |
+
st.sidebar.header("Model Information")
|
| 74 |
+
st.sidebar.markdown("""
|
| 75 |
+
- Model: med_phi3-mini-4k-GGUF
|
| 76 |
+
- Context Length: 512 tokens
|
| 77 |
+
- This model is specialized in medical knowledge.
|
| 78 |
+
""")
|
| 79 |
+
|
| 80 |
+
st.sidebar.header("Tips")
|
| 81 |
+
st.sidebar.markdown("""
|
| 82 |
+
- Be clear and specific in your questions.
|
| 83 |
+
- For medical queries, provide relevant details.
|
| 84 |
+
- Remember that this is an AI model and may not always be 100% accurate.
|
| 85 |
+
""")
|
| 86 |
+
|
| 87 |
+
# Main chat interface
|
| 88 |
+
st.title("Chat with AI")
|
| 89 |
+
|
| 90 |
# Initialize chat history
|
| 91 |
if "messages" not in st.session_state:
|
| 92 |
st.session_state.messages = []
|
|
|
|
| 131 |
# Add assistant response to chat history
|
| 132 |
st.session_state.messages.append({"role": "assistant", "content": assistant_response})
|
| 133 |
|
| 134 |
+
# Add a button to clear the chat history
|
| 135 |
+
if st.sidebar.button("Clear Chat History"):
|
| 136 |
+
st.session_state.messages = []
|
| 137 |
+
st.experimental_rerun()
|
| 138 |
+
|
| 139 |
+
# Display the number of messages in the current conversation
|
| 140 |
+
st.sidebar.markdown(f"Current conversation length: {len(st.session_state.messages)} messages")
|
| 141 |
+
|
| 142 |
+
# Add a footer
|
| 143 |
+
st.sidebar.markdown("---")
|
| 144 |
+
st.sidebar.markdown("Created with ❤️ using Streamlit and Llama.cpp")
|