# Source artifact: Hugging Face Space by neuralleap — "Update app.py" (commit d495416, verified)
import streamlit as st
from openai import OpenAI
from dotenv import load_dotenv
import os
import uuid
from datetime import datetime
import time
import re
# Load environment variables from a local .env file (if present) so the
# OpenAI API key can be supplied without hard-coding it.
load_dotenv()

# Configure the Streamlit page; st.set_page_config must be the first st.*
# call in the script. The page icon below restores the robot emoji that was
# mojibake-garbled ("πŸ€–") in the scraped source.
st.set_page_config(
    page_title="GPT-Style Chat Assistant",
    page_icon="🤖",  # shown in the browser tab
    layout="wide"    # full-width layout for the chat area
)
# Inject global CSS: fixed-bottom chat input, scrollable message area,
# compact sidebar spacing, and a slim custom scrollbar.
# unsafe_allow_html is required to emit a raw <style> element.
st.markdown("""
<style>
/* Main container styles */
.main .block-container {
max-width: 1200px;
padding-bottom: 0;
padding-top: 1rem;
}
/* Chat container styles */
.chat-container {
display: flex;
flex-direction: column;
height: calc(100vh - 80px);
overflow: hidden;
}
/* Messages area */
.messages-container {
flex-grow: 1;
overflow-y: auto;
padding-right: 1rem;
margin-bottom: 5rem;
}
/* Chat input container fixed at bottom */
.chat-input-container {
position: fixed;
bottom: 0;
left: 50%;
transform: translateX(-50%);
width: 100%;
max-width: 1200px;
background-color: var(--background-color);
padding: 1rem 0;
z-index: 100;
}
/* Improved chat message rendering */
.stChatMessage {
margin-bottom: 1rem;
}
/* Remove max height limit for messages */
.stChatMessage div[data-testid="stMarkdownContainer"] {
max-height: none !important;
overflow: visible !important;
}
/* Sidebar improvements */
.sidebar .sidebar-content {
overflow-y: auto;
height: 100vh;
}
/* Remove top padding from sidebar */
section[data-testid="stSidebar"] > div {
padding-top: 0;
}
/* Fix gap caused by sidebar collapse button */
.stSidebar > div:first-child {
margin-top: -1rem !important;
}
/* Specifically target the button that collapses sidebar */
button[kind="header"] {
height: 1.5rem !important;
margin-top: 0 !important;
padding-top: 0 !important;
}
/* Reduce spacing between sidebar elements */
section[data-testid="stSidebar"] .block-container {
padding-top: 1rem;
padding-bottom: 0;
}
/* Make sidebar expanders more compact */
section[data-testid="stSidebar"] .stExpander {
margin-bottom: 0.5rem;
}
/* Reduce vertical margins in sidebar */
section[data-testid="stSidebar"] h2,
section[data-testid="stSidebar"] h3 {
margin-top: 0.5rem;
margin-bottom: 0.5rem;
}
/* Compact spacing for markdown elements in sidebar */
section[data-testid="stSidebar"] [data-testid="stMarkdown"] {
margin-bottom: 0.5rem;
}
/* Give space for chat input at bottom */
.main .block-container {
padding-bottom: 80px;
}
/* Improve the chat bubble appearance */
.stChatMessage {
border-radius: 10px;
padding: 0.5rem;
transition: background-color 0.3s;
}
/* Make expanders more compact */
.streamlit-expanderHeader {
font-size: 1em;
padding-top: 0.5rem;
padding-bottom: 0.5rem;
}
/* Custom scrollbar */
::-webkit-scrollbar {
width: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(155, 155, 155, 0.5);
border-radius: 10px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(155, 155, 155, 0.8);
}
/* Button styles */
.edit-button-row {
display: flex;
gap: 10px;
margin-top: 10px;
}
.edit-button-row button {
flex: 1;
}
</style>
""", unsafe_allow_html=True)
# ---- Session-state bootstrap ----------------------------------------------
# st.session_state persists across Streamlit reruns within one browser tab.

# Mapping of conversation-id -> {"title", "messages", "should_update_title"}.
if "conversations" not in st.session_state:
    st.session_state.conversations = {}

# Guarantee there is always an active conversation to render; mirrors the
# shape built by create_new_chat() defined further down.
if "current_conversation_id" not in st.session_state:
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False
    }

# Default Model as o1
if "selected_model" not in st.session_state:
    st.session_state.selected_model = "o1"  # Default to o1
# Initialize the default system prompt sent as the first message of every
# API call (see get_ai_response).
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = (
        "You are an advanced AI assistant designed to provide clear, concise, and accurate information while maintaining a "
        "professional and informative tone. Your responses should be well-structured, logically sound, and adapted to the user's context.\n\n"
        "Guidelines:\n"
        "1. **Clarity & Accuracy**: Ensure that all responses are factual, precise, and easy to understand.\n"
        "2. **Depth & Detail**: Provide thorough explanations with relevant examples, case studies, or analogies when needed.\n"
        "3. **Context Awareness**: Understand and remember relevant details from the conversation to tailor responses appropriately.\n"
        "4. **Technical & Analytical Capability**: Be proficient in technical, scientific, and analytical discussions, offering well-reasoned arguments and solutions.\n"
        "5. **Step-by-Step Guidance**: When responding to queries related to problem-solving, coding, or calculations, provide a structured, step-by-step breakdown.\n"
        "6. **Comparisons & Evaluations**: When discussing alternatives or comparisons, include key differences, advantages, disadvantages, and real-world applications.\n"
        "7. **Professional Communication**: Maintain a respectful and professional tone, avoiding unnecessary jargon while ensuring technical accuracy.\n\n"
        "If the user requests explanations on complex topics, adapt the response to their level of expertise, simplifying where necessary or diving deeper into advanced details when appropriate."
    )

# Flags driving the system-prompt editor in the sidebar:
# is_editing_system_prompt toggles edit vs. display mode, and
# temp_system_prompt holds the uncommitted draft while editing.
if "is_editing_system_prompt" not in st.session_state:
    st.session_state.is_editing_system_prompt = False
if "temp_system_prompt" not in st.session_state:
    st.session_state.temp_system_prompt = st.session_state.system_prompt
# Removed the show_model_details state variable as we're using expander instead
# Resolve the OpenAI API key: prefer the environment variable, otherwise
# prompt for it in the sidebar. When no key is available the client stays
# None, which switches get_ai_response() into demo mode.
openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
if not openai_api_key:
    openai_api_key = st.sidebar.text_input("Enter OpenAI API Key", type="password")

# Lazily construct the client only when a key exists.
client = OpenAI(api_key=openai_api_key) if openai_api_key else None
# Registry of selectable models. Per entry:
#   description          - human-readable label for the sidebar
#   max_tokens           - context-window size shown in the model info panel
#   output_tokens        - completion cap passed to the API
#   supports_temperature - reasoning models (o1, o3-mini) reject the
#                          temperature parameter and use max_completion_tokens
AVAILABLE_MODELS = {
    "gpt-4o": {
        "description": "Latest GPT-4 Omni model",
        "max_tokens": 128000,
        "output_tokens": 4096,
        "supports_temperature": True
    },
    "o1": {
        "description": "OpenAI Reasoning Model - Standard",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False
    },
    "o3-mini": {
        "description": "OpenAI Advanced Reasoning - Mini",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False
    }
}
# Call the OpenAI Chat Completions API, returning either a response object
# or a plain error string.
def get_ai_response(prompt, history, stream=True):
    """Send the conversation to the OpenAI Chat Completions API.

    Args:
        prompt: The newest user message (not yet part of ``history``).
        history: Prior messages, each a dict with "role" and "content" keys.
        stream: When True, request a streaming response.

    Returns:
        The OpenAI response object (an iterator of chunks when ``stream`` is
        True), or a plain string on failure / demo mode. Callers must check
        ``isinstance(result, str)`` before iterating.
    """
    if not client:
        return "No API key provided. Running in demo mode."

    model = st.session_state.selected_model
    model_config = AVAILABLE_MODELS.get(model)
    # Guard against an unknown model id: previously a None config fell
    # through to a TypeError on subscripting, producing an opaque error.
    if model_config is None:
        return f"An error occurred: unknown model '{model}'."

    try:
        # The system prompt always leads the message list.
        messages = [{"role": "system", "content": st.session_state.system_prompt}]
        messages.extend({"role": m["role"], "content": m["content"]} for m in history)
        messages.append({"role": "user", "content": prompt})

        # Parameters common to all models.
        params = {
            "model": model,
            "messages": messages,
            "stream": stream,
        }
        if model_config["supports_temperature"]:
            params["temperature"] = 0.7
            params["max_tokens"] = model_config["output_tokens"]
        else:
            # Reasoning models (o1 / o3-mini) reject ``temperature`` and
            # take ``max_completion_tokens`` instead of ``max_tokens``.
            params["max_completion_tokens"] = model_config["output_tokens"]

        return client.chat.completions.create(**params)
    except Exception as e:
        # Broad catch is intentional: API failures are surfaced to the chat
        # UI as text instead of crashing the Streamlit script.
        return f"An error occurred: {str(e)}."
# Conversation management helper.
def create_new_chat():
    """Create an empty conversation and make it the active one."""
    conversation_id = str(uuid.uuid4())
    st.session_state.conversations[conversation_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False,
    }
    st.session_state.current_conversation_id = conversation_id
# System-prompt editor callbacks.
def start_editing_system_prompt():
    """Enter edit mode, seeding the draft with the active system prompt."""
    st.session_state.temp_system_prompt = st.session_state.system_prompt
    st.session_state.is_editing_system_prompt = True
def save_system_prompt():
    """Commit the edited draft as the active system prompt and exit edit mode."""
    st.session_state.is_editing_system_prompt = False
    st.session_state.system_prompt = st.session_state.temp_system_prompt
def cancel_editing_system_prompt():
    """Discard the draft (reset it to the active prompt) and exit edit mode."""
    st.session_state.temp_system_prompt = st.session_state.system_prompt
    st.session_state.is_editing_system_prompt = False
# ---------------------------------------------------------------------------
# Sidebar: model selection, system-prompt editor, and conversation manager.
# ---------------------------------------------------------------------------
with st.sidebar:
    st.subheader("Model Selection")

    # Model picker, pre-selected to the model stored in session state.
    selected_model = st.selectbox(
        "Choose a model:",
        list(AVAILABLE_MODELS.keys()),
        index=list(AVAILABLE_MODELS.keys()).index(st.session_state.selected_model)
    )
    st.session_state.selected_model = selected_model

    model_info = AVAILABLE_MODELS[selected_model]

    # Read-only summary of the selected model, styled like the prompt box.
    with st.expander("Selected Model Info", expanded=False):
        model_info_text = f"""Model: {selected_model}
Description: {model_info['description']}
Max Tokens: {model_info['max_tokens']}
Output Tokens: {model_info['output_tokens']}
Temperature: {"0.7" if model_info["supports_temperature"] else "Not supported for this model"}"""
        st.text_area(
            "",
            value=model_info_text,
            height=150,
            disabled=True
        )

    # Collapsible system-prompt viewer/editor with Save/Cancel controls.
    with st.expander("System Prompt", expanded=False):
        if st.session_state.is_editing_system_prompt:
            # Edit mode: bind the text area to the temporary draft.
            st.session_state.temp_system_prompt = st.text_area(
                "Edit System Prompt:",
                value=st.session_state.temp_system_prompt,
                height=200
            )
            # Three columns keep the buttons compact; the third is a spacer.
            col1, col2, col3 = st.columns(3)
            with col1:
                if st.button("Save", key="save_system_prompt_btn", use_container_width=True):
                    save_system_prompt()
            with col2:
                if st.button("Cancel", key="cancel_system_prompt_btn", use_container_width=True):
                    cancel_editing_system_prompt()
        else:
            # Display mode: show the current prompt read-only.
            st.markdown("**Current System Prompt:**")
            st.text_area(
                "",
                value=st.session_state.system_prompt,
                height=200,
                disabled=True
            )
            if st.button("Edit System Prompt", key="edit_system_prompt_btn", use_container_width=True):
                start_editing_system_prompt()

    st.subheader("Conversations")

    # Start a fresh conversation and refresh the UI immediately.
    if st.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()

    # Scrollable list of conversations with per-row select/delete controls.
    st.markdown('<div style="max-height: 400px; overflow-y: auto;">', unsafe_allow_html=True)
    for conv_id, conv_data in st.session_state.conversations.items():
        col1, col2 = st.columns([4, 1])
        is_active = conv_id == st.session_state.current_conversation_id
        with col1:
            # NOTE(review): the active chat renders as "secondary" and the
            # inactive ones as "tertiary" — confirm this emphasis is intended.
            if st.button(
                conv_data["title"],
                key=f"conv_{conv_id}",
                use_container_width=True,
                type="secondary" if is_active else "tertiary"
            ):
                st.session_state.current_conversation_id = conv_id
                st.rerun()
        with col2:
            # Delete button; the wastebasket emoji below restores a
            # mojibake-garbled glyph ("πŸ—‘οΈ") in the scraped source.
            # Deleting during dict iteration is safe only because st.rerun()
            # aborts the script (and thus the loop) right away.
            if st.button("🗑️", key=f"del_{conv_id}"):
                if conv_id in st.session_state.conversations:
                    del st.session_state.conversations[conv_id]
                # If the active chat was deleted, fall back to another one,
                # or create a fresh chat when none remain.
                if conv_id == st.session_state.current_conversation_id:
                    if st.session_state.conversations:
                        st.session_state.current_conversation_id = next(iter(st.session_state.conversations))
                    else:
                        create_new_chat()
                st.rerun()
    st.markdown('</div>', unsafe_allow_html=True)
# ---- Main chat area --------------------------------------------------------
st.markdown('<div class="chat-container">', unsafe_allow_html=True)

# Scrollable messages region (styled by the CSS injected above).
st.markdown('<div class="messages-container">', unsafe_allow_html=True)
chat_container = st.container()
with chat_container:
    current_id = st.session_state.current_conversation_id
    # Fall back to an empty message list if the id is somehow missing.
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    # NOTE: `messages` aliases the conversation's list, so appends below
    # also update the stored history.
    messages = current_conv["messages"]
    # Replay the conversation history.
    for message in messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
st.markdown('</div>', unsafe_allow_html=True)

# Fixed-position chat input pinned to the bottom of the viewport.
st.markdown('<div class="chat-input-container">', unsafe_allow_html=True)
prompt = st.chat_input("What's on your mind?", key="chat_input")
st.markdown('</div>', unsafe_allow_html=True)
st.markdown('</div>', unsafe_allow_html=True)
# ---- Handle a newly submitted prompt ---------------------------------------
if prompt:
    if len(messages) == 0:
        # First message of the chat: decide whether to auto-title it later.
        # Match greetings on word boundaries — the original substring test
        # misfired (e.g. "which" contains "hi").
        greetings = ["hello", "hi", "hey", "greetings", "good morning", "good afternoon", "good evening", "howdy"]
        lowered = prompt.lower()
        is_greeting = (
            any(re.search(r"\b" + re.escape(g) + r"\b", lowered) for g in greetings)
            and len(prompt.split()) < 5
        )
        # Keep the default "New chat" title for greetings; otherwise flag the
        # conversation so the title is derived after the first response.
        if not is_greeting:
            st.session_state.conversations[current_id]["should_update_title"] = True
    elif 1 <= len(messages) <= 5 and st.session_state.conversations[current_id]["title"].startswith("New chat"):
        # Still untitled after a few turns: derive a short title (max 4 words,
        # capped at 30 chars) from this prompt.
        words = prompt.split()
        if len(words) > 4:
            short_title = " ".join(words[:4]) + "..."
        else:
            short_title = prompt
        st.session_state.conversations[current_id]["title"] = short_title[:30]

    # Record and render the user's message.
    messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the assistant's reply into a placeholder.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        try:
            # Exclude the just-appended user message; get_ai_response appends
            # the prompt itself.
            response_stream = get_ai_response(prompt, messages[:-1], stream=True)
            if isinstance(response_stream, str):
                # A string return signals demo mode or an API error.
                message_placeholder.markdown(response_stream)
                full_response = response_stream
            else:
                for chunk in response_stream:
                    if hasattr(chunk.choices[0].delta, 'content'):
                        content = chunk.choices[0].delta.content
                        if content is not None:
                            full_response += content
                            # "▌" is the typing-cursor glyph (restored from
                            # the mojibake "β–Œ" in the scraped source).
                            message_placeholder.markdown(full_response + "▌")
                            time.sleep(0.002)
                # Final render without the cursor, lightly cleaned up.
                if full_response:
                    cleaned_response = full_response
                    # Collapse runs of 5+ box-drawing dashes to a fixed rule.
                    cleaned_response = re.sub(r'━{5,}', '━━━━━', cleaned_response)
                    # Collapse 3+ consecutive newlines down to one blank line.
                    cleaned_response = re.sub(r'\n{3,}', '\n\n', cleaned_response)
                    message_placeholder.markdown(cleaned_response)
                else:
                    message_placeholder.markdown("No response received from the model.")

            # Persist the assistant reply into the conversation history.
            messages.append({"role": "assistant", "content": full_response})

            # Auto-title after the first exchange, if flagged above.
            if len(messages) == 2 and st.session_state.conversations[current_id].get("should_update_title", False):
                user_prompt = messages[0]["content"]
                words = user_prompt.split()
                if len(words) >= 4:
                    new_title = " ".join(words[:4])
                else:
                    new_title = user_prompt
                st.session_state.conversations[current_id]["title"] = new_title[:30]
                st.session_state.conversations[current_id]["should_update_title"] = False

            # Rerun so the sidebar title and history refresh immediately.
            st.rerun()
        except Exception as e:
            # Surface the failure in the chat instead of crashing the app.
            error_msg = f"Error: {str(e)}"
            message_placeholder.markdown(error_msg)
            messages.append({"role": "assistant", "content": error_msg})
# ---- Client-side auto-scroll -----------------------------------------------
# Injected JavaScript that keeps the chat pinned to the newest message. It
# runs inside Streamlit's component iframe, hence the window.parent queries
# to reach the app's own DOM. A MutationObserver plus a 1s interval cover
# both streamed updates and slow initial loads.
scroll_script = """
<script>
// Function to scroll messages container to bottom
function scrollToBottom() {
// Small delay to ensure DOM is updated
setTimeout(() => {
const messagesContainer = window.parent.document.querySelector('.messages-container');
if (messagesContainer) {
messagesContainer.scrollTop = messagesContainer.scrollHeight;
}
// Also scroll main container as backup
const mainContainer = window.parent.document.querySelector('.main');
if (mainContainer) {
mainContainer.scrollTop = mainContainer.scrollHeight;
}
}, 100);
}
// Initial scroll
scrollToBottom();
// Set up mutation observer for dynamic content changes
const observerConfig = {
childList: true,
subtree: true,
characterData: true,
attributes: true
};
// Create a mutation observer instance
const observer = new MutationObserver(function(mutations) {
scrollToBottom();
});
// Start observing the chat container for changes
const targetNode = window.parent.document.querySelector('.messages-container');
if (targetNode) {
observer.observe(targetNode, observerConfig);
}
// Also observe any new chat messages
const chatMessages = window.parent.document.querySelectorAll('.stChatMessage');
chatMessages.forEach(function(message) {
observer.observe(message, observerConfig);
});
// Handle window resize
window.onresize = scrollToBottom;
// Make sure we scroll when page is fully loaded
window.addEventListener('load', scrollToBottom);
document.addEventListener('DOMContentLoaded', scrollToBottom);
// Additional periodic check for slow connections S
setInterval(scrollToBottom, 1000);
</script>
"""
st.markdown(scroll_script, unsafe_allow_html=True)