import streamlit as st
import streamlit.components.v1 as components
from openai import OpenAI
from dotenv import load_dotenv
import os
import uuid
from datetime import datetime
import time
import re

# Load environment variables (e.g. the OpenAI API key) from a local .env file.
load_dotenv()
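
# ---- Page configuration ----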

st.set_page_config(
    page_title="GPT-Style Chat Assistant",
    page_icon="🤖",
    layout="wide"
)
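
# Global CSS: container width, fixed chat input, chat bubbles, sidebar spacing and custom scrollbars.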

st.markdown("""
<style>
    /* Main container styles (bottom padding leaves room for the fixed chat input) */
    .main .block-container {
        max-width: 1200px;
        padding-top: 1rem;
        padding-bottom: 80px;
    }

    /* Chat container styles */
    .chat-container {
        display: flex;
        flex-direction: column;
        height: calc(100vh - 80px);
        overflow: hidden;
    }

    /* Messages area */
    .messages-container {
        flex-grow: 1;
        overflow-y: auto;
        padding-right: 1rem;
        margin-bottom: 5rem;
    }

    /* Chat input container fixed at bottom */
    .chat-input-container {
        position: fixed;
        bottom: 0;
        left: 50%;
        transform: translateX(-50%);
        width: 100%;
        max-width: 1200px;
        background-color: var(--background-color);
        padding: 1rem 0;
        z-index: 100;
    }

    /* Chat bubble appearance */
    .stChatMessage {
        margin-bottom: 1rem;
        border-radius: 10px;
        padding: 0.5rem;
        transition: background-color 0.3s;
    }

    /* Remove max height limit for messages */
    .stChatMessage div[data-testid="stMarkdownContainer"] {
        max-height: none !important;
        overflow: visible !important;
    }

    /* Sidebar improvements */
    .sidebar .sidebar-content {
        overflow-y: auto;
        height: 100vh;
    }

    /* Remove top padding from sidebar */
    section[data-testid="stSidebar"] > div {
        padding-top: 0;
    }

    /* Fix gap caused by sidebar collapse button */
    .stSidebar > div:first-child {
        margin-top: -1rem !important;
    }

    /* Specifically target the button that collapses the sidebar */
    button[kind="header"] {
        height: 1.5rem !important;
        margin-top: 0 !important;
        padding-top: 0 !important;
    }

    /* Reduce spacing between sidebar elements */
    section[data-testid="stSidebar"] .block-container {
        padding-top: 1rem;
        padding-bottom: 0;
    }

    /* Make sidebar expanders more compact */
    section[data-testid="stSidebar"] .stExpander {
        margin-bottom: 0.5rem;
    }

    /* Reduce vertical margins in sidebar */
    section[data-testid="stSidebar"] h2,
    section[data-testid="stSidebar"] h3 {
        margin-top: 0.5rem;
        margin-bottom: 0.5rem;
    }

    /* Compact spacing for markdown elements in sidebar */
    section[data-testid="stSidebar"] [data-testid="stMarkdown"] {
        margin-bottom: 0.5rem;
    }

    /* Make expanders more compact */
    .streamlit-expanderHeader {
        font-size: 1em;
        padding-top: 0.5rem;
        padding-bottom: 0.5rem;
    }

    /* Custom scrollbar */
    ::-webkit-scrollbar {
        width: 6px;
    }

    ::-webkit-scrollbar-track {
        background: transparent;
    }

    ::-webkit-scrollbar-thumb {
        background: rgba(155, 155, 155, 0.5);
        border-radius: 10px;
    }

    ::-webkit-scrollbar-thumb:hover {
        background: rgba(155, 155, 155, 0.8);
    }

    /* Button styles */
    .edit-button-row {
        display: flex;
        gap: 10px;
        margin-top: 10px;
    }

    .edit-button-row button {
        flex: 1;
    }
</style>
""", unsafe_allow_html=True)
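
# ---- Session state defaults ----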

if "conversations" not in st.session_state:
    st.session_state.conversations = {}

if "current_conversation_id" not in st.session_state:
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False
    }

if "selected_model" not in st.session_state:
    st.session_state.selected_model = "o1"

if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = (
        "You are an advanced AI assistant designed to provide clear, concise, and accurate information while maintaining a "
        "professional and informative tone. Your responses should be well-structured, logically sound, and adapted to the user's context.\n\n"
        "Guidelines:\n"
        "1. **Clarity & Accuracy**: Ensure that all responses are factual, precise, and easy to understand.\n"
        "2. **Depth & Detail**: Provide thorough explanations with relevant examples, case studies, or analogies when needed.\n"
        "3. **Context Awareness**: Understand and remember relevant details from the conversation to tailor responses appropriately.\n"
        "4. **Technical & Analytical Capability**: Be proficient in technical, scientific, and analytical discussions, offering well-reasoned arguments and solutions.\n"
        "5. **Step-by-Step Guidance**: When responding to queries related to problem-solving, coding, or calculations, provide a structured, step-by-step breakdown.\n"
        "6. **Comparisons & Evaluations**: When discussing alternatives or comparisons, include key differences, advantages, disadvantages, and real-world applications.\n"
        "7. **Professional Communication**: Maintain a respectful and professional tone, avoiding unnecessary jargon while ensuring technical accuracy.\n\n"
        "If the user requests explanations on complex topics, adapt the response to their level of expertise, simplifying where necessary or diving deeper into advanced details when appropriate."
    )

if "is_editing_system_prompt" not in st.session_state:
    st.session_state.is_editing_system_prompt = False

if "temp_system_prompt" not in st.session_state:
    st.session_state.temp_system_prompt = st.session_state.system_prompt
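
# ---- OpenAI client ----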

openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
if not openai_api_key:
    openai_api_key = st.sidebar.text_input("Enter OpenAI API Key", type="password")

client = None
if openai_api_key:
    client = OpenAI(api_key=openai_api_key)
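
# Models offered in the sidebar: token limits and whether temperature is supported.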

AVAILABLE_MODELS = {
    "gpt-4o": {
        "description": "Latest GPT-4 Omni model",
        "max_tokens": 128000,
        "output_tokens": 4096,
        "supports_temperature": True
    },
    "o1": {
        "description": "OpenAI Reasoning Model - Standard",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False
    },
    "o3-mini": {
        "description": "OpenAI Advanced Reasoning - Mini",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False
    }
}
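
# ---- Model call helper ----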

def get_ai_response(prompt, history, stream=True):
    """Send the conversation to the selected model and return the response (a stream or a string)."""
    if not client:
        return "No API key provided. Running in demo mode."

    try:
        system_prompt = st.session_state.system_prompt

        # Build the message list: system prompt, prior turns, then the new user prompt.
        messages = [
            {"role": "system", "content": system_prompt}
        ]
        for msg in history:
            messages.append({"role": msg["role"], "content": msg["content"]})
        messages.append({"role": "user", "content": prompt})

        model = st.session_state.selected_model
        model_config = AVAILABLE_MODELS.get(model)

        params = {
            "model": model,
            "messages": messages,
            "stream": stream
        }

        if model_config["supports_temperature"]:
            params["temperature"] = 0.7
            params["max_tokens"] = model_config["output_tokens"]
        else:
            # Reasoning models (o1, o3-mini) reject `temperature` and use `max_completion_tokens`.
            params["max_completion_tokens"] = model_config["output_tokens"]

        response = client.chat.completions.create(**params)

        return response

    except Exception as e:
        return f"An error occurred: {str(e)}."
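
# ---- Conversation and system-prompt helpers ----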

def create_new_chat():
    """Create a fresh conversation and make it the active one."""
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False
    }


def start_editing_system_prompt():
    st.session_state.is_editing_system_prompt = True
    st.session_state.temp_system_prompt = st.session_state.system_prompt


def save_system_prompt():
    st.session_state.system_prompt = st.session_state.temp_system_prompt
    st.session_state.is_editing_system_prompt = False


def cancel_editing_system_prompt():
    st.session_state.is_editing_system_prompt = False
    st.session_state.temp_system_prompt = st.session_state.system_prompt
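
# ---- Sidebar: model selection, system prompt, conversation list ----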

with st.sidebar:
    st.subheader("Model Selection")

    selected_model = st.selectbox(
        "Choose a model:",
        list(AVAILABLE_MODELS.keys()),
        index=list(AVAILABLE_MODELS.keys()).index(st.session_state.selected_model)
    )
    st.session_state.selected_model = selected_model

    model_info = AVAILABLE_MODELS[selected_model]

    with st.expander("Selected Model Info", expanded=False):
        model_info_text = f"""Model: {selected_model}
Description: {model_info['description']}
Max Tokens: {model_info['max_tokens']}
Output Tokens: {model_info['output_tokens']}
Temperature: {"0.7" if model_info["supports_temperature"] else "Not supported for this model"}"""

        st.text_area(
            "Model info",
            value=model_info_text,
            height=150,
            disabled=True,
            label_visibility="collapsed"
        )

    with st.expander("System Prompt", expanded=False):
        if st.session_state.is_editing_system_prompt:
            st.session_state.temp_system_prompt = st.text_area(
                "Edit System Prompt:",
                value=st.session_state.temp_system_prompt,
                height=200
            )

            col1, col2, _ = st.columns(3)
            with col1:
                if st.button("Save", key="save_system_prompt_btn", use_container_width=True):
                    save_system_prompt()
                    st.rerun()
            with col2:
                if st.button("Cancel", key="cancel_system_prompt_btn", use_container_width=True):
                    cancel_editing_system_prompt()
                    st.rerun()
        else:
            st.markdown("**Current System Prompt:**")
            st.text_area(
                "Current system prompt",
                value=st.session_state.system_prompt,
                height=200,
                disabled=True,
                label_visibility="collapsed"
            )
            if st.button("Edit System Prompt", key="edit_system_prompt_btn", use_container_width=True):
                start_editing_system_prompt()
                st.rerun()

    st.subheader("Conversations")

    if st.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()

    st.markdown('<div style="max-height: 400px; overflow-y: auto;">', unsafe_allow_html=True)
    for conv_id, conv_data in st.session_state.conversations.items():
        col1, col2 = st.columns([4, 1])
        is_active = conv_id == st.session_state.current_conversation_id

        with col1:
            if st.button(
                conv_data["title"],
                key=f"conv_{conv_id}",
                use_container_width=True,
                type="secondary" if is_active else "tertiary"
            ):
                st.session_state.current_conversation_id = conv_id
                st.rerun()

        with col2:
            if st.button("🗑️", key=f"del_{conv_id}"):
                if conv_id in st.session_state.conversations:
                    del st.session_state.conversations[conv_id]
                if conv_id == st.session_state.current_conversation_id:
                    if st.session_state.conversations:
                        st.session_state.current_conversation_id = next(iter(st.session_state.conversations))
                    else:
                        create_new_chat()
                st.rerun()
    st.markdown('</div>', unsafe_allow_html=True)
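
# ---- Main chat area: message history and input box ----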

st.markdown('<div class="chat-container">', unsafe_allow_html=True)

st.markdown('<div class="messages-container">', unsafe_allow_html=True)
chat_container = st.container()

with chat_container:
    current_id = st.session_state.current_conversation_id
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    messages = current_conv["messages"]

    for message in messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

st.markdown('</div>', unsafe_allow_html=True)

st.markdown('<div class="chat-input-container">', unsafe_allow_html=True)
prompt = st.chat_input("What's on your mind?", key="chat_input")
st.markdown('</div>', unsafe_allow_html=True)

st.markdown('</div>', unsafe_allow_html=True)
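
# Handle a newly submitted prompt: update the conversation title if needed, echo the user message, then stream the assistant reply.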

if prompt:
    # First message in a conversation: flag it for a title update unless it is just a greeting.
    if len(messages) == 0:
        greetings = ["hello", "hi", "hey", "greetings", "good morning", "good afternoon", "good evening", "howdy"]
        # Match whole words so that "hi there" counts as a greeting but words like "think" do not.
        is_greeting = any(re.search(rf"\b{re.escape(greeting)}\b", prompt.lower()) for greeting in greetings) and len(prompt.split()) < 5

        if not is_greeting:
            st.session_state.conversations[current_id]["should_update_title"] = True

    elif 1 <= len(messages) <= 5 and st.session_state.conversations[current_id]["title"].startswith("New chat"):
        # Derive a short title from the first substantive prompt.
        words = prompt.split()
        if len(words) > 4:
            short_title = " ".join(words[:4]) + "..."
        else:
            short_title = prompt
        st.session_state.conversations[current_id]["title"] = short_title[:30]

    messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        try:
            response_stream = get_ai_response(prompt, messages[:-1], stream=True)

            if isinstance(response_stream, str):
                # Demo mode or an error message returned as plain text.
                message_placeholder.markdown(response_stream)
                full_response = response_stream
            else:
                # Stream tokens as they arrive, showing a cursor while text is still coming in.
                for chunk in response_stream:
                    if chunk.choices and chunk.choices[0].delta.content is not None:
                        full_response += chunk.choices[0].delta.content
                        message_placeholder.markdown(full_response + "▌")
                        time.sleep(0.002)

            if full_response:
                cleaned_response = full_response
                # Cap very long runs of horizontal-rule characters and collapse excess blank lines.
                cleaned_response = re.sub(r'─{5,}', '─────', cleaned_response)
                cleaned_response = re.sub(r'\n{3,}', '\n\n', cleaned_response)
                message_placeholder.markdown(cleaned_response)
            else:
                message_placeholder.markdown("No response received from the model.")

            messages.append({"role": "assistant", "content": full_response})

            # Give the conversation a title based on the first user prompt.
            if len(messages) == 2 and st.session_state.conversations[current_id].get("should_update_title", False):
                user_prompt = messages[0]["content"]
                words = user_prompt.split()
                if len(words) >= 4:
                    new_title = " ".join(words[:4])
                else:
                    new_title = user_prompt

                st.session_state.conversations[current_id]["title"] = new_title[:30]
                st.session_state.conversations[current_id]["should_update_title"] = False

        except Exception as e:
            error_msg = f"Error: {str(e)}"
            message_placeholder.markdown(error_msg)
            messages.append({"role": "assistant", "content": error_msg})

    # Rerun outside the try/except so Streamlit's internal rerun signal is not swallowed by it.
    st.rerun()
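
# ---- Auto-scroll the chat to the newest message ----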

scroll_script = """
<script>
    // Scroll the messages container to the bottom
    function scrollToBottom() {
        // Small delay to ensure the DOM is updated
        setTimeout(() => {
            const messagesContainer = window.parent.document.querySelector('.messages-container');
            if (messagesContainer) {
                messagesContainer.scrollTop = messagesContainer.scrollHeight;
            }

            // Also scroll the main container as a backup
            const mainContainer = window.parent.document.querySelector('.main');
            if (mainContainer) {
                mainContainer.scrollTop = mainContainer.scrollHeight;
            }
        }, 100);
    }

    // Initial scroll
    scrollToBottom();

    // Set up a mutation observer for dynamic content changes
    const observerConfig = {
        childList: true,
        subtree: true,
        characterData: true,
        attributes: true
    };

    // Create a mutation observer instance
    const observer = new MutationObserver(function(mutations) {
        scrollToBottom();
    });

    // Start observing the chat container for changes
    const targetNode = window.parent.document.querySelector('.messages-container');
    if (targetNode) {
        observer.observe(targetNode, observerConfig);
    }

    // Also observe any new chat messages
    const chatMessages = window.parent.document.querySelectorAll('.stChatMessage');
    chatMessages.forEach(function(message) {
        observer.observe(message, observerConfig);
    });

    // Handle window resize
    window.onresize = scrollToBottom;

    // Make sure we scroll when the page is fully loaded
    window.addEventListener('load', scrollToBottom);
    document.addEventListener('DOMContentLoaded', scrollToBottom);

    // Additional periodic check for slow connections
    setInterval(scrollToBottom, 1000);
</script>
"""

# Scripts injected through st.markdown are not executed, so render the snippet in a
# zero-height component iframe; the script reaches the app page via window.parent.
components.html(scroll_script, height=0)