"""GPT-style chat assistant built with Streamlit and the OpenAI chat API.

Features:
    - Multiple named conversations with auto-generated short titles.
    - Selectable models (gpt-4o / o1 / o3-mini) with per-model parameter
      handling (o-series models use ``max_completion_tokens`` and reject
      ``temperature``).
    - An editable system prompt persisted in session state.
    - Streaming assistant responses with a typing cursor.

Run with:  streamlit run <this_file>
"""

import os
import re
import time
import uuid
from datetime import datetime

import streamlit as st
from dotenv import load_dotenv
from openai import OpenAI

# Load environment variables (API key) from a local .env file, if present.
load_dotenv()

# ---------------------------------------------------------------------------
# Page configuration and styling
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="GPT-Style Chat Assistant",
    page_icon="🤖",
    layout="wide",
)

# NOTE(review): the custom CSS payload was lost when this file was extracted
# (the triple-quoted string is empty in the recovered source). Restore the
# stylesheet for "better styling and smooth scrolling" between the quotes.
st.markdown(""" """, unsafe_allow_html=True)

# ---------------------------------------------------------------------------
# Session-state initialisation
# ---------------------------------------------------------------------------

# Default system prompt shown (and editable) in the sidebar.
DEFAULT_SYSTEM_PROMPT = (
    "You are an advanced AI assistant designed to provide clear, concise, and accurate information while maintaining a "
    "professional and informative tone. Your responses should be well-structured, logically sound, and adapted to the user's context.\n\n"
    "Guidelines:\n"
    "1. **Clarity & Accuracy**: Ensure that all responses are factual, precise, and easy to understand.\n"
    "2. **Depth & Detail**: Provide thorough explanations with relevant examples, case studies, or analogies when needed.\n"
    "3. **Context Awareness**: Understand and remember relevant details from the conversation to tailor responses appropriately.\n"
    "4. **Technical & Analytical Capability**: Be proficient in technical, scientific, and analytical discussions, offering well-reasoned arguments and solutions.\n"
    "5. **Step-by-Step Guidance**: When responding to queries related to problem-solving, coding, or calculations, provide a structured, step-by-step breakdown.\n"
    "6. **Comparisons & Evaluations**: When discussing alternatives or comparisons, include key differences, advantages, disadvantages, and real-world applications.\n"
    "7. **Professional Communication**: Maintain a respectful and professional tone, avoiding unnecessary jargon while ensuring technical accuracy.\n\n"
    "If the user requests explanations on complex topics, adapt the response to their level of expertise, simplifying where necessary or diving deeper into advanced details when appropriate."
)

if "conversations" not in st.session_state:
    # Maps conversation id -> {"title": str, "messages": list, "should_update_title": bool}.
    st.session_state.conversations = {}

if "current_conversation_id" not in st.session_state:
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False,
    }

if "selected_model" not in st.session_state:
    st.session_state.selected_model = "o1"  # Default to o1

if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = DEFAULT_SYSTEM_PROMPT

# Flags backing the system-prompt edit/save/cancel workflow.
if "is_editing_system_prompt" not in st.session_state:
    st.session_state.is_editing_system_prompt = False
if "temp_system_prompt" not in st.session_state:
    st.session_state.temp_system_prompt = st.session_state.system_prompt

# ---------------------------------------------------------------------------
# OpenAI client
# ---------------------------------------------------------------------------

# Prefer the environment variable; fall back to a sidebar password field.
openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
if not openai_api_key:
    openai_api_key = st.sidebar.text_input("Enter OpenAI API Key", type="password")

client = OpenAI(api_key=openai_api_key) if openai_api_key else None

# Available models with descriptions and token limits.
AVAILABLE_MODELS = {
    "gpt-4o": {
        "description": "Latest GPT-4 Omni model",
        "max_tokens": 128000,
        "output_tokens": 4096,
        "supports_temperature": True,
    },
    "o1": {
        "description": "OpenAI Reasoning Model - Standard",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False,
    },
    "o3-mini": {
        "description": "OpenAI Advanced Reasoning - Mini",
        "max_tokens": 200000,
        "output_tokens": 4096,
        "supports_temperature": False,
    },
}

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

# Words/phrases treated as greetings when deciding whether the first message
# is substantive enough to title the conversation.
GREETING_WORDS = (
    "hello", "hi", "hey", "greetings",
    "good morning", "good afternoon", "good evening", "howdy",
)


def is_greeting(text):
    """Return True when *text* is a short message containing a greeting.

    Uses word-boundary regex matching so that e.g. "hi" does not match
    inside "this" or "chihuahua" (the previous substring test misfired on
    such inputs). A message of 5+ words is never treated as a greeting.
    """
    lowered = text.lower()
    found = any(
        re.search(r"\b" + re.escape(greet) + r"\b", lowered)
        for greet in GREETING_WORDS
    )
    return found and len(text.split()) < 5


def make_short_title(text):
    """Build a <=30-character conversation title from the first few words.

    Joins the first 4 words and appends "..." when the text is longer; used
    by both title-update paths so their formatting stays consistent.
    """
    words = text.split()
    if len(words) > 4:
        title = " ".join(words[:4]) + "..."
    else:
        title = text
    return title[:30]


def get_ai_response(prompt, history, stream=True):
    """Call the OpenAI chat-completions API for *prompt* with *history*.

    Args:
        prompt:  The new user message.
        history: Prior messages as ``[{"role": ..., "content": ...}, ...]``.
        stream:  Whether to request a streaming response.

    Returns:
        The API response (a stream iterator when ``stream=True``), or a plain
        error/demo string when no client is configured or the call fails.
        Callers distinguish the two with ``isinstance(result, str)``.
    """
    if not client:
        return "No API key provided. Running in demo mode."
    try:
        # System prompt first, then the running history, then the new turn.
        messages = [{"role": "system", "content": st.session_state.system_prompt}]
        for msg in history:
            messages.append({"role": msg["role"], "content": msg["content"]})
        messages.append({"role": "user", "content": prompt})

        model = st.session_state.selected_model
        model_config = AVAILABLE_MODELS.get(model)

        # Common parameters for all models.
        params = {
            "model": model,
            "messages": messages,
            "stream": stream,
        }

        # Model-specific parameters: o-series models reject `temperature`
        # and use `max_completion_tokens` instead of `max_tokens`.
        if model_config["supports_temperature"]:
            params["temperature"] = 0.7
            params["max_tokens"] = model_config["output_tokens"]
        else:
            params["max_completion_tokens"] = model_config["output_tokens"]

        return client.chat.completions.create(**params)
    except Exception as e:
        # Surface the failure as a user-visible string rather than crashing
        # the Streamlit script run.
        return f"An error occurred: {str(e)}."


def create_new_chat():
    """Create a fresh conversation and make it current."""
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": [],
        "should_update_title": False,
    }


def start_editing_system_prompt():
    """Enter system-prompt edit mode, seeding the draft with the current value."""
    st.session_state.is_editing_system_prompt = True
    st.session_state.temp_system_prompt = st.session_state.system_prompt


def save_system_prompt():
    """Commit the draft system prompt and leave edit mode."""
    st.session_state.system_prompt = st.session_state.temp_system_prompt
    st.session_state.is_editing_system_prompt = False


def cancel_editing_system_prompt():
    """Discard the draft system prompt and leave edit mode."""
    st.session_state.is_editing_system_prompt = False
    st.session_state.temp_system_prompt = st.session_state.system_prompt


# ---------------------------------------------------------------------------
# Sidebar: model selection, system prompt, conversation management
# ---------------------------------------------------------------------------
with st.sidebar:
    st.subheader("Model Selection")

    selected_model = st.selectbox(
        "Choose a model:",
        list(AVAILABLE_MODELS.keys()),
        index=list(AVAILABLE_MODELS.keys()).index(st.session_state.selected_model),
    )
    st.session_state.selected_model = selected_model

    model_info = AVAILABLE_MODELS[selected_model]

    # Read-only model details, styled like the system-prompt viewer.
    with st.expander("Selected Model Info", expanded=False):
        model_info_text = f"""Model: {selected_model}
Description: {model_info['description']}
Max Tokens: {model_info['max_tokens']}
Output Tokens: {model_info['output_tokens']}
Temperature: {"0.7" if model_info["supports_temperature"] else "Not supported for this model"}"""
        st.text_area(
            "Model info",
            value=model_info_text,
            height=150,
            disabled=True,
            label_visibility="collapsed",
        )

    # Collapsible system-prompt box with Edit / Save / Cancel.
    with st.expander("System Prompt", expanded=False):
        if st.session_state.is_editing_system_prompt:
            st.session_state.temp_system_prompt = st.text_area(
                "Edit System Prompt:",
                value=st.session_state.temp_system_prompt,
                height=200,
            )
            col1, col2, col3 = st.columns(3)
            with col1:
                if st.button("Save", key="save_system_prompt_btn", use_container_width=True):
                    save_system_prompt()
            with col2:
                if st.button("Cancel", key="cancel_system_prompt_btn", use_container_width=True):
                    cancel_editing_system_prompt()
        else:
            st.markdown("**Current System Prompt:**")
            st.text_area(
                "Current system prompt",
                value=st.session_state.system_prompt,
                height=200,
                disabled=True,
                label_visibility="collapsed",
            )
            if st.button("Edit System Prompt", key="edit_system_prompt_btn", use_container_width=True):
                start_editing_system_prompt()

    st.subheader("Conversations")
    if st.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()

    # NOTE(review): the scrollable-container <div> markup originally wrapping
    # this list was lost in extraction — restore it here if needed.
    st.markdown("", unsafe_allow_html=True)

    # Iterate over a snapshot: the delete button mutates the dict mid-loop,
    # which would otherwise raise "dictionary changed size during iteration".
    for conv_id, conv_data in list(st.session_state.conversations.items()):
        col1, col2 = st.columns([4, 1])
        is_active = conv_id == st.session_state.current_conversation_id
        with col1:
            if st.button(
                conv_data["title"],
                key=f"conv_{conv_id}",
                use_container_width=True,
                type="secondary" if is_active else "tertiary",
            ):
                st.session_state.current_conversation_id = conv_id
                st.rerun()
        with col2:
            if st.button("🗑️", key=f"del_{conv_id}"):
                if conv_id in st.session_state.conversations:
                    del st.session_state.conversations[conv_id]
                # If the active chat was deleted, fall back to another one
                # (or start a fresh chat when none remain).
                if conv_id == st.session_state.current_conversation_id:
                    if st.session_state.conversations:
                        st.session_state.current_conversation_id = next(
                            iter(st.session_state.conversations)
                        )
                    else:
                        create_new_chat()
                st.rerun()

    st.markdown("", unsafe_allow_html=True)

# ---------------------------------------------------------------------------
# Main chat window
# ---------------------------------------------------------------------------
# NOTE(review): the <div> wrapper markup for the chat layout was lost in
# extraction; these placeholder markdown calls mark where it belonged.
st.markdown("", unsafe_allow_html=True)
st.markdown("", unsafe_allow_html=True)

chat_container = st.container()
with chat_container:
    current_id = st.session_state.current_conversation_id
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    messages = current_conv["messages"]
    # Replay the conversation so far.
    for message in messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

st.markdown("", unsafe_allow_html=True)

# Chat input pinned at the bottom.
st.markdown("", unsafe_allow_html=True)
prompt = st.chat_input("What's on your mind?", key="chat_input")
st.markdown("", unsafe_allow_html=True)
st.markdown("", unsafe_allow_html=True)

if prompt:
    if len(messages) == 0:
        # First message: keep the default "New chat" title if it is just a
        # greeting, otherwise flag the title for update after the first
        # assistant response.
        if not is_greeting(prompt):
            st.session_state.conversations[current_id]["should_update_title"] = True
    elif 1 <= len(messages) <= 5 and st.session_state.conversations[current_id]["title"].startswith("New chat"):
        # Early follow-up message while the title is still the default:
        # derive a short title from this prompt.
        st.session_state.conversations[current_id]["title"] = make_short_title(prompt)

    # Record and echo the user's message.
    messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the assistant response.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        try:
            # History excludes the message just appended; get_ai_response
            # adds it back as the final user turn.
            response_stream = get_ai_response(prompt, messages[:-1], stream=True)

            if isinstance(response_stream, str):
                # Error / demo-mode string rather than a stream.
                message_placeholder.markdown(response_stream)
                full_response = response_stream
            else:
                for chunk in response_stream:
                    # Some streamed chunks carry no choices (e.g. trailing
                    # metadata) — skip them instead of indexing into [0].
                    if not chunk.choices:
                        continue
                    if hasattr(chunk.choices[0].delta, "content"):
                        content = chunk.choices[0].delta.content
                        if content is not None:
                            full_response += content
                            # Trailing block is a typing-cursor effect.
                            message_placeholder.markdown(full_response + "▌")
                            time.sleep(0.002)

                # Final render without the cursor.
                if full_response:
                    # Collapse runaway horizontal rules and excess newlines
                    # some models emit.
                    cleaned_response = re.sub(r"━{5,}", "━━━━━", full_response)
                    cleaned_response = re.sub(r"\n{3,}", "\n\n", cleaned_response)
                    message_placeholder.markdown(cleaned_response)
                else:
                    message_placeholder.markdown("No response received from the model.")

            messages.append({"role": "assistant", "content": full_response})

            # Retitle after the first exchange when flagged above.
            if len(messages) == 2 and st.session_state.conversations[current_id].get("should_update_title", False):
                first_user_prompt = messages[0]["content"]
                st.session_state.conversations[current_id]["title"] = make_short_title(first_user_prompt)
                st.session_state.conversations[current_id]["should_update_title"] = False

            # Rerun so the sidebar reflects any title change immediately.
            st.rerun()
        except Exception as e:
            error_msg = f"Error: {str(e)}"
            message_placeholder.markdown(error_msg)
            messages.append({"role": "assistant", "content": error_msg})

# NOTE(review): the auto-scroll JavaScript payload was lost in extraction;
# restore the <script> markup between the triple quotes.
scroll_script = """ """
st.markdown(scroll_script, unsafe_allow_html=True)