# app_old.py — Streamlit GPT-style chat assistant (provenance: commit 077dbb1)
import streamlit as st
import requests
from dotenv import load_dotenv
import os
import sys
import time
import random
import uuid
from datetime import datetime
import openai
# Load environment variables from a local .env file (e.g. the OpenAI API key).
load_dotenv()

# Set page config with a wider layout.
# NOTE: st.set_page_config must be the first Streamlit command in the script.
st.set_page_config(
    page_title="GPT-Style Chat Assistant",
    page_icon="🤖",
    layout="wide"
)
# Inject custom CSS for chat bubbles, the fixed chat-input bar, and the
# sidebar conversation entries. unsafe_allow_html is required so the raw
# <style> tag is rendered instead of escaped.
st.markdown("""
<style>
.main-content {
max-width: 800px;
margin: 0 auto;
padding: 1rem;
}
.chat-message {
padding: 1.5rem;
border-radius: 0.5rem;
margin-bottom: 1rem;
display: flex;
flex-direction: column;
}
.user-message {
background-color: #f0f2f6;
}
.assistant-message {
background-color: #e6f3f7;
}
.chat-input {
position: fixed;
bottom: 0;
width: 100%;
padding: 1rem;
background-color: white;
}
.sidebar-conv {
padding: 0.5rem 1rem;
border-radius: 0.5rem;
margin-bottom: 0.5rem;
cursor: pointer;
transition: background-color 0.3s;
}
.sidebar-conv:hover {
background-color: #f0f2f6;
}
.active-conv {
background-color: #e6f3f7;
font-weight: bold;
}
</style>
""", unsafe_allow_html=True)
# Initialize session state.
# `conversations` maps conversation id (uuid4 string) ->
#   {"title": str, "messages": [{"role": str, "content": str}, ...]}
if "conversations" not in st.session_state:
    st.session_state.conversations = {}

# Ensure there is always an active conversation to append messages to.
if "current_conversation_id" not in st.session_state:
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": []
    }

# Initialize selected model (default: gpt-3.5-turbo).
if "selected_model" not in st.session_state:
    st.session_state.selected_model = "gpt-3.5-turbo"
# Get OpenAI API key from environment or let user enter it later in the
# sidebar. Earlier env-var names kept for reference:
# openai_api_key = os.getenv("OPENAI_API_KEY")
# openai_api_key = os.getenv("OPENAI_API_KEY_NEW_1")
openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")

# Configure OpenAI client.
# NOTE(review): `client` is left undefined when no key is present;
# get_ai_response() guards this by falling back to demo mode first.
if openai_api_key:
    client = openai.OpenAI(api_key=openai_api_key)
# Available models with descriptions and token limits.
# Each entry holds:
#   description   - human-readable label shown in the sidebar selectbox
#   max_tokens    - model context-window size (informational, shown in UI)
#   output_tokens - per-reply completion cap passed to the API
#   temperature   - default sampling temperature used by get_ai_response()
# (A stray leftover `#"""` artifact that sat inside this literal was removed.)
AVAILABLE_MODELS = {
    "gpt-3.5-turbo": {
        "description": "Fast and cost-effective",
        "max_tokens": 4096,
        "output_tokens": 500,
        "temperature": 0.7
    },
    "gpt-4": {
        "description": "More capable but slower",
        "max_tokens": 8192,
        "output_tokens": 800,
        "temperature": 0.7
    },
    "gpt-3.5-turbo-16k": {
        "description": "Longer context window",
        "max_tokens": 16384,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "gpt-4-turbo": {
        "description": "Most powerful model (if available)",
        "max_tokens": 128000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "gpt-4o": {
        "description": "Latest GPT-4 Omni model",
        "max_tokens": 128000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "gpt-4o-mini": {
        "description": "Efficient version of GPT-4o",
        "max_tokens": 128000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o1-mini": {
        "description": "OpenAI Reasoning Model - Mini",
        "max_tokens": 180000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o1": {
        "description": "OpenAI Reasoning Model - Standard",
        "max_tokens": 200000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "o1-pro": {
        "description": "OpenAI Reasoning Model - Professional",
        "max_tokens": 200000,
        "output_tokens": 1500,
        "temperature": 0.7
    },
    "o3-mini": {
        "description": "OpenAI Advanced Reasoning - Mini",
        "max_tokens": 200000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o3-mini-2025-01-31": {
        "description": "OpenAI Advanced Reasoning - Enhanced",
        "max_tokens": 200000,
        "output_tokens": 1200,
        "temperature": 0.7
    }
}
# Function to call OpenAI API
def get_ai_response(prompt, history):
    """Return the assistant's reply to *prompt* given prior *history*.

    history: list of {"role": ..., "content": ...} dicts (OpenAI chat format),
    excluding the current prompt.

    Falls back to offline demo replies when no API key is configured.
    On any API failure, surfaces the error via st.error and returns an
    apology string instead of raising.
    """
    # Use demo mode if no API key is provided
    if not openai_api_key:
        return get_demo_response(prompt)
    try:
        # Get model configuration (fall back to gpt-3.5-turbo defaults).
        model = st.session_state.selected_model
        model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])

        # FIX: o1/o3 "reasoning" models reject the legacy `max_tokens`
        # parameter (they require `max_completion_tokens`), reject custom
        # temperatures, and some variants reject the "system" role — the
        # original code errored out for every o1*/o3* entry in
        # AVAILABLE_MODELS.
        is_reasoning_model = model.startswith(("o1", "o3"))

        # Format messages for the API: optional system prompt, then the
        # conversation history, then the current user turn.
        messages = []
        if not is_reasoning_model:
            messages.append({
                "role": "system",
                "content": "You are a helpful assistant that provides clear, concise, and accurate information."
            })
        for msg in history:
            messages.append({
                "role": msg["role"],
                "content": msg["content"]
            })
        messages.append({
            "role": "user",
            "content": prompt
        })

        # Build request parameters per model family.
        request_kwargs = {
            "model": model,
            "messages": messages,
            "stream": False
        }
        if is_reasoning_model:
            request_kwargs["max_completion_tokens"] = model_config["output_tokens"]
        else:
            request_kwargs["temperature"] = model_config["temperature"]
            request_kwargs["max_tokens"] = model_config["output_tokens"]

        # Call OpenAI API and extract the reply text.
        response = client.chat.completions.create(**request_kwargs)
        return response.choices[0].message.content
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
        return "I'm sorry, I encountered an error while processing your request. Please check your OpenAI API key or try again later."
# Canned replies used when no OpenAI API key is available.
def get_demo_response(prompt):
    """Return a canned demo-mode reply for *prompt*.

    The lower-cased prompt is matched against keyword groups in a fixed
    priority order; the first matching group yields a random canned line.
    Prompts matching nothing get a fixed fallback message.
    """
    text = prompt.lower()

    greetings = [
        "Hello! How can I assist you today?",
        "Hi there! I'm a demo AI assistant. What can I help you with?",
        "Greetings! I'm running in demo mode. Feel free to ask simple questions."
    ]
    farewells = [
        "Goodbye! Have a great day!",
        "Farewell! Come back soon!",
        "Take care! It was nice chatting with you."
    ]
    info_responses = [
        "I'm a simple AI assistant running in demo mode. To use the full features, please provide an OpenAI API key.",
        "This is a demo version with limited capabilities. For a better experience, add your OpenAI API key.",
        "I'm just demonstrating basic functionality. Get a free API key from OpenAI to unlock my full potential!"
    ]
    reasoning_examples = [
        "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
        "When solving problems, I would typically break them down into smaller parts, examine each component, and build towards a comprehensive solution. This demo just simulates that process.",
        "Reasoning typically involves identifying key facts, applying logical rules, and drawing conclusions based on available information. With a proper API key, I could demonstrate this more effectively."
    ]

    # (trigger substrings, candidate replies) — checked in order, first hit wins.
    keyword_rules = [
        (["hello", "hi", "hey", "greetings"], greetings),
        (["bye", "goodbye", "farewell", "see you"], farewells),
        (["who are you", "what are you", "tell me about yourself", "what can you do"], info_responses),
        (["think", "reason", "analyze", "solve", "explain", "why", "how"], reasoning_examples),
    ]
    for triggers, candidates in keyword_rules:
        if any(trigger in text for trigger in triggers):
            return random.choice(candidates)

    # Fixed (non-random) special cases, then the generic fallback.
    if "weather" in text:
        return "I'm sorry, I don't have access to real-time weather data in demo mode."
    if any(trigger in text for trigger in ["help", "assist", "support"]):
        return "To get better assistance, please add your OpenAI API key. You can get one for free at https://platform.openai.com/account/api-keys."
    return "I'm running in demo mode with limited responses. For a full conversation experience, please add your OpenAI API key above."
# Start a fresh conversation and make it the active one.
def create_new_chat():
    """Register a new empty conversation in session state and select it."""
    conversation_id = str(uuid.uuid4())
    # Default title carries the creation time until the first user message.
    st.session_state.conversations[conversation_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": []
    }
    st.session_state.current_conversation_id = conversation_id
# Replace a default "New chat ..." title with the user's first message.
def update_conversation_title(conv_id, user_message):
    """Set the conversation title from *user_message* if still untitled.

    Only conversations whose title still starts with "New chat" are renamed;
    messages longer than 30 characters are truncated with an ellipsis.
    """
    conversation = st.session_state.conversations[conv_id]
    if not conversation["title"].startswith("New chat"):
        return
    # Limit title length to prevent sidebar overflow.
    if len(user_message) > 30:
        conversation["title"] = user_message[:30] + "..."
    else:
        conversation["title"] = user_message
# Remove a conversation; keep the app pointed at a valid active chat.
def delete_conversation(conv_id):
    """Delete *conv_id* from session state.

    If it was the active conversation, switch to any remaining one, or
    create a brand-new chat when none are left.
    """
    if conv_id not in st.session_state.conversations:
        return
    del st.session_state.conversations[conv_id]
    if conv_id != st.session_state.current_conversation_id:
        return
    remaining = st.session_state.conversations
    if remaining:
        # Pick an arbitrary surviving conversation (first in insertion order).
        st.session_state.current_conversation_id = next(iter(remaining))
    else:
        create_new_chat()
# Create a two-column layout: narrow column + wide chat column.
# NOTE(review): everything below uses st.sidebar.* (Streamlit's real
# sidebar), so this `sidebar` column is effectively unused — confirm
# whether the column layout is intentional.
sidebar, main_content = st.columns([1, 3])

# Sidebar (conversation history)
with sidebar:
    st.sidebar.title("Conversations")
    # Add a new chat button; rerun so the UI reflects the new conversation.
    if st.sidebar.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()
    st.sidebar.markdown("---")
    # API token input in sidebar if no key was found in the environment.
    # A key entered here rebinds the module-level `openai_api_key`/`client`
    # for the remainder of this script run.
    if not openai_api_key:
        st.sidebar.info("⚠️ No OpenAI API key found.", icon="ℹ️")
        entered_token = st.sidebar.text_input("Enter OpenAI API Key", type="password")
        if entered_token:
            openai_api_key = entered_token
            client = openai.OpenAI(api_key=openai_api_key)
    # Model selection dropdown; options are shown as "<model> - <description>".
    st.sidebar.subheader("Model Selection")
    model_options = list(AVAILABLE_MODELS.keys())
    model_descriptions = [f"{model} - {AVAILABLE_MODELS[model]['description']}" for model in model_options]
    selected_model_index = model_options.index(st.session_state.selected_model) if st.session_state.selected_model in model_options else 0
    selected_description = st.sidebar.selectbox(
        "Choose a model:",
        model_descriptions,
        index=selected_model_index
    )
    # Extract model name from the chosen description label.
    selected_model = model_options[model_descriptions.index(selected_description)]
    if selected_model != st.session_state.selected_model:
        st.session_state.selected_model = selected_model
        st.sidebar.info(f"Model set to {selected_model}")
    # Show model details for the currently selected model.
    with st.sidebar.expander("Model Details"):
        model_info = AVAILABLE_MODELS[selected_model]
        st.write(f"**Description:** {model_info['description']}")
        st.write(f"**Max tokens:** {model_info['max_tokens']}")
        st.write(f"**Default temperature:** {model_info['temperature']}")
        st.write("""
**Note:** Some models may not be available with your current API key.
If you encounter an error, try selecting a different model.
""")
    st.sidebar.markdown("---")
    # Display conversation history: one row per conversation, with a
    # title button (switch to it) and a trash button (delete it).
    for conv_id, conv_data in st.session_state.conversations.items():
        col1, col2 = st.sidebar.columns([4, 1])
        is_active = conv_id == st.session_state.current_conversation_id
        with col1:
            if st.button(
                conv_data["title"],
                key=f"conv_{conv_id}",
                use_container_width=True,
                type="secondary" if is_active else "tertiary"
            ):
                st.session_state.current_conversation_id = conv_id
                st.rerun()
        with col2:
            if st.button("🗑️", key=f"del_{conv_id}"):
                delete_conversation(conv_id)
                st.rerun()
# Main content area: render the active conversation and handle new input.
with main_content:
    st.write("")  # Add some space at the top
    # Get current conversation; `messages` aliases the stored list, so
    # appends below mutate session state directly.
    current_id = st.session_state.current_conversation_id
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    messages = current_conv["messages"]
    # Create a container for the chat area (scrollable).
    chat_container = st.container()
    # Display prior chat messages.
    with chat_container:
        for i, message in enumerate(messages):
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
    # Chat input at the bottom.
    prompt = st.chat_input("What's on your mind?")
    if prompt:
        # Add user message to the current conversation.
        messages.append({"role": "user", "content": prompt})
        # Update conversation title if this is the first message.
        if len(messages) == 1:
            update_conversation_title(current_id, prompt)
        # Display user message.
        with st.chat_message("user"):
            st.markdown(prompt)
        # Display assistant response with a typing animation.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            # Get full response up-front (streaming is disabled in the API
            # call); history excludes the just-appended user turn.
            full_response = get_ai_response(prompt, messages[:-1])
            # Simulate typing one character at a time with a cursor glyph.
            # NOTE(review): 5 ms per character — long replies take several
            # seconds to render; consider chunking if that matters.
            displayed_response = ""
            for i in range(len(full_response)):
                displayed_response += full_response[i]
                message_placeholder.markdown(displayed_response + "▌")
                time.sleep(0.005)  # Slightly faster typing
            # Display final response without the cursor.
            message_placeholder.markdown(full_response)
        # Add assistant response to the conversation.
        messages.append({"role": "assistant", "content": full_response})
        # Force a rerun to update the sidebar with the new conversation title.
        st.rerun()