import streamlit as st
import requests
from dotenv import load_dotenv
import os
import sys
import time
import random
import uuid
from datetime import datetime
import openai

load_dotenv()
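
# --- Page configuration ---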
st.set_page_config(
    page_title="GPT-Style Chat Assistant",
    page_icon="🤖",
    layout="wide"
)
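
# Custom CSS for the chat layout, injected via st.markdown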
st.markdown(""" |
|
|
<style> |
|
|
.main-content { |
|
|
max-width: 800px; |
|
|
margin: 0 auto; |
|
|
padding: 1rem; |
|
|
} |
|
|
.chat-message { |
|
|
padding: 1.5rem; |
|
|
border-radius: 0.5rem; |
|
|
margin-bottom: 1rem; |
|
|
display: flex; |
|
|
flex-direction: column; |
|
|
} |
|
|
.user-message { |
|
|
background-color: #f0f2f6; |
|
|
} |
|
|
.assistant-message { |
|
|
background-color: #e6f3f7; |
|
|
} |
|
|
.chat-input { |
|
|
position: fixed; |
|
|
bottom: 0; |
|
|
width: 100%; |
|
|
padding: 1rem; |
|
|
background-color: white; |
|
|
} |
|
|
.sidebar-conv { |
|
|
padding: 0.5rem 1rem; |
|
|
border-radius: 0.5rem; |
|
|
margin-bottom: 0.5rem; |
|
|
cursor: pointer; |
|
|
transition: background-color 0.3s; |
|
|
} |
|
|
.sidebar-conv:hover { |
|
|
background-color: #f0f2f6; |
|
|
} |
|
|
.active-conv { |
|
|
background-color: #e6f3f7; |
|
|
font-weight: bold; |
|
|
} |
|
|
</style> |
|
|
""", unsafe_allow_html=True) |
|
|
|
|
|
|
|
|
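
# --- Session state: conversation store, active conversation, selected model ---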
if "conversations" not in st.session_state: |
|
|
st.session_state.conversations = {} |
|
|
|
|
|
if "current_conversation_id" not in st.session_state: |
|
|
new_id = str(uuid.uuid4()) |
|
|
st.session_state.current_conversation_id = new_id |
|
|
st.session_state.conversations[new_id] = { |
|
|
"title": f"New chat {datetime.now().strftime('%H:%M')}", |
|
|
"messages": [] |
|
|
} |
|
|
|
|
|
|
|
|
if "selected_model" not in st.session_state: |
|
|
st.session_state.selected_model = "gpt-3.5-turbo" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
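
# Read the OpenAI API key from the environment; the client is only created when a key is available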
openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")

if openai_api_key:
    client = openai.OpenAI(api_key=openai_api_key)
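
# Model catalogue: description, context window size, per-reply output cap, and default temperature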
AVAILABLE_MODELS = {
    "gpt-3.5-turbo": {
        "description": "Fast and cost-effective",
        "max_tokens": 4096,
        "output_tokens": 500,
        "temperature": 0.7
    },
    "gpt-4": {
        "description": "More capable but slower",
        "max_tokens": 8192,
        "output_tokens": 800,
        "temperature": 0.7
    },
    "gpt-3.5-turbo-16k": {
        "description": "Longer context window",
        "max_tokens": 16384,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "gpt-4-turbo": {
        "description": "Most powerful model (if available)",
        "max_tokens": 128000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "gpt-4o": {
        "description": "Latest GPT-4 Omni model",
        "max_tokens": 128000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "gpt-4o-mini": {
        "description": "Efficient version of GPT-4o",
        "max_tokens": 128000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o1-mini": {
        "description": "OpenAI Reasoning Model - Mini",
        "max_tokens": 180000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o1": {
        "description": "OpenAI Reasoning Model - Standard",
        "max_tokens": 200000,
        "output_tokens": 1200,
        "temperature": 0.7
    },
    "o1-pro": {
        "description": "OpenAI Reasoning Model - Professional",
        "max_tokens": 200000,
        "output_tokens": 1500,
        "temperature": 0.7
    },
    "o3-mini": {
        "description": "OpenAI Advanced Reasoning - Mini",
        "max_tokens": 200000,
        "output_tokens": 1000,
        "temperature": 0.7
    },
    "o3-mini-2025-01-31": {
        "description": "OpenAI Advanced Reasoning - Enhanced",
        "max_tokens": 200000,
        "output_tokens": 1200,
        "temperature": 0.7
    }
}
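
# Compose the message list (system prompt + prior turns + new user prompt) and call the Chat Completions API.
# Note: the o1/o3 reasoning entries above may reject the temperature and max_tokens parameters used here;
# any API error is surfaced via st.error and a fallback message is returned instead.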
def get_ai_response(prompt, history):
    # Fall back to canned demo responses when no API key is configured
    if not openai_api_key:
        return get_demo_response(prompt)

    try:
        messages = []

        system_message = "You are a helpful assistant that provides clear, concise, and accurate information."
        messages.append({
            "role": "system",
            "content": system_message
        })

        for msg in history:
            messages.append({
                "role": msg["role"],
                "content": msg["content"]
            })

        messages.append({
            "role": "user",
            "content": prompt
        })

        model = st.session_state.selected_model
        model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
        output_tokens = model_config["output_tokens"]
        temperature = model_config["temperature"]

        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=output_tokens,
            stream=False
        )

        return response.choices[0].message.content

    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
        return "I'm sorry, I encountered an error while processing your request. Please check your OpenAI API key or try again later."
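
# Keyword-based canned responses used in demo mode (no API key configured)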
def get_demo_response(prompt):
    prompt_lower = prompt.lower()

    greetings = [
        "Hello! How can I assist you today?",
        "Hi there! I'm a demo AI assistant. What can I help you with?",
        "Greetings! I'm running in demo mode. Feel free to ask simple questions."
    ]

    farewells = [
        "Goodbye! Have a great day!",
        "Farewell! Come back soon!",
        "Take care! It was nice chatting with you."
    ]

    info_responses = [
        "I'm a simple AI assistant running in demo mode. To use the full features, please provide an OpenAI API key.",
        "This is a demo version with limited capabilities. For a better experience, add your OpenAI API key.",
        "I'm just demonstrating basic functionality. Add an OpenAI API key to unlock my full potential!"
    ]

    reasoning_examples = [
        "This is a demonstration of how I would process a reasoning task. In a real scenario with the full model, I would analyze the problem step by step, consider multiple angles, and provide a detailed explanation.",
        "When solving problems, I would typically break them down into smaller parts, examine each component, and build towards a comprehensive solution. This demo just simulates that process.",
        "Reasoning typically involves identifying key facts, applying logical rules, and drawing conclusions based on available information. With a proper API key, I could demonstrate this more effectively."
    ]

    if any(word in prompt_lower for word in ["hello", "hi", "hey", "greetings"]):
        return random.choice(greetings)
    elif any(word in prompt_lower for word in ["bye", "goodbye", "farewell", "see you"]):
        return random.choice(farewells)
    elif any(phrase in prompt_lower for phrase in ["who are you", "what are you", "tell me about yourself", "what can you do"]):
        return random.choice(info_responses)
    elif any(word in prompt_lower for word in ["think", "reason", "analyze", "solve", "explain", "why", "how"]):
        return random.choice(reasoning_examples)
    elif "weather" in prompt_lower:
        return "I'm sorry, I don't have access to real-time weather data in demo mode."
    elif any(word in prompt_lower for word in ["help", "assist", "support"]):
        return "To get better assistance, please add your OpenAI API key. You can create one at https://platform.openai.com/account/api-keys."
    else:
        return "I'm running in demo mode with limited responses. For a full conversation experience, please add your OpenAI API key in the sidebar."
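
# --- Conversation management helpers ---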
def create_new_chat():
    new_id = str(uuid.uuid4())
    st.session_state.current_conversation_id = new_id
    st.session_state.conversations[new_id] = {
        "title": f"New chat {datetime.now().strftime('%H:%M')}",
        "messages": []
    }


def update_conversation_title(conv_id, user_message):
    # Replace the placeholder title with the first user message, truncated to 30 characters
    current_title = st.session_state.conversations[conv_id]["title"]
    if current_title.startswith("New chat"):
        new_title = user_message[:30] + "..." if len(user_message) > 30 else user_message
        st.session_state.conversations[conv_id]["title"] = new_title


def delete_conversation(conv_id):
    if conv_id in st.session_state.conversations:
        del st.session_state.conversations[conv_id]

    # If the active conversation was deleted, switch to another one or start a fresh chat
    if conv_id == st.session_state.current_conversation_id:
        if st.session_state.conversations:
            st.session_state.current_conversation_id = next(iter(st.session_state.conversations))
        else:
            create_new_chat()
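
# Layout: the chat UI is rendered in the wide right column; conversation controls use the Streamlit sidebar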
sidebar, main_content = st.columns([1, 3])
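
# --- Sidebar: new chat button, API key entry, model selection, conversation list ---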
with sidebar:
    st.sidebar.title("Conversations")

    if st.sidebar.button("+ New Chat", use_container_width=True):
        create_new_chat()
        st.rerun()

    st.sidebar.markdown("---")

    # Let the user paste an API key at runtime if none was found in the environment
    if not openai_api_key:
        st.sidebar.info("No OpenAI API key found.", icon="⚠️")
        entered_token = st.sidebar.text_input("Enter OpenAI API Key", type="password")
        if entered_token:
            openai_api_key = entered_token
            client = openai.OpenAI(api_key=openai_api_key)

    st.sidebar.subheader("Model Selection")
    model_options = list(AVAILABLE_MODELS.keys())
    model_descriptions = [f"{model} - {AVAILABLE_MODELS[model]['description']}" for model in model_options]
    selected_model_index = model_options.index(st.session_state.selected_model) if st.session_state.selected_model in model_options else 0

    selected_description = st.sidebar.selectbox(
        "Choose a model:",
        model_descriptions,
        index=selected_model_index
    )

    # Map the chosen description back to its model name and persist the selection
    selected_model = model_options[model_descriptions.index(selected_description)]
    if selected_model != st.session_state.selected_model:
        st.session_state.selected_model = selected_model
        st.sidebar.info(f"Model set to {selected_model}")

    with st.sidebar.expander("Model Details"):
        model_info = AVAILABLE_MODELS[selected_model]
        st.write(f"**Description:** {model_info['description']}")
        st.write(f"**Max tokens:** {model_info['max_tokens']}")
        st.write(f"**Default temperature:** {model_info['temperature']}")
        st.write("""
        **Note:** Some models may not be available with your current API key.
        If you encounter an error, try selecting a different model.
        """)

    st.sidebar.markdown("---")

    # One row per saved conversation: a button to switch to it and a button to delete it
    for conv_id, conv_data in st.session_state.conversations.items():
        col1, col2 = st.sidebar.columns([4, 1])
        is_active = conv_id == st.session_state.current_conversation_id

        with col1:
            if st.button(
                conv_data["title"],
                key=f"conv_{conv_id}",
                use_container_width=True,
                type="secondary" if is_active else "tertiary"
            ):
                st.session_state.current_conversation_id = conv_id
                st.rerun()

        with col2:
            if st.button("🗑️", key=f"del_{conv_id}"):
                delete_conversation(conv_id)
                st.rerun()
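
# --- Main area: render the active conversation and handle new input ---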
with main_content:
    st.write("")

    current_id = st.session_state.current_conversation_id
    current_conv = st.session_state.conversations.get(current_id, {"messages": []})
    messages = current_conv["messages"]

    # Replay the conversation history
    chat_container = st.container()
    with chat_container:
        for message in messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    prompt = st.chat_input("What's on your mind?")

    if prompt:
        messages.append({"role": "user", "content": prompt})

        # The first message in a conversation becomes its title
        if len(messages) == 1:
            update_conversation_title(current_id, prompt)

        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            message_placeholder = st.empty()

            # The new prompt is passed separately, so only the earlier turns are sent as history
            full_response = get_ai_response(prompt, messages[:-1])

            # Simulate a typing effect by revealing the response one character at a time
            displayed_response = ""
            for char in full_response:
                displayed_response += char
                message_placeholder.markdown(displayed_response + "▌")
                time.sleep(0.005)

            message_placeholder.markdown(full_response)

        messages.append({"role": "assistant", "content": full_response})

        st.rerun()