# Source: Sathishsri — "Update app.py from anycoder" (commit 778d154, verified)
"""
ChatGPT Clone - A Gradio 6 Chat Application
A modern, production-ready chat interface inspired by ChatGPT
"""
import gradio as gr
import json
from datetime import datetime
from typing import Iterator
# ============================================
# CONFIGURATION
# ============================================
# You can set your OpenAI API key here to enable real AI responses
# Or leave it empty to use simulated responses
# (the key can also be entered at runtime in the UI Settings panel).
OPENAI_API_KEY = ""
# ============================================
# SIMULATED AI RESPONSES (Default Mode)
# ============================================
# Canned reply openers; simulate_response() picks one at random per turn.
SIMULATED_RESPONSES = [
    "That's a great question! Let me explain...",
    "I'd be happy to help you with that. Here's what I think...",
    "Interesting! Here's my perspective on that topic...",
    "Based on my knowledge, I can tell you that...",
    "That's an important topic. Let me share some insights...",
]
# ============================================
# CORE FUNCTIONS
# ============================================
def simulate_response(message: str, history: list, delay: float = 0.05) -> Iterator[str]:
    """
    Simulate a streaming AI response (demo mode, no API key required).

    Picks a canned opener from SIMULATED_RESPONSES and streams the reply
    word by word, yielding the accumulated text so far each time, the way
    a token-streaming API would. Replace with a real API call when a key
    is available.

    Args:
        message: The user's message (echoed back inside the reply).
        history: Chat history; unused, kept for signature parity with
            openai_response_generator.
        delay: Seconds to sleep between words. Defaults to 0.05 (the
            previously hard-coded value); pass 0 to disable the artificial
            typing effect, e.g. in tests.

    Yields:
        Progressively longer partial response strings.
    """
    import random
    import time

    opener = random.choice(SIMULATED_RESPONSES)
    # Build the canned reply in one literal instead of repeated +=.
    full_response = (
        f"{opener} {message}? Let me think about that more deeply.\n\n"
        "Here are some key points to consider:\n"
        "• First, understand the core concept\n"
        "• Break it down into smaller parts\n"
        "• Apply logical reasoning\n"
        "• Consider different perspectives\n\n"
        "Is there anything specific you'd like me to elaborate on?"
    )
    # Stream the response token by token (word-level granularity).
    words = full_response.split()
    for i in range(1, len(words) + 1):
        yield " ".join(words[:i])
        if delay:
            time.sleep(delay)  # Simulate token generation delay
def openai_response_generator(message: str, history: list, api_key: str):
    """
    Stream a real response from the OpenAI Chat Completions API.

    Requires: pip install openai

    Args:
        message: The new user message.
        history: Prior chat turns as {"role", "content", ...} dicts.
        api_key: OpenAI API key.

    Yields:
        The accumulated response text after each streamed chunk, or a
        single warning string if the library is missing or the call fails.
    """
    try:
        from openai import OpenAI
        client = OpenAI(api_key=api_key)
        # Build the conversation in chronological order: prior context
        # first, current user message last. (The original put the new
        # message *before* the history, presenting turns to the model
        # out of order.)
        messages = [
            {"role": msg["role"], "content": msg["content"]}
            for msg in history[-6:]  # Use last 6 messages for context
            if msg.get("role") in ("user", "assistant")
        ]
        # Append the current message unless the caller already put it at
        # the end of history (respond() does), to avoid sending it twice.
        if not messages or messages[-1] != {"role": "user", "content": message}:
            messages.append({"role": "user", "content": message})
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True,
            temperature=0.7,
        )
        full_response = ""
        for chunk in response:
            # delta.content is None for role/finish chunks — skip those.
            if chunk.choices[0].delta.content:
                full_response += chunk.choices[0].delta.content
                yield full_response
    except ImportError:
        yield "⚠️ Please install the OpenAI library: pip install openai"
    except Exception as e:
        yield f"⚠️ API Error: {str(e)}"
def get_response(message: str, history: list, use_openai: bool, api_key: str) -> Iterator[str]:
    """
    Unified response generator.

    Delegates to the real OpenAI backend when it is enabled and a key was
    supplied; falls back to the simulated generator otherwise.
    """
    if not (use_openai and api_key):
        yield from simulate_response(message, history)
        return
    yield from openai_response_generator(message, history, api_key)
def format_chat_history(history: list) -> list[dict]:
    """
    Project chat history down to display form.

    Keeps only user/assistant turns and strips every key except "role"
    and "content" (e.g. drops timestamps), preserving order.
    """
    return [
        {"role": entry["role"], "content": entry["content"]}
        for entry in history
        if entry.get("role") in ("user", "assistant")
    ]
def clear_chat() -> tuple:
    """
    Reset the conversation.

    Returns an empty history list, an empty string for the input box, and
    a blank (still visible) Chatbot component to refresh the display.
    """
    fresh_history: list = []
    blank_chatbot = gr.Chatbot(value=[], visible=True)
    return fresh_history, "", blank_chatbot
def download_chat(history: list) -> str:
    """
    Serialize the chat history to a pretty-printed JSON string.

    Each entry is exported with "role", "content" and "timestamp" keys,
    using fallback values for any key that is missing. Returns an empty
    string when there is nothing to export.
    """
    if not history:
        return ""
    conversation = [
        {
            "role": entry.get("role", "unknown"),
            "content": entry.get("content", ""),
            "timestamp": entry.get("timestamp", ""),
        }
        for entry in history
    ]
    payload = {
        "export_date": datetime.now().isoformat(),
        "total_messages": len(history),
        "conversation": conversation,
    }
    return json.dumps(payload, indent=2)
def retry_last_message(history: list) -> tuple:
    """
    Prepare a retry: refill the input box with the most recent user
    message and return a trimmed history plus a refreshed Chatbot.

    Returns:
        (new_history, input_text, chatbot_component)
    """
    if not history:
        # Nothing to retry yet — hand back an empty, visible chat.
        return [], "", gr.Chatbot(value=[], visible=True)
    # Find last user message
    for msg in reversed(history):
        if msg.get("role") == "user":
            # NOTE(review): history[:-1] drops only the *last* element,
            # whatever it is. If history ends with the assistant reply this
            # removes that reply but leaves the user message in place (so
            # resubmitting duplicates the turn); if it ends with the user
            # message, the user message itself is removed. Confirm which
            # behavior is intended.
            return history[:-1], msg.get("content", ""), gr.Chatbot(
                value=format_chat_history(history[:-1]),
                visible=True
            )
    # No user turn found at all: leave history unchanged.
    return history, "", gr.Chatbot(value=format_chat_history(history), visible=True)
# ============================================
# EXAMPLE PROMPTS
# ============================================
# Prompt suggestions rendered as sidebar buttons in the UI below.
EXAMPLE_PROMPTS = [
    "Explain how quantum computing works",
    "Write a Python function to sort a list",
    "What are the best productivity tips?",
    "Help me write a business proposal",
    "Tell me a fun fact about science",
]
# ============================================
# GRADIO 6 APPLICATION
# ============================================
# Top-level UI definition: layout, components, and event wiring.
with gr.Blocks() as demo:
    # ===== HEADER =====
    with gr.Row(elem_classes="header"):
        gr.Markdown("""
# 🤖 ChatGPT Clone
*Built with [anycoder](https://huggingface.co/spaces/akhaliq/anycoder)*
""")

    # ===== SETTINGS PANEL (Collapsible) =====
    with gr.Accordion("⚙️ Settings", open=False):
        with gr.Row():
            use_openai = gr.Checkbox(
                label="Use Real OpenAI API",
                value=False,
                info="Requires OpenAI API key"
            )
            api_key_input = gr.Textbox(
                label="OpenAI API Key",
                type="password",
                placeholder="sk-...",
                visible=False  # revealed only when the checkbox is ticked
            )

        def toggle_api_key(show):
            # Show/hide the API-key field in step with the checkbox state.
            return gr.Textbox(visible=show)

        use_openai.change(
            toggle_api_key,
            inputs=use_openai,
            outputs=api_key_input
        )
        gr.Markdown("""
### 🔑 Getting an OpenAI API Key
1. Go to [platform.openai.com](https://platform.openai.com)
2. Sign up or log in
3. Navigate to API Keys section
4. Create a new secret key
5. Paste it above
""")

    # ===== CHAT INTERFACE =====
    with gr.Row():
        # Sidebar with examples
        with gr.Column(scale=1, min_width=250):
            gr.Markdown("### 💡 Try These")
            # NOTE(review): these example buttons have no click handler
            # wired up, so clicking them does nothing — presumably they were
            # meant to populate msg_input; confirm.
            for prompt in EXAMPLE_PROMPTS:
                gr.Button(
                    prompt,
                    variant="secondary",
                    size="sm",
                    elem_classes="example-btn"
                )
            gr.Markdown("---")
            gr.Markdown("### 🛠️ Actions")
            with gr.Row():
                new_chat_btn = gr.Button("🗑️ New Chat", variant="stop", size="sm")
                retry_btn = gr.Button("🔄 Retry", variant="secondary", size="sm")
                download_btn = gr.Button("💾 Download Chat", variant="secondary", size="sm")
            gr.Markdown("---")
            gr.Markdown("### 📊 Stats")
            message_count = gr.Number(value=0, label="Messages", interactive=False)

        # Main chat area
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                avatar_images=(
                    "https://huggingface.co/datasets/huggingface/avatars/resolve/main/male.png",
                    "https://huggingface.co/datasets/huggingface/avatars/resolve/main/huggingface.png"
                ),
                # NOTE(review): "$" with display=True renders inline math as
                # display math — confirm display=False was not intended.
                latex_delimiters=[
                    {"left": "$", "right": "$", "display": True},
                    {"left": "$$", "right": "$$", "display": True}
                ],
                show_copy_all_button=True,
                render_markdown=True,
                # NOTE(review): bubble_full_width is deprecated/removed in
                # recent Gradio releases; history is fed as role/content
                # dicts elsewhere in this file, which typically requires
                # type="messages" — verify against the installed version.
                bubble_full_width=False,
                line_breaks=True,
            )
            # Input area
            with gr.Row():
                msg_input = gr.Textbox(
                    placeholder="Type your message here...",
                    label=None,
                    lines=3,
                    scale=5,
                    submit_btn=True,
                    max_lines=10,
                )
            # Action buttons
            with gr.Row():
                send_btn = gr.Button("Send Message", variant="primary", size="lg", scale=2)
                clear_btn = gr.Button("Clear", variant="secondary", size="lg", scale=1)

    # ===== FOOTER =====
    gr.Markdown("""
---
### 💡 Tips
- Press **Enter** to send a message
- Use **Shift+Enter** for a new line
- Click **New Chat** to start fresh
- **Retry** to regenerate the last response
---
*Built with ❤️ using Gradio 6*
""")

    # ===== EVENT HANDLERS =====
    def respond(
        message: str,
        history: list,
        use_openai: bool,
        api_key: str
    ) -> tuple:
        """
        Handle a user message: append it to history, stream the assistant
        reply via get_response, and yield
        (cleared_input, updated_history, message_count) per chunk.
        """
        if not message.strip():
            # NOTE(review): Gradio components dropped the .update() method
            # in 4.x — this line likely raises AttributeError on an empty
            # message; confirm against the installed Gradio version.
            return "", history, message_count.update(len(history))
        # Add user message to history
        timestamp = datetime.now().isoformat()
        history.append({
            "role": "user",
            "content": message,
            "timestamp": timestamp
        })
        # Update chatbot display
        chatbot_history = format_chat_history(history)  # NOTE(review): unused
        # Generate response
        response_parts = []
        for part in get_response(message, history, use_openai, api_key):
            response_parts.append(part)
            # Update assistant message in history
            temp_history = history + [{"role": "assistant", "content": part}]
            yield "", temp_history, len(temp_history)
        # Final response
        final_response = response_parts[-1] if response_parts else ""
        # NOTE(review): the assistant entry reuses the user message's
        # timestamp — confirm a fresh timestamp was not intended.
        history.append({
            "role": "assistant",
            "content": final_response,
            "timestamp": timestamp
        })
        yield "", history, len(history)

    # Submit message events
    # NOTE(review): every inline gr.State([]) below constructs a brand-new,
    # independent State component, so the history passed in and the history
    # written out are never the same object and never persist across turns;
    # `chatbot` is also absent from respond's outputs, so streamed replies
    # never reach the screen. Presumably a single `history_state =
    # gr.State([])` should be created once and wired (with `chatbot`) into
    # these handlers — confirm.
    submit_triggers = [
        msg_input.submit,
        send_btn.click
    ]
    for trigger in submit_triggers:
        trigger(
            fn=respond,
            inputs=[msg_input, gr.State([]), use_openai, api_key_input],
            outputs=[msg_input, gr.State([]), message_count],
            show_progress="minimal"
        )
    # Clear chat
    clear_btn.click(
        fn=clear_chat,
        outputs=[gr.State([]), msg_input, chatbot]
    )
    new_chat_btn.click(
        fn=clear_chat,
        outputs=[gr.State([]), msg_input, chatbot]
    )
    # Retry
    retry_btn.click(
        fn=retry_last_message,
        inputs=gr.State([]),
        outputs=[gr.State([]), msg_input, chatbot]
    )
    # Download chat
    # NOTE(review): download_chat returns a JSON *string*, and the output
    # target is a DownloadButton constructed inline that is never rendered
    # in the layout — this handler presumably cannot deliver a file to the
    # user; verify (a rendered gr.DownloadButton fed a file path is the
    # usual pattern).
    download_btn.click(
        fn=download_chat,
        inputs=gr.State([]),
        outputs=gr.DownloadButton(value=None, label="📥 Download Chat History")
    )
# ============================================
# LAUNCH APPLICATION
# ============================================
if __name__ == "__main__":
    # NOTE(review): in Gradio 4.x/5.x, `title`, `theme`, `css`, and
    # `footer_links` are gr.Blocks() constructor concerns, not launch()
    # parameters — passing them here would raise TypeError. This file
    # targets "Gradio 6"; verify that API actually accepts these kwargs,
    # otherwise move them to the gr.Blocks(...) call above.
    demo.launch(
        title="ChatGPT Clone - Gradio 6",
        theme=gr.themes.Soft(
            primary_hue="indigo",
            secondary_hue="purple",
            neutral_hue="slate",
            font=gr.themes.GoogleFont("Inter"),
            text_size="lg",
            spacing_size="lg",
            radius_size="md"
        ).set(
            button_primary_background_fill="*primary_600",
            button_primary_background_fill_hover="*primary_700",
            button_secondary_background_fill="*neutral_200",
            button_secondary_background_fill_hover="*neutral_300",
            block_title_text_weight="600",
            block_background_fill="*neutral_100",
            chatbot_prompt_background_fill="*primary_100",
            chatbot_reply_background_fill="*neutral_200",
        ),
        css="""
/* Custom CSS for modern chat appearance */
.header {
text-align: center;
padding: 1rem;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
border-radius: 0.5rem;
margin-bottom: 1rem;
}
.header h1 {
color: white !important;
margin: 0 !important;
font-size: 1.8rem !important;
}
.header a {
color: rgba(255, 255, 255, 0.9) !important;
text-decoration: none;
}
.header a:hover {
color: white !important;
text-decoration: underline;
}
.example-btn {
margin: 0.25rem 0;
text-align: left !important;
justify-content: flex-start !important;
}
/* Chat bubble styling */
.gradio-container .chatbot {
border-radius: 1rem;
overflow: hidden;
}
/* Smooth animations */
* {
transition: all 0.2s ease-in-out !important;
}
/* Hide scrollbar in chat */
.gradio-container .chatbot .wrap {
scrollbar-width: thin;
scrollbar-color: #667eea transparent;
}
/* Mobile responsive */
@media (max-width: 768px) {
.header h1 {
font-size: 1.4rem !important;
}
}
""",
        footer_links=[
            {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
            {"label": "Gradio", "url": "https://gradio.app"},
            {"label": "Hugging Face", "url": "https://huggingface.co"}
        ],
        show_api=False,
        quiet=True
    )