# kamcio1989's picture
# Update app.py from anycoder
# 24159fe verified
import gradio as gr
import google.generativeai as genai
import os
import time
from typing import List, Tuple, Dict, Any
import json
def configure_gemini(api_key: str):
    """Configure the google-generativeai client and build a chat model.

    Returns a ``(model, error)`` pair: ``(GenerativeModel, None)`` on
    success, ``(None, message)`` when configuration fails for any reason.
    """
    try:
        genai.configure(api_key=api_key)
        return genai.GenerativeModel('gemini-pro'), None
    except Exception as exc:
        return None, str(exc)
def gemini_chat_response(message: str, history: List[Dict[str, str]], model, temperature: float = 0.7, max_tokens: int = 1024) -> str:
    """Return a single (non-streaming) Gemini reply to *message*.

    *history* is a list of ``{"role", "content"}`` dicts in OpenAI style;
    it is translated to Gemini's ``{"role", "parts"}`` format (Gemini
    calls the assistant role "model").  Any failure is reported as an
    ``"Error: ..."`` string rather than raised.
    """
    if not model:
        return "Error: Please enter a valid Google Gemini API key first."
    try:
        # Map OpenAI-style roles onto Gemini's; unknown roles are skipped.
        role_map = {"user": "user", "assistant": "model"}
        past_turns = [
            {"role": role_map[turn.get("role")], "parts": [turn.get("content", "")]}
            for turn in history
            if turn.get("role") in role_map
        ]
        chat = model.start_chat(history=past_turns)
        config = genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        return chat.send_message(message, generation_config=config).text
    except Exception as exc:
        return f"Error: {str(exc)}"
def stream_gemini_response(message: str, history: List[Dict[str, str]], model, temperature: float = 0.7, max_tokens: int = 1024):
    """Yield progressively longer Gemini replies to *message*.

    Each yielded value is the full accumulated response so far, suitable
    for replacing a chat bubble in place.  Errors are yielded as a single
    ``"Error: ..."`` string instead of being raised.
    """
    if not model:
        yield "Error: Please enter a valid Google Gemini API key first."
        return
    try:
        # Translate OpenAI-style turns to Gemini's role/parts format.
        role_map = {"user": "user", "assistant": "model"}
        past_turns = [
            {"role": role_map[turn.get("role")], "parts": [turn.get("content", "")]}
            for turn in history
            if turn.get("role") in role_map
        ]
        chat = model.start_chat(history=past_turns)
        config = genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        )
        accumulated = ""
        for piece in chat.send_message(message, generation_config=config, stream=True):
            if piece.text:
                accumulated += piece.text
                yield accumulated
                time.sleep(0.02)  # small pause for a smoother streaming effect
    except Exception as exc:
        yield f"Error: {str(exc)}"
# Custom CSS injected into the Blocks app below: gradient header, status
# dot colours, chat bubble tints and panel backgrounds.  The selectors
# target class names set via elem_classes / embedded HTML in this file.
# NOTE: this is a runtime string passed to gr.Blocks(css=...), so the
# /* ... */ comments inside it are CSS comments, not Python comments.
custom_css = """
/* Main container styling */
.gradio-container {
max-width: 900px !important;
margin: auto !important;
}
/* Chatbot styling */
.chatbot-container {
height: 600px !important;
}
/* Header styling */
.main-header {
text-align: center;
padding: 1.5rem;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border-radius: 10px;
margin-bottom: 1.5rem;
}
/* API key section */
.api-section {
background: #f8f9fa;
padding: 1rem;
border-radius: 8px;
border: 1px solid #e9ecef;
margin-bottom: 1rem;
}
/* Settings panel */
.settings-panel {
background: #f8f9fa;
padding: 1rem;
border-radius: 8px;
border: 1px solid #e9ecef;
}
/* Button styling */
.primary-button {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none !important;
}
/* Message styling */
.user-message {
background: #e3f2fd !important;
}
.assistant-message {
background: #f3e5f5 !important;
}
/* Status indicator */
.status-indicator {
display: inline-block;
width: 10px;
height: 10px;
border-radius: 50%;
margin-right: 8px;
}
.status-connected {
background-color: #4caf50;
}
.status-disconnected {
background-color: #f44336;
}
"""
# Holder for the configured Gemini model and a "configured" flag.
# NOTE(review): this is MODULE-level state shared by every visitor of the
# app, not per-browser-session state — concurrent users would overwrite
# each other's configured model.  gr.State would be needed for true
# per-session isolation; confirm whether single-user use is intended.
session_state = {"model": None, "configured": False}
# Build the Gradio UI.  Fixes applied versus the original:
#  * Chatbot is created with type="messages" to match the role/content
#    dicts the handlers produce.
#  * respond() is a pure generator: every branch yields (a bare
#    `return value` inside a generator is silently discarded by Python,
#    so the original's non-streaming branch produced no output at all).
#  * Streaming now writes each partial reply into history before
#    yielding, so the chat bubble actually updates while generating.
#  * The MultimodalTextbox value is a {"text", "files"} dict and is
#    unpacked instead of being treated as a plain string.
#  * The buggy history filter `[h for h in history if h != history[-1]]`
#    (which dropped ALL turns equal to the last one) is replaced by an
#    explicit "turns before this message" list.
with gr.Blocks(theme=gr.themes.Soft(
    primary_hue="purple",
    secondary_hue="blue",
    neutral_hue="slate",
    text_size="lg",
    spacing_size="lg",
    radius_size="md"
).set(
    button_primary_background_fill="linear-gradient(135deg, #667eea 0%, #764ba2 100%)",
    button_primary_background_fill_hover="linear-gradient(135deg, #5a6fd8 0%, #6a4190 100%)",
    block_title_text_weight="600",
    block_background_fill="#fafafa",
), css=custom_css) as demo:

    # ---- Header ----
    gr.HTML("""
    <div class="main-header">
        <h1>🤖 Google Gemini Chat Interface</h1>
        <p>Powered by Google's advanced AI model</p>
        <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: white; text-decoration: underline;">
            Built with anycoder
        </a>
    </div>
    """)

    # ---- API key configuration ----
    with gr.Row():
        with gr.Column(scale=3):
            api_key_input = gr.Textbox(
                label="Google Gemini API Key",
                type="password",
                placeholder="Enter your API key from Google AI Studio",
                info="Get your API key from: https://makersuite.google.com/app/apikey"
            )
        with gr.Column(scale=1):
            configure_btn = gr.Button("Configure", variant="primary", size="lg")

    # Connection status dot + label (classes styled by custom_css).
    status_display = gr.HTML('<span class="status-indicator status-disconnected"></span>Not Configured')

    # ---- Chat interface ----
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                label="Chat with Gemini",
                height=600,
                show_copy_button=True,
                type="messages",  # handlers exchange {"role", "content"} dicts
                avatar_images=(
                    None,  # user avatar (default)
                    "https://www.google.com/gemini/static/images/gemini/favicon-32x32.png"  # Gemini logo
                ),
                placeholder="Hello! I'm Gemini. How can I help you today?"
            )
            with gr.Row():
                msg_input = gr.MultimodalTextbox(
                    label="Your Message",
                    placeholder="Type your message here...",
                    file_types=["image"],
                    file_count="single",
                    show_label=False
                )
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary", scale=2)
                clear_btn = gr.Button("Clear Chat", variant="secondary", scale=1)

        with gr.Column(scale=1):
            # ---- Settings panel ----
            with gr.Group(elem_classes=["settings-panel"]):
                gr.Markdown("### ⚙️ Chat Settings")
                temperature = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                    info="Controls randomness (0 = focused, 2 = creative)"
                )
                max_tokens = gr.Slider(
                    minimum=100,
                    maximum=2048,
                    value=1024,
                    step=50,
                    label="Max Tokens",
                    info="Maximum response length"
                )
                streaming = gr.Checkbox(
                    label="Enable Streaming",
                    value=True,
                    info="Show response as it's being generated"
                )
                model_info = gr.HTML("""
                <div style="margin-top: 10px; padding: 10px; background: #e8f5e8; border-radius: 5px;">
                    <small><strong>Model:</strong> gemini-pro</small><br>
                    <small><strong>Status:</strong> Ready to chat</small>
                </div>
                """)

    # ---- Example prompts ----
    with gr.Accordion("💡 Example Prompts", open=False):
        gr.Examples(
            examples=[
                ["Explain quantum computing in simple terms"],
                ["Write a short poem about artificial intelligence"],
                ["What are the latest trends in machine learning?"],
                ["Help me plan a healthy meal plan for one week"],
                ["Explain the theory of relativity with analogies"]
            ],
            inputs=[msg_input],
            label="Click to try these examples"
        )

    def handle_configuration(api_key):
        """Validate the key, build the model, update the status badge.

        Returns (status_html, chatbot_update); the chatbot is hidden
        until a key is successfully configured.
        """
        if not api_key or len(api_key) < 10:
            return (
                '<span class="status-indicator status-disconnected"></span>Invalid API Key',
                gr.update(visible=False)
            )
        model, error = configure_gemini(api_key)
        if error:
            return (
                f'<span class="status-indicator status-disconnected"></span>Error: {error}',
                gr.update(visible=False)
            )
        session_state["model"] = model
        session_state["configured"] = True
        return (
            '<span class="status-indicator status-connected"></span>Connected',
            gr.update(visible=True)
        )

    def respond(message, history, temperature, max_tokens, stream_enabled):
        """Handle one chat turn; yields (input_reset, updated_history).

        Must be a generator in EVERY branch: Gradio treats any function
        containing ``yield`` as streaming, and a plain ``return value``
        inside a generator is discarded, never shown to the user.
        """
        history = list(history or [])
        # MultimodalTextbox submits {"text": ..., "files": [...]}.
        if isinstance(message, dict):
            text = (message.get("text") or "").strip()
        else:
            text = (message or "").strip()
        if not text:
            yield None, history
            return
        if not session_state.get("configured"):
            history.append({"role": "user", "content": text})
            history.append({"role": "assistant", "content": "Please configure your API key first."})
            yield None, history
            return
        prior = list(history)  # turns BEFORE this message, for Gemini context
        history.append({"role": "user", "content": text})
        if stream_enabled:
            # Grow the assistant bubble in place as chunks arrive.
            history.append({"role": "assistant", "content": ""})
            yield None, history
            for partial in stream_gemini_response(text, prior, session_state["model"], temperature, max_tokens):
                history[-1]["content"] = partial
                yield None, history
        else:
            reply = gemini_chat_response(text, prior, session_state["model"], temperature, max_tokens)
            history.append({"role": "assistant", "content": reply})
            yield None, history

    # ---- Event wiring ----
    configure_btn.click(
        handle_configuration,
        inputs=[api_key_input],
        outputs=[status_display, chatbot]
    )
    msg_input.submit(
        respond,
        inputs=[msg_input, chatbot, temperature, max_tokens, streaming],
        outputs=[msg_input, chatbot]
    )
    submit_btn.click(
        respond,
        inputs=[msg_input, chatbot, temperature, max_tokens, streaming],
        outputs=[msg_input, chatbot]
    )
    clear_btn.click(
        lambda: [],
        outputs=[chatbot]
    )

    # ---- Instructions footer ----
    gr.HTML("""
    <div style="margin-top: 2rem; padding: 1rem; background: #f0f4f8; border-radius: 8px; text-align: center;">
        <h3>📖 How to Use</h3>
        <ol style="text-align: left; max-width: 600px; margin: 0 auto;">
            <li>Get your Gemini API key from <a href="https://makersuite.google.com/app/apikey" target="_blank">Google AI Studio</a></li>
            <li>Enter your API key and click "Configure"</li>
            <li>Start chatting with Gemini!</li>
            <li>Adjust temperature and max tokens for different response styles</li>
        </ol>
    </div>
    """)
# Start the app.  ``footer_links`` is not a parameter of
# gr.Blocks.launch() and made startup fail with a TypeError; the same
# links already appear in the in-app header and footer HTML, so launch
# with defaults instead.
demo.launch()