# Hugging Face Space: Gemini chatbot (Gradio app)
| import os | |
| import time | |
| from typing import List, Optional, Dict, Any | |
| import google.genai as genai | |
| from google.genai import types | |
| import gradio as gr | |
| from PIL import Image | |
print("google-genai version loaded")  # startup breadcrumb for the Space logs

# ===================== CONFIG =====================
# On Hugging Face Spaces, set GOOGLE_API_KEY as a Space Secret; users can
# also paste a key into the UI, which takes precedence (see bot()).
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")

# Uploaded images are downscaled to this width (px) before being sent to
# the model; height is derived to preserve aspect ratio (see preprocess_image).
IMAGE_WIDTH = 512

# Static HTML fragments rendered at the top of the page.
TITLE = """<h1 align="center" style="background: linear-gradient(90deg, #9D50BB 0%, #6E48AA 50%, #4A90E2 100%);
-webkit-background-clip: text; -webkit-text-fill-color: transparent; font-size: 3em; font-weight: bold; margin-bottom: 10px;">
Gemini Chatbot π₯ with Kelwa</h1>"""
SUBTITLE = """<h2 align="center" style="color: #B19CD9; font-size: 1.5em; margin-top: 0;">
π¨ Create with Multimodal Gemini</h2>"""
DESCRIPTION = """
<p align="center" style="color: #6E48AA; font-size: 1.1em;">
Enter your Google API Key to start chatting with Gemini AI! Get your API key from
<a href="https://aistudio.google.com/app/apikey" target="_blank" style="color: #9D50BB; font-weight: bold;">Google AI Studio</a>
</p>
"""
# Custom CSS (purple / baby-blue theme) injected into the app via
# gr.Blocks(css=...). The string content is passed to the browser verbatim.
CUSTOM_CSS = """
.gradio-container {
    max-width: 1400px !important;
    margin: auto !important;
    background: linear-gradient(to bottom, #f0e6ff 0%, #e6f3ff 100%) !important;
}
.contain {
    max-width: 100% !important;
}
button {
    background: linear-gradient(90deg, #9D50BB 0%, #6E48AA 100%) !important;
    border: none !important;
    color: white !important;
    font-weight: 600 !important;
    border-radius: 8px !important;
    padding: 10px 20px !important;
    transition: all 0.3s ease !important;
}
button:hover {
    background: linear-gradient(90deg, #B19CD9 0%, #89CFF0 100%) !important;
    transform: scale(1.05) !important;
}
.input-text, textarea, input {
    border: 2px solid #B19CD9 !important;
    border-radius: 10px !important;
    padding: 10px !important;
}
.input-text:focus, textarea:focus, input:focus {
    border-color: #6E48AA !important;
    box-shadow: 0 0 10px rgba(157, 80, 187, 0.3) !important;
    outline: none !important;
}
label {
    color: #6E48AA !important;
    font-weight: 600 !important;
    margin-bottom: 8px !important;
}
/* Chatbot message styling */
.message-wrap {
    padding: 10px !important;
}
.user {
    background: linear-gradient(135deg, #9D50BB 0%, #6E48AA 100%) !important;
    color: white !important;
    border-radius: 15px !important;
    padding: 12px 16px !important;
}
.bot {
    background: linear-gradient(135deg, #E6F3FF 0%, #D4E4F7 100%) !important;
    color: #333 !important;
    border-radius: 15px !important;
    padding: 12px 16px !important;
    border-left: 4px solid #89CFF0 !important;
}
/* Accordion styling */
.accordion {
    background: rgba(177, 156, 217, 0.1) !important;
    border: 2px solid #B19CD9 !important;
    border-radius: 10px !important;
    margin: 10px 0 !important;
}
/* Image container */
.image-container {
    border: 3px solid #89CFF0 !important;
    border-radius: 15px !important;
    padding: 10px !important;
    background: white !important;
}
/* Slider styling */
input[type="range"] {
    accent-color: #9D50BB !important;
}
/* Better spacing */
.gap {
    gap: 20px !important;
}
.block {
    padding: 15px !important;
    border-radius: 10px !important;
}
footer {
    display: none !important;
}
"""
# ===================== HELPERS =====================
def preprocess_stop_sequences(stop_sequences: str) -> Optional[List[str]]:
    """Parse a comma-separated string of stop sequences.

    Args:
        stop_sequences: Raw user input, e.g. "STOP, END, ###". May be
            None or blank.

    Returns:
        A list of non-empty, whitespace-trimmed sequences, or None when
        the input is empty or contains only separators/whitespace.
    """
    # Empty/blank input (including None) means "no stop sequences".
    # No try/except: none of these operations can raise for str/None input,
    # and a broad `except Exception` would only mask real bugs.
    if not stop_sequences or not stop_sequences.strip():
        return None
    sequences = [part.strip() for part in stop_sequences.split(",") if part.strip()]
    # Input like ",," or " , " yields an empty list -> same as no input.
    return sequences or None
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Downscale *image* to IMAGE_WIDTH wide, preserving aspect ratio.

    Returns:
        The resized image, or None when the input is missing, has a
        zero dimension, or resizing fails for any reason.
    """
    try:
        if image is None:
            return None
        width, height = image.size
        # Guard against degenerate images before dividing by width.
        if not (width and height):
            return None
        # Scale height by the same factor applied to the width.
        scaled_height = int(height * IMAGE_WIDTH / width)
        return image.resize((IMAGE_WIDTH, scaled_height), Image.LANCZOS)
    except Exception as e:
        # Best-effort: log and signal failure rather than break the handler.
        print(f"Error preprocessing image: {e}")
        return None
# ===================== CHAT FUNCTIONS =====================
def user(message: str, history: Optional[List[Dict]]) -> tuple:
    """Append the user's message to the chat history (messages format).

    Args:
        message: Raw text from the input textbox.
        history: Transcript as a list of {"role", "content"} dicts, or
            None on the first turn.

    Returns:
        ("", history) — the empty string clears the input textbox;
        history is mutated in place and returned for the Chatbot output.
    """
    # No try/except: none of these operations can raise for str/None input,
    # and the original broad `except Exception` only hid potential bugs.
    if history is None:
        history = []
    text = message.strip() if message else ""
    if not text:
        # Ignore blank submissions; leave the transcript untouched.
        return "", history
    history.append({"role": "user", "content": text})
    return "", history
def bot(
    google_key: str,
    image_prompt: Optional[Image.Image],
    temperature: float,
    max_output_tokens: int,
    stop_sequences: str,
    top_k: int,
    top_p: float,
    history: Optional[List[Dict]]
):
    """Stream a Gemini reply into the chat history.

    Generator used as a Gradio event handler: it yields the mutated
    ``history`` (messages format) after every streamed chunk so the
    Chatbot updates incrementally. Failures are surfaced as assistant
    messages instead of raised exceptions so the UI keeps working.

    Args:
        google_key: API key typed in the UI; falls back to the
            GOOGLE_API_KEY secret/env var when blank.
        image_prompt: Optional PIL image to send with the prompt.
        temperature: Sampling temperature, clamped to [0, 2].
        max_output_tokens: Response length cap, clamped to [1, 8192].
        stop_sequences: Comma-separated stop strings (may be empty).
        top_k: Token pool size, clamped to [1, 100]; <= 0 disables top-k.
        top_p: Nucleus threshold, clamped to [0, 1].
        history: Transcript; the last entry is the pending user turn.
    """
    try:
        # No pending user message — nothing to do.
        if not history or len(history) == 0:
            yield history or []
            return
        # Prefer the key from the textbox; fall back to the configured secret.
        api_key = google_key.strip() if google_key else GOOGLE_API_KEY
        if not api_key:
            history.append({
                "role": "assistant",
                "content": "β **Error**: API key is missing. Please provide a valid Google API key in the textbox below.\n\nGet your free API key from [Google AI Studio](https://aistudio.google.com/app/apikey)"
            })
            yield history
            return
        # Client construction fails fast on a malformed key.
        try:
            client = genai.Client(api_key=api_key)
        except Exception as e:
            history.append({
                "role": "assistant",
                "content": f"β **Error**: Invalid API key.\n\n{str(e)}\n\nPlease check your API key and try again."
            })
            yield history
            return
        # The pending user message is the last history entry (see user()).
        user_message = history[-1]["content"]
        # Clamp parameters to the ranges the API accepts.
        temperature = max(0.0, min(2.0, float(temperature)))
        max_output_tokens = max(1, min(8192, int(max_output_tokens)))
        top_k = max(1, min(100, int(top_k))) if top_k > 0 else None  # 0 = disabled
        top_p = max(0.0, min(1.0, float(top_p)))
        # Build the generation config.
        config = types.GenerateContentConfig(
            temperature=temperature,
            max_output_tokens=max_output_tokens,
            top_k=top_k,
            top_p=top_p,
        )
        # Parse once (the original parsed twice) and attach only when non-empty.
        stops = preprocess_stop_sequences(stop_sequences)
        if stops:
            config.stop_sequences = stops
        # Prepare content: text-only, or text + downscaled image.
        if image_prompt is None:
            contents = [user_message]
        else:
            processed_image = preprocess_image(image_prompt)
            if processed_image is None:
                history.append({
                    "role": "assistant",
                    "content": "β **Error**: Could not process the image. Please try uploading another image."
                })
                yield history
                return
            contents = [user_message, processed_image]
        # Generate and stream the response.
        try:
            response = client.models.generate_content_stream(
                model='gemini-2.0-flash-exp',
                contents=contents,
                config=config
            )
            # Grow an empty assistant message chunk by chunk.
            history.append({"role": "assistant", "content": ""})
            for chunk in response:
                if chunk.text:
                    history[-1]["content"] += chunk.text
                    yield history
                    time.sleep(0.01)  # small pause smooths the UI stream
            # Handle empty response (e.g. everything blocked by safety filters).
            if not history[-1]["content"]:
                history[-1]["content"] = "β οΈ No response generated. The content may have been blocked by safety filters. Please try a different prompt."
                yield history
        except Exception as e:
            # Map common API failures to friendlier messages.
            error_message = str(e)
            if "API_KEY_INVALID" in error_message or "invalid" in error_message.lower():
                error_msg = "β **Invalid API Key**. Please check your Google API key and try again."
            elif "quota" in error_message.lower():
                error_msg = "β **Quota Exceeded**. You've reached the API usage limit. Please try again later or check your quota."
            else:
                error_msg = f"β **Error generating response**: {error_message}"
            history.append({
                "role": "assistant",
                "content": error_msg
            })
            yield history
            return
    except Exception as e:
        # Last-resort guard so the handler never raises into Gradio.
        print(f"Critical error in bot function: {e}")
        if history:
            history.append({
                "role": "assistant",
                "content": f"β **Critical error**: {str(e)}"
            })
        yield history or []
def clear_chat():
    """Reset the conversation: empty transcript and no uploaded image."""
    fresh_history = []
    return fresh_history, None
# ===================== UI =====================
# NOTE: theme/css must be passed to gr.Blocks() itself — gr.Blocks.launch()
# has no such parameters, so styling passed there is never applied.
with gr.Blocks(
    title="Gemini Chatbot with Kelwa",
    theme=gr.themes.Soft(
        primary_hue="purple",
        secondary_hue="blue",
        spacing_size="md",
        radius_size="lg"
    ),
    css=CUSTOM_CSS
) as demo:
    # Header
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    gr.HTML(DESCRIPTION)
    # Main chat interface: image/API-key sidebar + chat panel
    with gr.Row(equal_height=False):
        with gr.Column(scale=1, min_width=300):
            image_prompt = gr.Image(
                type="pil",
                label="πΌοΈ Upload Image (Optional)",
                height=350,
                elem_classes="image-container"
            )
            api_key = gr.Textbox(
                label="π Google API Key",
                type="password",
                placeholder="Enter your Google API key here...",
                info="Your API key is not stored. Get one from Google AI Studio."
            )
            clear_btn = gr.Button("ποΈ Clear Chat", variant="secondary", size="sm")
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(
                label="π¬ Gemini Chat",
                # user()/bot() produce {"role", "content"} dicts; the default
                # tuples format would reject them.
                type="messages",
                height=550
            )
    # Message input
    with gr.Row():
        msg = gr.Textbox(
            placeholder="β¨ Type your message here and press Enter...",
            label="Your Message",
            show_label=False,
            scale=9,
            lines=2,
            max_lines=5
        )
        submit_btn = gr.Button("Send π€", scale=1, variant="primary")
    # Generation-parameter controls (collapsed by default)
    with gr.Accordion("βοΈ Generation Parameters", open=False):
        with gr.Row():
            temperature = gr.Slider(
                minimum=0,
                maximum=2,
                value=1.0,
                step=0.1,
                label="π‘οΈ Temperature",
                info="Higher = more creative, Lower = more focused"
            )
            max_tokens = gr.Slider(
                minimum=100,
                maximum=8192,
                value=2048,
                step=100,
                label="π Max Output Tokens",
                info="Maximum length of response"
            )
            stop_seq = gr.Textbox(
                label="π Stop Sequences (comma-separated)",
                placeholder="STOP, END, ###",
                info="Optional sequences to stop generation",
                value=""
            )
    with gr.Accordion("π¬ Advanced Parameters", open=False):
        with gr.Row():
            top_k = gr.Slider(
                minimum=0,
                maximum=100,
                value=40,
                step=1,
                label="π Top-K",
                info="Limits token selection pool (0 = disabled)"
            )
            top_p = gr.Slider(
                minimum=0,
                maximum=1,
                value=0.95,
                step=0.01,
                label="π Top-P (Nucleus)",
                info="Cumulative probability threshold"
            )
    # Clickable example prompts that fill the message box
    gr.Examples(
        examples=[
            ["Explain quantum computing in simple terms"],
            ["Write a Python function to sort a list"],
            ["What are the health benefits of meditation?"],
            ["Create a short story about a robot learning to paint"],
        ],
        inputs=msg,
        label="π‘ Example Prompts"
    )
    # Event handlers: Enter key and Send button share the same pipeline —
    # user() appends the message synchronously (queue=False), then bot()
    # streams the reply into the chatbot.
    msg.submit(
        user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False
    ).then(
        bot,
        inputs=[
            api_key,
            image_prompt,
            temperature,
            max_tokens,
            stop_seq,
            top_k,
            top_p,
            chatbot
        ],
        outputs=chatbot
    )
    submit_btn.click(
        user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False
    ).then(
        bot,
        inputs=[
            api_key,
            image_prompt,
            temperature,
            max_tokens,
            stop_seq,
            top_k,
            top_p,
            chatbot
        ],
        outputs=chatbot
    )
    clear_btn.click(
        clear_chat,
        outputs=[chatbot, image_prompt]
    )
# Launch the app
if __name__ == "__main__":
    # queue() is required for streaming generator handlers like bot();
    # max_size bounds the pending-request backlog.
    demo.queue(max_size=20)
    # theme/css are gr.Blocks() constructor arguments, not launch()
    # arguments — passing them to launch() raises TypeError on current
    # Gradio releases, so only launch-time options belong here.
    demo.launch(ssr_mode=False)