# NOTE: the original upload carried a Hugging Face Spaces page banner here
# ("Spaces: Sleeping") — kept as a comment so the file remains valid Python.
import os
import json
from typing import Optional
from datetime import datetime
import google.generativeai as genai
import gradio as gr
import PIL.Image
import tempfile
import requests
import base64
import io

# SECURITY: never hard-code API credentials in source. The key previously
# embedded here must be treated as leaked and revoked. Read it from the
# environment instead (matches the setup instructions rendered in the UI).
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "")
MODEL_NAME = "models/gemini-2.5-flash"

# Configure the SDK client; the chat path below also uses the key directly
# for raw REST calls.
genai.configure(api_key=GEMINI_API_KEY)
class GeminiChatBot:
    """Gemini-backed chatbot with per-mode system prompts and simple context replay.

    Responses are fetched via the Generative Language REST API (`requests`);
    the SDK model object is kept only for compatibility.
    """

    def __init__(self, model_name: str = MODEL_NAME):
        # SDK handle (unused by the REST path below, retained for compatibility).
        self.model = genai.GenerativeModel(model_name)
        # Bare model id for the REST URL (strip an optional "models/" prefix) so
        # the endpoint stays in sync with whatever model the caller configured —
        # previously the URL hard-coded "gemini-2.5-flash" and ignored model_name.
        self.model_name = model_name.split("/", 1)[-1]
        # Flat alternating list of "User: ..." / "Bot: ..." turn strings.
        self.conversation_history = []
        self.chat_session = None
        self.system_prompt = ""

    def set_system_prompt(self, mode: str):
        """Set system prompt based on chatbot mode (unknown modes fall back to "general")."""
        prompts = {
            "general": """You are a helpful, accurate, and friendly AI assistant.
Provide clear, concise, and informative responses.
Always be honest about limitations and uncertainty.""",
            "technical": """You are an expert technical support assistant.
Provide detailed technical solutions, code examples, and best practices.
When unsure, ask clarifying questions. Always suggest verification steps.""",
            "creative": """You are a creative writing assistant with strong storytelling abilities.
Help users with creative writing, brainstorming, and narrative development.
Provide engaging and imaginative content.""",
            "educational": """You are an educational tutor. Explain concepts clearly,
break down complex topics, and provide examples.
Encourage learning and ask clarifying questions.""",
            "medical": """You are a medical information assistant.
Provide accurate health information and general guidance.
Always recommend consulting healthcare professionals for serious concerns.
Do NOT provide emergency medical advice.""",
        }
        self.system_prompt = prompts.get(mode, prompts["general"])

    def chat(self, user_message: str, image: Optional[PIL.Image.Image] = None, mode: str = "general", temperature: float = 0.7) -> str:
        """Generate a response via the Gemini REST API, replaying prior turns as context.

        Args:
            user_message: the user's text prompt.
            image: optional PIL image, sent inline as base64 JPEG.
            mode: system-prompt persona key (see set_system_prompt).
            temperature: sampling temperature forwarded to generationConfig.

        Returns:
            The model's text reply, or an "Error: ..." string on any failure
            (errors are returned, not raised, so the UI can display them).
        """
        try:
            self.set_system_prompt(mode)

            # Encode the uploaded image (if any) as inline base64 JPEG.
            image_data = None
            if image is not None:
                buffered = io.BytesIO()
                image.save(buffered, format="JPEG")
                image_data = base64.b64encode(buffered.getvalue()).decode("utf-8")
                # Image turns get a "Universal Image Analyzer" instruction that
                # constrains the answer to exactly 3 lines.
                user_message = (
                    f"{user_message}\n\nAct as a Universal Image Analyzer. "
                    "Analyze this image and provide the output in exactly 3 lines of text."
                )

            # Build the "contents" payload: prior turns first, then the new message.
            contents = []
            for i in range(0, len(self.conversation_history) - 1, 2):
                # count=1 so only the leading role prefix is stripped, not any
                # "User: "/"Bot: " text that happens to appear inside a message.
                user_text = self.conversation_history[i].replace("User: ", "", 1)
                bot_text = self.conversation_history[i + 1].replace("Bot: ", "", 1)
                contents.append({"role": "user", "parts": [{"text": user_text}]})
                contents.append({"role": "model", "parts": [{"text": bot_text}]})

            # Current message: image part (if any) + system prompt folded into the text.
            current_parts = []
            if image_data:
                current_parts.append({"inline_data": {"mime_type": "image/jpeg", "data": image_data}})
            final_user_content = f"[SYSTEM: {self.system_prompt}]\n\n{user_message}"
            current_parts.append({"text": final_user_content})
            contents.append({"role": "user", "parts": current_parts})

            # REST endpoint — uses the instance's configured model, not a hard-coded id.
            url = (
                "https://generativelanguage.googleapis.com/v1beta/models/"
                f"{self.model_name}:generateContent?key={GEMINI_API_KEY}"
            )
            payload = {
                "contents": contents,
                "generationConfig": {
                    "temperature": temperature,
                    "maxOutputTokens": 4000,
                },
            }
            headers = {"Content-Type": "application/json"}

            response = requests.post(url, json=payload, headers=headers, timeout=30)
            response.raise_for_status()
            response_json = response.json()

            # Extract the first candidate's first text part.
            if "candidates" not in response_json or not response_json["candidates"]:
                return "Error: No candidates in API response"
            candidate = response_json["candidates"][0]
            if (
                "content" in candidate
                and "parts" in candidate["content"]
                and len(candidate["content"]["parts"]) > 0
            ):
                bot_response = candidate["content"]["parts"][0].get("text", "")
            else:
                return "Error: Unexpected API response format"

            # Store FULL turns. Previously turns were truncated to 100 chars with a
            # literal "..." appended, and that mangled text was replayed to the API
            # as context on every later turn.
            self.conversation_history.append(f"User: {user_message}")
            self.conversation_history.append(f"Bot: {bot_response}")
            return bot_response
        except Exception as e:
            # Returned (not raised) so Gradio shows the failure in the chat window.
            return f"Error: {str(e)}\n\nMake sure your API key is valid."
# Initialize chatbot
# Single module-level instance shared by every Gradio callback below; its
# conversation_history therefore persists across turns (and across browser
# sessions of this process).
chatbot = GeminiChatBot()
| # Gradio Interface Functions | |
def respond(message: str, image: Optional[PIL.Image.Image], chat_history: list, mode: str, temperature: float):
    """Handle one user turn and return values for the Gradio outputs.

    Returns:
        ("", None, chat_history): clears the textbox and image input, and
        passes back the history (now including this exchange) to the Chatbot UI.
    """
    response = chatbot.chat(message, image=image, mode=mode, temperature=temperature)

    content = ""
    if image is not None:
        # Persist the image so the chat UI can render it. delete=False is
        # deliberate: Gradio serves the file after this handler returns.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
            # Save through the open handle (re-opening f.name fails on Windows).
            image.save(f, format="PNG")
            img_path = f.name.replace("\\", "/")
        # Embed the saved image as markdown — the original `content += f"\n"`
        # dropped img_path entirely, which was clearly a garbled embed.
        content += f"![image]({img_path})\n"
    if message:
        content += message
    if content:
        chat_history.append({"role": "user", "content": content})
    chat_history.append({"role": "assistant", "content": response})
    return "", None, chat_history
def clear_history():
    """Wipe the bot's stored context and reset the chat window and textbox."""
    chatbot.conversation_history = []
    return [], ""
def export_chat(chat_history: list) -> str:
    """Serialize the visible chat transcript to pretty-printed JSON.

    Returns a placeholder message when there is nothing to export.
    """
    if chat_history:
        snapshot = {
            "timestamp": datetime.now().isoformat(),
            "conversation": chat_history,
        }
        return json.dumps(snapshot, indent=2)
    return "No chat history to export"
# Create Gradio Interface
with gr.Blocks(title="Gemini ChatBot", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# 🤖 Nexus Intelligent ChatBot
A generalized, accurate chatbot powered by Google's Gemini AI.
Select your mode and start chatting!
""")

    with gr.Row():
        with gr.Column(scale=3):
            # type="messages" matches the {"role": ..., "content": ...} dicts
            # that respond() appends (the default tuple format would break).
            chatbot_ui = gr.Chatbot(
                label="Chat History",
                height=500,
                show_label=True,
                type="messages",
            )
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")
            mode = gr.Radio(
                choices=["general", "technical", "creative", "educational", "medical"],
                value="general",
                label="Chat Mode",
                info="Select conversation style",
            )
            temperature = gr.Slider(
                minimum=0,
                maximum=2,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Higher = more creative, Lower = more focused",
            )

    with gr.Row():
        with gr.Column(scale=4):
            msg_input = gr.Textbox(
                placeholder="Type your message here...",
                label="Your Message",
                lines=2,
            )
        with gr.Column(scale=1, min_width=200):
            img_input = gr.Image(
                type="pil",
                label="Upload Image",
                sources=["upload", "clipboard"],
                show_label=True,
                interactive=True,
            )

    with gr.Row():
        send_btn = gr.Button("Send", variant="primary", scale=2)
        clear_btn = gr.Button("Clear Chat", scale=1)
        export_btn = gr.Button("Export Chat", scale=1)

    # Hidden until the user exports a transcript.
    export_output = gr.Textbox(
        label="Exported Chat (JSON)",
        interactive=False,
        visible=False,
    )

    # Event handlers
    send_btn.click(
        respond,
        inputs=[msg_input, img_input, chatbot_ui, mode, temperature],
        outputs=[msg_input, img_input, chatbot_ui],
    )
    msg_input.submit(
        respond,
        inputs=[msg_input, img_input, chatbot_ui, mode, temperature],
        outputs=[msg_input, img_input, chatbot_ui],
    )
    # Clear both bot context and UI, then reset the image input separately.
    clear_btn.click(
        clear_history,
        outputs=[chatbot_ui, msg_input],
    ).then(
        lambda: None,
        None,
        img_input,
    )

    def get_and_show_export(chat_history):
        """Fill the export box with the JSON transcript and make it visible."""
        # One combined update — the previous version returned two values and
        # listed export_output twice in `outputs`.
        return gr.update(value=export_chat(chat_history), visible=True)

    export_btn.click(
        get_and_show_export,
        inputs=[chatbot_ui],
        outputs=[export_output],
    )

    gr.Markdown("""
### 📝 Chat Modes:
- **General**: Friendly assistant for everyday questions
- **Technical**: Expert technical support and code help
- **Creative**: Storytelling and creative writing
- **Educational**: Learning and concept explanation
- **Medical**: Health information (consult professionals for serious concerns)
### 🔑 Setup:
1. Get your API key from [Google AI Studio](https://aistudio.google.com/app/apikey)
2. Set `GEMINI_API_KEY` in `.env` file or environment variables
3. Run the app and start chatting!
""")
if __name__ == "__main__":
    # Launch the Gradio server only when run as a script, not on import.
    demo.launch()