Spaces:
Sleeping
Sleeping
| """ | |
| Gradio user interface for the Gemini AI Agent. | |
| """ | |
| import gradio as gr | |
| import asyncio | |
| import logging | |
| from typing import List, Tuple, Optional | |
| from src.agent import GeminiAgent | |
| from src.utils import validate_question, format_error_message | |
| logger = logging.getLogger(__name__) | |
def create_interface(agent: GeminiAgent) -> gr.Blocks:
    """
    Build the Gradio Blocks UI for the Gemini AI agent.

    The interface exposes a question box plus an optional context box, a
    read-only response area, a sidebar with agent stats, and buttons for
    submitting, clearing the conversation history, and refreshing the stats.

    Args:
        agent: The GeminiAgent instance that answers questions; its
            conversation history and stats back the UI controls.

    Returns:
        gr.Blocks: The assembled (but not yet launched) Gradio interface.
    """
    # NOTE(review): several emoji string literals below (e.g. "β", "π€",
    # "β¨") look mojibake'd by a past encoding round-trip. They are kept
    # byte-for-byte here; confirm the intended glyphs before changing them.

    async def process_question_async(question: str, context: str = "") -> str:
        """Forward to the agent, mapping an empty context string to None."""
        return await agent.process_question(question, context or None)

    def process_question_sync(question: str, context: str = "") -> str:
        """
        Synchronous wrapper for question processing (required by Gradio).

        Validates the question, runs the async agent call to completion on a
        private event loop, and returns a Markdown-formatted answer or error.
        """
        try:
            # Validate question before doing any async work.
            is_valid, error_msg = validate_question(question)
            if not is_valid:
                return f"β **Error**: {error_msg}"
            # asyncio.run() creates a fresh loop, runs the coroutine, and
            # tears the loop down completely. The previous
            # new_event_loop()/set_event_loop() approach closed the loop but
            # left it installed as the thread's current event loop, breaking
            # any later asyncio use on the same Gradio worker thread.
            result = asyncio.run(process_question_async(question, context))
            return f"π€ **AI Response**:\n\n{result}"
        except Exception as e:
            # UI boundary: log the full traceback, show a friendly message.
            logger.exception("Failed to process question")
            return f"β **Error**: {format_error_message(e)}"

    def clear_conversation():
        """Reset agent history; blank the question box and confirm in the response box."""
        agent.clear_history()
        return "", "Conversation history cleared! β¨"

    def get_agent_info() -> str:
        """Render the agent's current stats and capabilities as Markdown."""
        stats = agent.get_stats()
        return f"""
## π€ Agent Information
**Model**: {stats['model']}
**Conversation Length**: {stats['conversation_length']} exchanges
**Max Tokens**: {stats['max_tokens']}
**Temperature**: {stats['temperature']}
### π― Specialized Capabilities
- **Complex Research**: Multi-source fact-checking and analysis
- **Mathematical Reasoning**: Step-by-step problem solving
- **Multi-modal Analysis**: Processing images, videos, and audio
- **Data Interpretation**: Tables, charts, and statistical analysis
- **Creative Problem Solving**: Innovative approaches to unusual questions
"""

    # --- Layout ------------------------------------------------------------
    with gr.Blocks(
        title="Gemini AI Research Agent",
        theme=gr.themes.Soft(),
        css="""
.container {
    max-width: 1200px;
    margin: auto;
}
.question-box {
    border-left: 4px solid #4CAF50;
    padding-left: 16px;
}
.response-box {
    background-color: #f8f9fa;
    border-radius: 8px;
    padding: 16px;
}
""",
    ) as interface:
        gr.Markdown("""
# π§ Gemini AI Research Agent
An advanced AI assistant powered by Google's Gemini 1.5 Flash, specialized in handling complex research questions, data analysis, and multi-modal content processing.
**Perfect for**: Academic research, fact-checking, mathematical problems, data analysis, and challenging multi-step questions.
""")
        with gr.Row():
            # Main column: question entry + response display.
            with gr.Column(scale=2):
                with gr.Group():
                    gr.Markdown("## π¬ Ask Your Question")
                    question_input = gr.Textbox(
                        label="Question",
                        placeholder="Enter your research question here... (e.g., 'How many studio albums were published by Mercedes Sosa between 2000 and 2009?')",
                        lines=4,
                        elem_classes=["question-box"],
                    )
                    context_input = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Provide any additional context, constraints, or specific requirements...",
                        lines=2,
                    )
                    with gr.Row():
                        submit_btn = gr.Button("π Ask Question", variant="primary", size="lg")
                        clear_btn = gr.Button("ποΈ Clear History", variant="secondary")
                with gr.Group():
                    gr.Markdown("## π Response")
                    response_output = gr.Textbox(
                        label="AI Response",
                        lines=15,
                        interactive=False,
                        elem_classes=["response-box"],
                    )
            # Sidebar column: agent status and usage tips.
            with gr.Column(scale=1):
                with gr.Group():
                    gr.Markdown("## βΉοΈ Agent Status")
                    agent_info = gr.Markdown(get_agent_info())
                    refresh_info_btn = gr.Button("π Refresh Info", size="sm")
                gr.Markdown("""
---
### π§ Tips for Best Results:
- **Be Specific**: Include all relevant details and constraints
- **Multi-step Questions**: Break complex questions into clear parts
- **Context Matters**: Use the context field for additional information
- **Iterative Approach**: Build on previous questions for deeper analysis
""")

        # --- Event wiring ---------------------------------------------------
        submit_btn.click(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output],
        )
        clear_btn.click(
            fn=clear_conversation,
            outputs=[question_input, response_output],
        )
        refresh_info_btn.click(
            fn=get_agent_info,
            outputs=[agent_info],
        )
        # Pressing Enter in the question box submits as well.
        question_input.submit(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output],
        )

    return interface