"""
Gradio user interface for the Gemini AI Agent.
"""
import gradio as gr
import asyncio
import logging
from typing import List, Tuple, Optional
from src.agent import GeminiAgent
from src.utils import validate_question, format_error_message
logger = logging.getLogger(__name__)
def create_interface(agent: GeminiAgent) -> gr.Blocks:
    """
    Create the Gradio interface for the AI agent.

    Args:
        agent: The GeminiAgent instance used to answer questions.

    Returns:
        gr.Blocks: The assembled (not yet launched) Gradio interface.
    """

    async def process_question_async(question: str, context: str = "") -> str:
        """Async wrapper so the agent's coroutine can be awaited."""
        # Empty-string context is normalized to None for the agent API.
        return await agent.process_question(question, context or None)

    def process_question_sync(question: str, context: str = "") -> str:
        """
        Synchronous wrapper for question processing (required by Gradio).

        Validates the question, runs the async agent call to completion,
        and formats the result (or any error) as Markdown.
        """
        try:
            # Reject empty/invalid questions before hitting the model.
            is_valid, error_msg = validate_question(question)
            if not is_valid:
                return f"❌ **Error**: {error_msg}"
            # asyncio.run() creates, runs, and closes a fresh event loop —
            # equivalent to the manual new_event_loop/close sequence, but
            # cannot leak a loop if run_until_complete raises.
            result = asyncio.run(process_question_async(question, context))
            return f"🤖 **AI Response**:\n\n{result}"
        except Exception as e:
            # Broad catch is deliberate: the UI handler must always return a
            # string. Log the traceback so failures are not silently swallowed.
            logger.exception("Failed to process question")
            return f"❌ **Error**: {format_error_message(e)}"

    def clear_conversation():
        """Clear the agent's conversation history and reset the UI fields."""
        agent.clear_history()
        # Tuple maps onto (question_input, response_output).
        return "", "Conversation history cleared! ✨"

    def get_agent_info() -> str:
        """Render the agent's current stats as a Markdown panel."""
        stats = agent.get_stats()
        return f"""
        ## 🤖 Agent Information
        **Model**: {stats['model']}
        **Conversation Length**: {stats['conversation_length']} exchanges
        **Max Tokens**: {stats['max_tokens']}
        **Temperature**: {stats['temperature']}
        ### 🎯 Specialized Capabilities
        - **Complex Research**: Multi-source fact-checking and analysis
        - **Mathematical Reasoning**: Step-by-step problem solving
        - **Multi-modal Analysis**: Processing images, videos, and audio
        - **Data Interpretation**: Tables, charts, and statistical analysis
        - **Creative Problem Solving**: Innovative approaches to unusual questions
        """

    # Create the interface
    with gr.Blocks(
        title="Gemini AI Research Agent",
        theme=gr.themes.Soft(),
        css="""
        .container {
            max-width: 1200px;
            margin: auto;
        }
        .question-box {
            border-left: 4px solid #4CAF50;
            padding-left: 16px;
        }
        .response-box {
            background-color: #f8f9fa;
            border-radius: 8px;
            padding: 16px;
        }
        """
    ) as interface:
        gr.Markdown("""
        # 🧠 Gemini AI Research Agent
        An advanced AI assistant powered by Google's Gemini 1.5 Flash, specialized in handling complex research questions, data analysis, and multi-modal content processing.
        **Perfect for**: Academic research, fact-checking, mathematical problems, data analysis, and challenging multi-step questions.
        """)
        with gr.Row():
            with gr.Column(scale=2):
                with gr.Group():
                    gr.Markdown("## 💬 Ask Your Question")
                    question_input = gr.Textbox(
                        label="Question",
                        placeholder="Enter your research question here... (e.g., 'How many studio albums were published by Mercedes Sosa between 2000 and 2009?')",
                        lines=4,
                        elem_classes=["question-box"]
                    )
                    context_input = gr.Textbox(
                        label="Additional Context (Optional)",
                        placeholder="Provide any additional context, constraints, or specific requirements...",
                        lines=2
                    )
                    with gr.Row():
                        submit_btn = gr.Button("🚀 Ask Question", variant="primary", size="lg")
                        clear_btn = gr.Button("🗑️ Clear History", variant="secondary")
                with gr.Group():
                    gr.Markdown("## 📋 Response")
                    response_output = gr.Textbox(
                        label="AI Response",
                        lines=15,
                        interactive=False,
                        elem_classes=["response-box"]
                    )
            with gr.Column(scale=1):
                with gr.Group():
                    gr.Markdown("## ℹ️ Agent Status")
                    agent_info = gr.Markdown(get_agent_info())
                    refresh_info_btn = gr.Button("🔄 Refresh Info", size="sm")
                gr.Markdown("""
                ---
                ### 💡 Tips for Best Results:
                - **Be Specific**: Include all relevant details and constraints
                - **Multi-step Questions**: Break complex questions into clear parts
                - **Context Matters**: Use the context field for additional information
                - **Iterative Approach**: Build on previous questions for deeper analysis
                """)

        # Event handlers
        submit_btn.click(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output]
        )
        clear_btn.click(
            fn=clear_conversation,
            outputs=[question_input, response_output]
        )
        refresh_info_btn.click(
            fn=get_agent_info,
            outputs=[agent_info]
        )
        # Allow Enter key to submit
        question_input.submit(
            fn=process_question_sync,
            inputs=[question_input, context_input],
            outputs=[response_output]
        )
    return interface