File size: 2,505 Bytes
8b9e37a
d026761
8b9e37a
 
 
 
aa21a42
 
8b9e37a
 
 
 
 
869c000
 
 
 
d026761
869c000
 
 
 
aa21a42
869c000
aa21a42
869c000
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d026761
 
8b9e37a
869c000
 
 
8b9e37a
869c000
8b9e37a
 
 
869c000
8b9e37a
 
d026761
 
869c000
aa21a42
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import os
import gradio as gr
import requests
import inspect
import pandas as pd
from dataclasses import asdict
from smolagents import CodeAgent, LiteLLMModel, stream_to_gradio, OpenAIServerModel, DuckDuckGoSearchTool, WikipediaSearchTool, GoogleSearchTool
from  tools import  SafeDuckDuckGoSearchTool, ImageUnderstanding, WikipediaSearch, ExcelReader, CsvReader,ChessSolver, download_files, get_images, FileReader, AudioTransciber, YouTubeTranscipt, YouTubeVideoUnderstanding, WeatherTool, OCR
from dotenv import find_dotenv, load_dotenv
from pathlib import Path
from PIL import Image
import time
from agent import PowerAgent
from utils import format_history, css
import logging

# Root-logger setup: surface only ERROR-level records (quiets gradio/litellm chatter).
logging.basicConfig(level=logging.ERROR)

def interact_with_agent(message, history):
    """Gradio ChatInterface handler: stream the agent's answer for one turn.

    Args:
        message: gr.MultimodalTextbox payload — a dict with 'text' (str) and
            optionally 'files' (list of uploaded file paths; may be missing
            or empty).
        history: Prior chat turns as supplied by gr.ChatInterface.

    Yields:
        A growing list of message dicts (role/content) so the UI renders the
        agent's intermediate steps as they stream in. On failure, yields a
        single system message describing the error.
    """
    try:
        # Credentials are (re)loaded per call so a restarted .env takes effect.
        load_dotenv(find_dotenv())
        gemini_model = LiteLLMModel(
            model_id="gemini/gemini-2.0-flash",
            api_key=os.getenv("GEMINI_API_KEY"),
        )
        agent = PowerAgent(model=gemini_model)
        messages = []
        # BUG FIX: message.get('files') can be None or absent; the original
        # len(...) call raised TypeError in that case. `or []` covers missing,
        # None, and empty uniformly.
        files = message.get('files') or []
        if files:
            file_prompt = "The following files are uploaded: " + str(files)
        else:
            file_prompt = ""
        formatted_history = format_history(history)
        # Append the current turn to the serialized history for the agent.
        prompt = formatted_history + f"User: {message['text']}{file_prompt}\nAgent:"
        last_msg = None
        for last_msg in stream_to_gradio(agent, prompt):
            messages.append(asdict(last_msg))
            yield messages
        # BUG FIX: the original referenced the loop variable after the loop,
        # which raises NameError when the stream produces no messages.
        if last_msg is not None:
            history.append([{'user': prompt, 'agent': last_msg.content}])
        yield messages
    except Exception as e:
        # Top-level boundary for the UI callback: log the traceback and show
        # a friendly error in the chat instead of crashing the interface.
        logging.error("An error occurred", exc_info=True)
        yield [{"role": "system", "content": f"❌ An error occurred: {str(e)}"}]

# Multimodal input widget: free text plus file uploads and microphone audio.
_agent_textbox = gr.MultimodalTextbox(
    placeholder="Ask me a question or upload a file.",
    file_count="multiple",
    file_types=['image', '.json', '.mp4', '.pdf', '.txt', '.csv', '.xlsx', '.xls', 'audio'],
    sources=["upload", "microphone"],
    container=False,
    scale=30,
)

# Chat front-end wired to the streaming agent handler above.
demo = gr.ChatInterface(
    interact_with_agent,
    autofocus=False,
    css=css,
    title='Power Agent 🚀',
    multimodal=True,
    textbox=_agent_textbox,
    type="messages",
)

if __name__ == "__main__":
    # Launch the Gradio server; share=True exposes a temporary public URL.
    demo.launch(debug=False, share=True, mcp_server=False)