| import os | |
| import gradio as gr | |
| from huggingface_hub import login | |
| from smolagents import DuckDuckGoSearchTool, ToolCallingAgent, InferenceClientModel | |
# Authenticate with the Hugging Face Hub only when a token is configured.
# The original called login(os.environ.get('HF_TOKEN')) unconditionally:
# when HF_TOKEN is unset that passes token=None, and huggingface_hub's
# login() then falls back to an interactive prompt — which hangs or crashes
# in a headless deployment such as a Space.
_hf_token = os.environ.get("HF_TOKEN")
if _hf_token:
    login(_hf_token)
def create_agent():
    """Build a fresh tool-calling agent for one request.

    Returns:
        A ``ToolCallingAgent`` that can search the web via DuckDuckGo and
        is backed by the hosted Qwen2.5-72B-Instruct model.
    """
    # NOTE(review): add_base_tools=True asks smolagents to also register its
    # default toolset; verify this does not clash with the explicitly-passed
    # DuckDuckGo search tool (duplicate tool names raise in some versions).
    return ToolCallingAgent(
        tools=[DuckDuckGoSearchTool()],
        model=InferenceClientModel("Qwen/Qwen2.5-72B-Instruct"),
        add_base_tools=True,
    )
def respond(
    message,
    history: list[dict[str, str]],
    system_message: str = "You are a helpful assistant.",
    max_tokens: int = 512,
    temperature: float = 0.7,
    top_p: float = 0.95,
):
    """Generate one chat reply by running a web-search agent on the turn.

    Args:
        message: The user's latest message.
        history: Prior turns as Gradio ``type="messages"`` dicts with
            ``role`` and ``content`` keys.
        system_message: Instruction prepended to the prompt.
        max_tokens / temperature / top_p: Accepted for ChatInterface
            compatibility; currently unused because the model is built
            inside ``create_agent()`` without sampling parameters.

    Yields:
        The agent's final answer (single non-streaming chunk).
    """
    # Bug fix: ChatInterface is created with additional_inputs=[], so it
    # invokes respond(message, history) only. The extra parameters were
    # previously required positionals, raising TypeError on every turn;
    # they now carry defaults, which is backward-compatible for callers
    # that do pass them.
    agent = create_agent()

    # Render history as a readable transcript instead of interpolating the
    # raw list-of-dicts repr, which wastes tokens and confuses the model.
    transcript = "\n".join(
        f"{turn.get('role', 'user').capitalize()}: {turn.get('content', '')}"
        for turn in history
    )
    full_prompt = f"{system_message}\n\nChat history:\n{transcript}\n\nUser: {message}"

    # agent.run is synchronous here; yield once so ChatInterface treats the
    # function as a (single-chunk) generator, matching the original shape.
    response = agent.run(full_prompt, stream=False)
    yield response
| """ | |
| For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
| """ | |
# Bug fix: with additional_inputs=[], ChatInterface called
# respond(message, history) only, while respond declares four further
# required parameters — every turn failed with a TypeError. Wire up the
# matching controls so those parameters are actually supplied.
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
# Compose the app: render the pre-built ChatInterface inside a Blocks
# container so additional components could be laid out around it later.
with gr.Blocks() as demo:
    chatbot.render()

# Launch the Gradio server only when executed as a script (hosting
# platforms that import this module start the app themselves).
if __name__ == "__main__":
    demo.launch()