lailaelkoussy committed on
Commit
2898356
·
1 Parent(s): 6a1d3b6

add option to choose between tool-calling and code agent

Browse files
Files changed (1) hide show
  1. smolagent_chat.py +51 -15
smolagent_chat.py CHANGED
@@ -12,7 +12,7 @@ import re
12
  from typing import List, Dict, Any
13
  import gradio as gr
14
  from gradio import ChatMessage
15
- from smolagents import MCPClient, ToolCallingAgent, OpenAIServerModel, AzureOpenAIModel, InferenceClientModel, stream_to_gradio
16
 
17
 
18
  class Colors:
@@ -63,6 +63,7 @@ class KnowledgeGraphChatAgent:
63
  self.mcp_server_url = mcp_server_url or os.getenv("MCP_SERVER_URL", "http://localhost:4000/mcp")
64
  self.model = None
65
  self.agent = None
 
66
  self.mcp_client = None
67
  self.tools = None
68
  self.conversation_history = []
@@ -126,6 +127,7 @@ class KnowledgeGraphChatAgent:
126
  model_kwargs = {
127
  "model_id": model_name,
128
  "token": api_key,
 
129
  }
130
  if provider:
131
  model_kwargs["provider"] = provider
@@ -151,7 +153,7 @@ class KnowledgeGraphChatAgent:
151
  print_error(f"Failed to initialize model: {e}")
152
  raise
153
 
154
- def _initialize_agent(self, max_steps: int = None):
155
  """Initialize the agent using the configured model and pre-loaded MCP tools."""
156
  if not self.model:
157
  raise ValueError("Model must be initialized before creating agent!")
@@ -160,16 +162,30 @@ class KnowledgeGraphChatAgent:
160
 
161
  try:
162
  max_steps = max_steps or int(os.getenv("MAX_STEPS", 5))
 
163
 
164
- self.agent = ToolCallingAgent(
165
- tools=self.tools,
166
- model=self.model,
167
- name="KnowledgeGraphAgent",
168
- max_steps=max_steps,
169
- add_base_tools=False,
170
- instructions=CUSTOM_INSTRUCTIONS
171
- )
172
- print_success("Agent initialized successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  except Exception as e:
174
  print_error(f"Failed to initialize agent: {e}")
175
  raise
@@ -299,6 +315,25 @@ def create_gradio_interface(agent: KnowledgeGraphChatAgent):
299
  info="Choose between HuggingFace Inference, OpenAI, or Azure OpenAI"
300
  )
301
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
302
  # Model name field (shown for all types)
303
  with gr.Row() as model_name_row:
304
  model_name = gr.Textbox(
@@ -439,7 +474,7 @@ def create_gradio_interface(agent: KnowledgeGraphChatAgent):
439
  """)
440
 
441
  # Handle agent initialization
442
- def initialize_agent(mtype, mname, akey, burl, azure_akey, azure_ep, aversion, hf_tok, hf_prov, msteps):
443
  try:
444
  if mtype == "azure":
445
  agent._initialize_model(
@@ -463,9 +498,9 @@ def create_gradio_interface(agent: KnowledgeGraphChatAgent):
463
  base_url=burl,
464
  model_name=mname
465
  )
466
- agent._initialize_agent(max_steps=int(msteps))
467
  return (
468
- gr.update(value="**Status:** ✅ Agent Ready!"),
469
  gr.update(visible=False), # Hide init section
470
  gr.update(visible=True) # Show chat section
471
  )
@@ -479,7 +514,7 @@ def create_gradio_interface(agent: KnowledgeGraphChatAgent):
479
 
480
  init_btn.click(
481
  fn=initialize_agent,
482
- inputs=[model_type, model_name, api_key, base_url, azure_api_key, azure_endpoint, api_version, hf_token, hf_provider, max_steps],
483
  outputs=[init_status, init_section, chat_section]
484
  )
485
 
@@ -547,3 +582,4 @@ def main():
547
 
548
  if __name__ == "__main__":
549
  main()
 
 
12
  from typing import List, Dict, Any
13
  import gradio as gr
14
  from gradio import ChatMessage
15
+ from smolagents import MCPClient, ToolCallingAgent, CodeAgent, OpenAIServerModel, AzureOpenAIModel, InferenceClientModel, stream_to_gradio
16
 
17
 
18
  class Colors:
 
63
  self.mcp_server_url = mcp_server_url or os.getenv("MCP_SERVER_URL", "http://localhost:4000/mcp")
64
  self.model = None
65
  self.agent = None
66
+ self.agent_type = None
67
  self.mcp_client = None
68
  self.tools = None
69
  self.conversation_history = []
 
127
  model_kwargs = {
128
  "model_id": model_name,
129
  "token": api_key,
130
+ "bill_to": "epita"
131
  }
132
  if provider:
133
  model_kwargs["provider"] = provider
 
153
  print_error(f"Failed to initialize model: {e}")
154
  raise
155
 
156
+ def _initialize_agent(self, agent_type: str = "tool_calling", max_steps: int = None):
157
  """Initialize the agent using the configured model and pre-loaded MCP tools."""
158
  if not self.model:
159
  raise ValueError("Model must be initialized before creating agent!")
 
162
 
163
  try:
164
  max_steps = max_steps or int(os.getenv("MAX_STEPS", 5))
165
+ self.agent_type = agent_type
166
 
167
+ print_info(f"Initializing {agent_type} agent...")
168
+
169
+ if agent_type == "code":
170
+ self.agent = CodeAgent(
171
+ tools=self.tools,
172
+ model=self.model,
173
+ name="KnowledgeGraphCodeAgent",
174
+ max_steps=max_steps,
175
+ add_base_tools=False,
176
+ additional_authorized_imports=["numpy", "pandas", "re", "json"],
177
+ instructions=CUSTOM_INSTRUCTIONS
178
+ )
179
+ else: # tool_calling
180
+ self.agent = ToolCallingAgent(
181
+ tools=self.tools,
182
+ model=self.model,
183
+ name="KnowledgeGraphAgent",
184
+ max_steps=max_steps,
185
+ add_base_tools=False,
186
+ instructions=CUSTOM_INSTRUCTIONS
187
+ )
188
+ print_success(f"{agent_type.title()} agent initialized successfully!")
189
  except Exception as e:
190
  print_error(f"Failed to initialize agent: {e}")
191
  raise
 
315
  info="Choose between HuggingFace Inference, OpenAI, or Azure OpenAI"
316
  )
317
 
318
+ # Agent type selection
319
+ with gr.Row():
320
+ agent_type = gr.Radio(
321
+ choices=["tool_calling", "code"],
322
+ value="tool_calling",
323
+ label="Agent Type",
324
+ info="Choose the agent architecture"
325
+ )
326
+
327
+ gr.Markdown("""
328
+ **Agent Type Guide:**
329
+
330
+ - **Tool Calling Agent** (Recommended): Uses native function-calling capabilities. Works well for both medium and large models (GPT-4, Claude, Qwen 70B+). More reliable and efficient because the output format is constrained.
331
+
332
+ - **Code Agent**: Generates Python code to solve tasks. Best used with larger, more capable models that have strong code-generation abilities. Smaller models often struggle with correctness, debugging, and multi-step reasoning.
333
+
334
+ ⚠️ **Note**: Smaller models (<30B parameters) generally struggle with both complex tool calling and code generation, but code generation tends to be more error-prone. For best results, use high-capability models like GPT-4, Claude 3.5, or Qwen 70B+ for Code Agent workflows, and prefer Tool Calling Agents whenever possible.
335
+ """)
336
+
337
  # Model name field (shown for all types)
338
  with gr.Row() as model_name_row:
339
  model_name = gr.Textbox(
 
474
  """)
475
 
476
  # Handle agent initialization
477
+ def initialize_agent(atype, mtype, mname, akey, burl, azure_akey, azure_ep, aversion, hf_tok, hf_prov, msteps):
478
  try:
479
  if mtype == "azure":
480
  agent._initialize_model(
 
498
  base_url=burl,
499
  model_name=mname
500
  )
501
+ agent._initialize_agent(agent_type=atype, max_steps=int(msteps))
502
  return (
503
+ gr.update(value=f"**Status:** ✅ Agent Ready! (Using {atype.replace('_', ' ').title()} Agent)"),
504
  gr.update(visible=False), # Hide init section
505
  gr.update(visible=True) # Show chat section
506
  )
 
514
 
515
  init_btn.click(
516
  fn=initialize_agent,
517
+ inputs=[agent_type, model_type, model_name, api_key, base_url, azure_api_key, azure_endpoint, api_version, hf_token, hf_provider, max_steps],
518
  outputs=[init_status, init_section, chat_section]
519
  )
520
 
 
582
 
583
  if __name__ == "__main__":
584
  main()
585
+