Spaces:
Running
Running
| """ | |
| Tool integration with LLMs via native function calling. | |
| This module provides: | |
| - LLMResponse and LLMToolCall for parsing LLM responses | |
| - OpenAICaller β a SINGLE caller for all cases (with and without tools) | |
| - Response parsers for OpenAI and Anthropic | |
| Usage: | |
| # One caller for everything | |
| caller = create_openai_caller(api_key="...", model="gpt-4") | |
| # Without tools β returns str | |
| response = caller("Hello!") | |
| # With tools β returns LLMResponse | |
| response = caller("Calculate fib(10)", tools=[...]) | |
| if response.has_tool_calls: | |
| for tc in response.tool_calls: | |
| print(tc.name, tc.arguments) | |
| """ | |
| import json | |
| from dataclasses import dataclass, field | |
| from typing import Any | |
| from .base import ToolCall | |
@dataclass
class LLMToolCall:
    """
    Structured tool call from the LLM.

    Represents a tool call returned by the LLM via native function calling.

    Attributes:
        id: Provider-assigned identifier for this call.
        name: Name of the tool to invoke.
        arguments: Parsed JSON arguments for the tool.
    """

    # NOTE: @dataclass was missing — the parsers construct this class with
    # keyword arguments (LLMToolCall(id=..., name=..., arguments=...)), which
    # requires the generated __init__.
    id: str
    name: str
    arguments: dict[str, Any]

    def to_tool_call(self) -> ToolCall:
        """Convert to a ToolCall for execution."""
        return ToolCall(name=self.name, arguments=self.arguments)
@dataclass
class LLMResponse:
    """
    LLM response with tool call support.

    Attributes:
        content: Text content of the response.
        tool_calls: List of tool calls (if requested by the LLM).
        raw_response: Original API response (for debugging).
    """

    # NOTE: @dataclass was missing — field(default_factory=list) and keyword
    # construction (see parse_openai_response) only work on a dataclass.
    content: str = ""
    tool_calls: list[LLMToolCall] = field(default_factory=list)
    raw_response: Any = None

    @property
    def has_tool_calls(self) -> bool:
        """Whether there are tool calls.

        Exposed as a property: all documented usage in this module reads it
        as an attribute (`if response.has_tool_calls:`); a plain bound method
        would always be truthy there.
        """
        return len(self.tool_calls) > 0

    def get_tool_calls(self) -> list[ToolCall]:
        """Get ToolCall objects for execution."""
        return [tc.to_tool_call() for tc in self.tool_calls]
def parse_openai_response(response: Any) -> LLMResponse:
    """
    Parse an OpenAI API response into LLMResponse.

    Supports both the new format (tool_calls) and legacy (function_call).

    Args:
        response: Response from the OpenAI ChatCompletion API.

    Returns:
        LLMResponse with parsed data.
    """

    def _decode_args(raw: Any) -> dict[str, Any]:
        # Tolerate empty or malformed JSON from the model: fall back to {}.
        if not raw:
            return {}
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return {}

    message = response.choices[0].message
    calls: list[LLMToolCall] = []

    native = getattr(message, "tool_calls", None)
    legacy = getattr(message, "function_call", None)

    if native:
        # New format: a list of tool calls, each with its own id.
        calls = [
            LLMToolCall(
                id=tc.id,
                name=tc.function.name,
                arguments=_decode_args(tc.function.arguments),
            )
            for tc in native
        ]
    elif legacy:
        # Legacy format: a single function_call with no id of its own.
        calls = [
            LLMToolCall(
                id="legacy_call",
                name=legacy.name,
                arguments=_decode_args(legacy.arguments),
            )
        ]

    return LLMResponse(
        content=message.content or "",
        tool_calls=calls,
        raw_response=response,
    )
def parse_anthropic_response(response: Any) -> LLMResponse:
    """
    Parse an Anthropic API response into LLMResponse.

    Args:
        response: Response from the Anthropic Messages API.

    Returns:
        LLMResponse with parsed data.
    """
    texts: list[str] = []
    calls: list[LLMToolCall] = []

    # Anthropic returns a list of typed content blocks; text and tool_use
    # blocks may be interleaved.
    for block in response.content:
        if block.type == "text":
            texts.append(block.text)
        elif block.type == "tool_use":
            # Keep arguments a dict even if the API hands back something else.
            args = block.input if isinstance(block.input, dict) else {}
            calls.append(LLMToolCall(id=block.id, name=block.name, arguments=args))

    return LLMResponse(
        content="\n".join(texts),
        tool_calls=calls,
        raw_response=response,
    )
class OpenAICaller:
    """
    SINGLE LLM caller for OpenAI - works both with and without tools.

    This is the RECOMMENDED way to create callers for agents.

    - Without tools: returns str (like a regular caller)
    - With tools: returns LLMResponse with tool_calls

    Example:
        from openai import OpenAI

        client = OpenAI(api_key="...")
        caller = OpenAICaller(client, model="gpt-4")

        # Without tools -> regular text response
        response = caller("Hello!")  # -> str

        # With tools -> LLMResponse with tool_calls
        response = caller("Calculate fib(15)", tools=[...])  # -> LLMResponse
        if response.has_tool_calls:
            for tc in response.tool_calls:
                print(f"Call {tc.name} with {tc.arguments}")
    """

    def __init__(
        self,
        client: Any,  # OpenAI client instance
        model: str = "gpt-4",
        temperature: float = 0.1,  # low temperature for determinism
        max_tokens: int = 2048,
        system_prompt: str | None = None,
        tool_choice: str = "required",  # "required" = mandatory, "auto" = optional
    ):
        """
        Create a universal OpenAI caller.

        Args:
            client: OpenAI client instance.
            model: Model name.
            temperature: Generation temperature (default 0.1 for determinism).
            max_tokens: Maximum tokens in the response.
            system_prompt: System prompt (optional).
            tool_choice: Tool usage policy:
                - "required": LLM MUST call a tool (default)
                - "auto": LLM decides whether to use tools
        """
        self.client = client
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.system_prompt = system_prompt
        self.tool_choice = tool_choice

    def __call__(
        self,
        prompt: str,
        tools: list[dict[str, Any]] | None = None,
    ) -> str | LLMResponse:
        """
        Call the OpenAI API.

        Args:
            prompt: User prompt.
            tools: Tools in OpenAI format (optional).

        Returns:
            - str: if tools are not passed
            - LLMResponse: if tools are passed
        """
        # Assemble the conversation: optional system turn, then the user turn.
        messages: list[dict[str, str]] = []
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})
        messages.append({"role": "user", "content": prompt})

        request: dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
        }
        # Tools are only attached when provided; tool_choice goes with them.
        if tools:
            request["tools"] = tools
            request["tool_choice"] = self.tool_choice

        response = self.client.chat.completions.create(**request)

        # Tool mode: hand back the parsed structured response.
        if tools:
            return parse_openai_response(response)

        # Plain mode: just the text content.
        return response.choices[0].message.content or ""


# Alias for backward compatibility
OpenAIToolsCaller = OpenAICaller
def create_openai_caller(
    api_key: str | None = None,
    base_url: str | None = None,
    model: str = "gpt-4",
    temperature: float = 0.1,  # low temperature by default
    max_tokens: int = 2048,
    system_prompt: str | None = None,
    tool_choice: str = "required",
) -> OpenAICaller:
    """
    Create a universal OpenAI caller.

    This is the RECOMMENDED way to create callers for agents.
    Works both with and without tools.

    Args:
        api_key: OpenAI API key (or from an environment variable).
        base_url: Base URL (for compatible APIs).
        model: Model name.
        temperature: Generation temperature (default 0.1 for determinism).
        max_tokens: Maximum tokens.
        system_prompt: System prompt.
        tool_choice: Tool usage policy:
            - "required": LLM MUST call a tool (default)
            - "auto": LLM decides whether to use tools

    Returns:
        Ready-to-use OpenAICaller.

    Example:
        # One caller for all agents
        caller = create_openai_caller(
            api_key="sk-...",
            model="gpt-4",
        )

        # Without tools -> plain text
        response = caller("Hello!")  # -> str

        # With tools -> LLMResponse
        response = caller("Calculate fib(10)", tools=[...])
        if response.has_tool_calls:
            ...
    """
    # Imported lazily so the module stays usable without the openai package.
    try:
        from openai import OpenAI
    except ImportError as e:
        msg = "openai package required: pip install openai"
        raise ImportError(msg) from e

    # Only forward credentials/endpoint when explicitly given, so the client
    # can still pick them up from environment variables.
    client_kwargs: dict[str, Any] = {}
    if api_key:
        client_kwargs["api_key"] = api_key
    if base_url:
        client_kwargs["base_url"] = base_url

    return OpenAICaller(
        client=OpenAI(**client_kwargs),
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        system_prompt=system_prompt,
        tool_choice=tool_choice,
    )


# Alias for backward compatibility
create_openai_tools_caller = create_openai_caller