Dataset columns:
code: string (lengths 66 to 870k)
docstring: string (lengths 19 to 26.7k)
func_name: string (lengths 1 to 138)
language: string (1 distinct value)
repo: string (lengths 7 to 68)
path: string (lengths 5 to 324)
url: string (lengths 46 to 389)
license: string (7 distinct values)
def get_all_edges( agent: Agent, parent: Agent | None = None, visited: set[str] | None = None ) -> str: """ Recursively generates the edges for the given agent and its handoffs in DOT format. Args: agent (Agent): The agent for which the edges are to be generated. parent (Agent, optional...
Recursively generates the edges for the given agent and its handoffs in DOT format. Args: agent (Agent): The agent for which the edges are to be generated. parent (Agent, optional): The parent agent. Defaults to None. Returns: str: The DOT format string representing the edges. ...
get_all_edges
python
openai/openai-agents-python
src/agents/extensions/visualization.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/extensions/visualization.py
MIT
def draw_graph(agent: Agent, filename: str | None = None) -> graphviz.Source: """ Draws the graph for the given agent and optionally saves it as a PNG file. Args: agent (Agent): The agent for which the graph is to be drawn. filename (str): The name of the file to save the graph as a PNG. ...
Draws the graph for the given agent and optionally saves it as a PNG file. Args: agent (Agent): The agent for which the graph is to be drawn. filename (str): The name of the file to save the graph as a PNG. Returns: graphviz.Source: The graphviz Source object representing the grap...
draw_graph
python
openai/openai-agents-python
src/agents/extensions/visualization.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/extensions/visualization.py
MIT
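A minimal usage sketch for the two visualization helpers above, assuming the package is installed with its optional graphviz extra; the agent names and handoff wiring are illustrative only, not taken from the snippet:

```python
from agents import Agent
from agents.extensions.visualization import draw_graph

# Hypothetical agents, purely to give the graph something to draw.
spanish_agent = Agent(name="Spanish agent", instructions="You only speak Spanish.")
triage_agent = Agent(
    name="Triage agent",
    instructions="Hand off to the Spanish agent when appropriate.",
    handoffs=[spanish_agent],
)

# Returns a graphviz.Source; passing a filename also writes the rendered PNG.
source = draw_graph(triage_agent, filename="triage_graph")
print(source.source)  # the DOT text assembled by get_all_edges and friends
```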
async def connect(self): """Connect to the server. For example, this might mean spawning a subprocess or opening a network connection. The server is expected to remain connected until `cleanup()` is called. """ pass
Connect to the server. For example, this might mean spawning a subprocess or opening a network connection. The server is expected to remain connected until `cleanup()` is called.
connect
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
async def cleanup(self): """Cleanup the server. For example, this might mean closing a subprocess or closing a network connection. """ pass
Cleanup the server. For example, this might mean closing a subprocess or closing a network connection.
cleanup
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
async def list_tools(self) -> list[MCPTool]: """List the tools available on the server.""" pass
List the tools available on the server.
list_tools
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def __init__(self, cache_tools_list: bool, client_session_timeout_seconds: float | None): """ Args: cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be cached and only fetched from the server once. If `False`, the tools list will be fe...
Args: cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be cached and only fetched from the server once. If `False`, the tools list will be fetched from the server on each call to `list_tools()`. The cache can be invalidated by cal...
__init__
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def create_streams( self, ) -> AbstractAsyncContextManager[ tuple[ MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], GetSessionIdCallback | None, ] ]: """Create the streams for the server.""" ...
Create the streams for the server.
create_streams
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
async def list_tools(self) -> list[MCPTool]: """List the tools available on the server.""" if not self.session: raise UserError("Server not initialized. Make sure you call `connect()` first.") # Return from cache if caching is enabled, we have tools, and the cache is not dirty ...
List the tools available on the server.
list_tools
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def __init__( self, params: MCPServerStdioParams, cache_tools_list: bool = False, name: str | None = None, client_session_timeout_seconds: float | None = 5, ): """Create a new MCP server based on the stdio transport. Args: params: The params that ...
Create a new MCP server based on the stdio transport. Args: params: The params that configure the server. This includes the command to run to start the server, the args to pass to the command, the environment variables to set for the server, the working directory to ...
__init__
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
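A hedged sketch of constructing the stdio-based MCP server described above and listing its tools; the filesystem server command is a stand-in assumption, not something the snippet prescribes:

```python
import asyncio

from agents.mcp import MCPServerStdio

async def main() -> None:
    # cache_tools_list=True: list_tools() fetches from the server once, then serves the cache.
    server = MCPServerStdio(
        params={
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "."],
        },
        cache_tools_list=True,
    )
    async with server:  # connect() on enter, cleanup() on exit
        tools = await server.list_tools()
        print([tool.name for tool in tools])

asyncio.run(main())
```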
def create_streams( self, ) -> AbstractAsyncContextManager[ tuple[ MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], GetSessionIdCallback | None, ] ]: """Create the streams for the server.""" ...
Create the streams for the server.
create_streams
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def __init__( self, params: MCPServerSseParams, cache_tools_list: bool = False, name: str | None = None, client_session_timeout_seconds: float | None = 5, ): """Create a new MCP server based on the HTTP with SSE transport. Args: params: The params...
Create a new MCP server based on the HTTP with SSE transport. Args: params: The params that configure the server. This includes the URL of the server, the headers to send to the server, the timeout for the HTTP request, and the timeout for the SSE connection. ...
__init__
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def create_streams( self, ) -> AbstractAsyncContextManager[ tuple[ MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], GetSessionIdCallback | None, ] ]: """Create the streams for the server.""" ...
Create the streams for the server.
create_streams
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def __init__( self, params: MCPServerStreamableHttpParams, cache_tools_list: bool = False, name: str | None = None, client_session_timeout_seconds: float | None = 5, ): """Create a new MCP server based on the Streamable HTTP transport. Args: param...
Create a new MCP server based on the Streamable HTTP transport. Args: params: The params that configure the server. This includes the URL of the server, the headers to send to the server, the timeout for the HTTP request, and the timeout for the Streamable HTTP conne...
__init__
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
def create_streams( self, ) -> AbstractAsyncContextManager[ tuple[ MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], GetSessionIdCallback | None, ] ]: """Create the streams for the server.""" ...
Create the streams for the server.
create_streams
python
openai/openai-agents-python
src/agents/mcp/server.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/server.py
MIT
async def get_all_function_tools( cls, servers: list["MCPServer"], convert_schemas_to_strict: bool ) -> list[Tool]: """Get all function tools from a list of MCP servers.""" tools = [] tool_names: set[str] = set() for server in servers: server_tools = await cls.get...
Get all function tools from a list of MCP servers.
get_all_function_tools
python
openai/openai-agents-python
src/agents/mcp/util.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/util.py
MIT
async def get_function_tools( cls, server: "MCPServer", convert_schemas_to_strict: bool ) -> list[Tool]: """Get all function tools from a single MCP server.""" with mcp_tools_span(server=server.name) as span: tools = await server.list_tools() span.span_data.result = ...
Get all function tools from a single MCP server.
get_function_tools
python
openai/openai-agents-python
src/agents/mcp/util.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/util.py
MIT
def to_function_tool( cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool ) -> FunctionTool: """Convert an MCP tool to an Agents SDK function tool.""" invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool) schema, is_strict = tool.inputSchema, False...
Convert an MCP tool to an Agents SDK function tool.
to_function_tool
python
openai/openai-agents-python
src/agents/mcp/util.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/util.py
MIT
async def invoke_mcp_tool( cls, server: "MCPServer", tool: "MCPTool", context: RunContextWrapper[Any], input_json: str ) -> str: """Invoke an MCP tool and return the result as a string.""" try: json_data: dict[str, Any] = json.loads(input_json) if input_json else {} excep...
Invoke an MCP tool and return the result as a string.
invoke_mcp_tool
python
openai/openai-agents-python
src/agents/mcp/util.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/mcp/util.py
MIT
def items_to_messages( cls, items: str | Iterable[TResponseInputItem], ) -> list[ChatCompletionMessageParam]: """ Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam. Rules: - EasyInputMessage or InputMessage (role=user) => ChatCompletionUs...
Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam. Rules: - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam - EasyInputMessage or Inp...
items_to_messages
python
openai/openai-agents-python
src/agents/models/chatcmpl_converter.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/chatcmpl_converter.py
MIT
async def get_response( self, system_instructions: str | None, input: str | list[TResponseInputItem], model_settings: ModelSettings, tools: list[Tool], output_schema: AgentOutputSchemaBase | None, handoffs: list[Handoff], tracing: ModelTracing, *, ...
Get a response from the model. Args: system_instructions: The system instructions to use. input: The input items to the model, in OpenAI Responses format. model_settings: The model settings to use. tools: The tools available to the model. output_schem...
get_response
python
openai/openai-agents-python
src/agents/models/interface.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/interface.py
MIT
def stream_response( self, system_instructions: str | None, input: str | list[TResponseInputItem], model_settings: ModelSettings, tools: list[Tool], output_schema: AgentOutputSchemaBase | None, handoffs: list[Handoff], tracing: ModelTracing, *, ...
Stream a response from the model. Args: system_instructions: The system instructions to use. input: The input items to the model, in OpenAI Responses format. model_settings: The model settings to use. tools: The tools available to the model. output_sc...
stream_response
python
openai/openai-agents-python
src/agents/models/interface.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/interface.py
MIT
def get_model(self, model_name: str | None) -> Model: """Get a model by name. Args: model_name: The name of the model to get. Returns: The model. """
Get a model by name. Args: model_name: The name of the model to get. Returns: The model.
get_model
python
openai/openai-agents-python
src/agents/models/interface.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/interface.py
MIT
def __init__( self, *, provider_map: MultiProviderMap | None = None, openai_api_key: str | None = None, openai_base_url: str | None = None, openai_client: AsyncOpenAI | None = None, openai_organization: str | None = None, openai_project: str | None = None,...
Create a new OpenAI provider. Args: provider_map: A MultiProviderMap that maps prefixes to ModelProviders. If not provided, we will use a default mapping. See the documentation for this class to see the default mapping. openai_api_key: The API key to use ...
__init__
python
openai/openai-agents-python
src/agents/models/multi_provider.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/multi_provider.py
MIT
def get_model(self, model_name: str | None) -> Model: """Returns a Model based on the model name. The model name can have a prefix, ending with a "/", which will be used to look up the ModelProvider. If there is no prefix, we will use the OpenAI provider. Args: model_name: T...
Returns a Model based on the model name. The model name can have a prefix, ending with a "/", which will be used to look up the ModelProvider. If there is no prefix, we will use the OpenAI provider. Args: model_name: The name of the model to get. Returns: A Mode...
get_model
python
openai/openai-agents-python
src/agents/models/multi_provider.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/multi_provider.py
MIT
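A small sketch of the prefix-based lookup that `MultiProvider.get_model` describes; it assumes `OPENAI_API_KEY` is set, and the commented `litellm/` line additionally assumes the optional litellm dependency:

```python
from agents.models.multi_provider import MultiProvider

provider = MultiProvider()

# No prefix: falls through to the default OpenAI provider.
model = provider.get_model("gpt-4o")

# A "<prefix>/" selects the ModelProvider registered for that prefix instead, e.g.:
# other = provider.get_model("litellm/anthropic/claude-3-5-sonnet-20240620")
```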
async def stream_response( self, system_instructions: str | None, input: str | list[TResponseInputItem], model_settings: ModelSettings, tools: list[Tool], output_schema: AgentOutputSchemaBase | None, handoffs: list[Handoff], tracing: ModelTracing, ...
Yields a partial message as it is generated, as well as the usage information.
stream_response
python
openai/openai-agents-python
src/agents/models/openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/openai_chatcompletions.py
MIT
def __init__( self, *, api_key: str | None = None, base_url: str | None = None, openai_client: AsyncOpenAI | None = None, organization: str | None = None, project: str | None = None, use_responses: bool | None = None, ) -> None: """Create a new...
Create a new OpenAI provider. Args: api_key: The API key to use for the OpenAI client. If not provided, we will use the default API key. base_url: The base URL to use for the OpenAI client. If not provided, we will use the default base URL. op...
__init__
python
openai/openai-agents-python
src/agents/models/openai_provider.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/openai_provider.py
MIT
async def stream_response( self, system_instructions: str | None, input: str | list[TResponseInputItem], model_settings: ModelSettings, tools: list[Tool], output_schema: AgentOutputSchemaBase | None, handoffs: list[Handoff], tracing: ModelTracing, ...
Yields a partial message as it is generated, as well as the usage information.
stream_response
python
openai/openai-agents-python
src/agents/models/openai_responses.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/models/openai_responses.py
MIT
def trace( workflow_name: str, trace_id: str | None = None, group_id: str | None = None, metadata: dict[str, Any] | None = None, disabled: bool = False, ) -> Trace: """ Create a new trace. The trace will not be started automatically; you should either use it as a context manager (`with t...
Create a new trace. The trace will not be started automatically; you should either use it as a context manager (`with trace(...):`) or call `trace.start()` + `trace.finish()` manually. In addition to the workflow name and optional grouping identifier, you can provide an arbitrary metadata dictiona...
trace
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
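A brief sketch of the two usage patterns the `trace` docstring mentions, as a context manager or started and finished manually; the workflow name and group id are arbitrary:

```python
from agents import trace

# Context-manager form (preferred).
with trace("Customer support flow", group_id="conversation-123"):
    ...  # run agents here; spans created inside attach to this trace

# Manual form, for when a `with` block is awkward to arrange.
t = trace("Customer support flow")
t.start()
try:
    ...
finally:
    t.finish()
```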
def agent_span( name: str, handoffs: list[str] | None = None, tools: list[str] | None = None, output_type: str | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[AgentSpanData]: """Create a new agent span. The span will n...
Create a new agent span. The span will not be started automatically, you should either do `with agent_span() ...` or call `span.start()` + `span.finish()` manually. Args: name: The name of the agent. handoffs: Optional list of agent names to which this agent could hand off control. tool...
agent_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def function_span( name: str, input: str | None = None, output: str | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[FunctionSpanData]: """Create a new function span. The span will not be started automatically, you should e...
Create a new function span. The span will not be started automatically, you should either do `with function_span() ...` or call `span.start()` + `span.finish()` manually. Args: name: The name of the function. input: The input to the function. output: The output of the function. ...
function_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def generation_span( input: Sequence[Mapping[str, Any]] | None = None, output: Sequence[Mapping[str, Any]] | None = None, model: str | None = None, model_config: Mapping[str, Any] | None = None, usage: dict[str, Any] | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None...
Create a new generation span. The span will not be started automatically, you should either do `with generation_span() ...` or call `span.start()` + `span.finish()` manually. This span captures the details of a model generation, including the input message sequence, any generated outputs, the model name an...
generation_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def response_span( response: Response | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[ResponseSpanData]: """Create a new response span. The span will not be started automatically, you should either do `with response_span() ......
Create a new response span. The span will not be started automatically, you should either do `with response_span() ...` or call `span.start()` + `span.finish()` manually. Args: response: The OpenAI Response object. span_id: The ID of the span. Optional. If not provided, we will generate an ID. ...
response_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def handoff_span( from_agent: str | None = None, to_agent: str | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[HandoffSpanData]: """Create a new handoff span. The span will not be started automatically, you should either do ...
Create a new handoff span. The span will not be started automatically, you should either do `with handoff_span() ...` or call `span.start()` + `span.finish()` manually. Args: from_agent: The name of the agent that is handing off. to_agent: The name of the agent that is receiving the handoff. ...
handoff_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def custom_span( name: str, data: dict[str, Any] | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[CustomSpanData]: """Create a new custom span, to which you can add your own metadata. The span will not be started automatica...
Create a new custom span, to which you can add your own metadata. The span will not be started automatically, you should either do `with custom_span() ...` or call `span.start()` + `span.finish()` manually. Args: name: The name of the custom span. data: Arbitrary structured data to associat...
custom_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
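In the same spirit, a hedged example of attaching a custom span with arbitrary metadata inside a trace; the span name and data dict are made up:

```python
from agents import custom_span, trace

with trace("Nightly ETL"):
    with custom_span("load-documents", data={"source": "s3", "count": 42}):
        ...  # the work being measured
```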
def guardrail_span( name: str, triggered: bool = False, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[GuardrailSpanData]: """Create a new guardrail span. The span will not be started automatically, you should either do `with guardrail_...
Create a new guardrail span. The span will not be started automatically, you should either do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually. Args: name: The name of the guardrail. triggered: Whether the guardrail was triggered. span_id: The ID of the span...
guardrail_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def transcription_span( model: str | None = None, input: str | None = None, input_format: str | None = "pcm", output: str | None = None, model_config: Mapping[str, Any] | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[T...
Create a new transcription span. The span will not be started automatically, you should either do `with transcription_span() ...` or call `span.start()` + `span.finish()` manually. Args: model: The name of the model used for the speech-to-text. input: The audio input of the speech-to-text trans...
transcription_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def speech_span( model: str | None = None, input: str | None = None, output: str | None = None, output_format: str | None = "pcm", model_config: Mapping[str, Any] | None = None, first_content_at: str | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, ...
Create a new speech span. The span will not be started automatically, you should either do `with speech_span() ...` or call `span.start()` + `span.finish()` manually. Args: model: The name of the model used for the text-to-speech. input: The text input of the text-to-speech. output: The...
speech_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def speech_group_span( input: str | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[SpeechGroupSpanData]: """Create a new speech group span. The span will not be started automatically, you should either do `with speech_group_spa...
Create a new speech group span. The span will not be started automatically, you should either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually. Args: input: The input text used for the speech request. span_id: The ID of the span. Optional. If not provided, we ...
speech_group_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def mcp_tools_span( server: str | None = None, result: list[str] | None = None, span_id: str | None = None, parent: Trace | Span[Any] | None = None, disabled: bool = False, ) -> Span[MCPListToolsSpanData]: """Create a new MCP list tools span. The span will not be started automatically, you shoul...
Create a new MCP list tools span. The span will not be started automatically, you should either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually. Args: server: The name of the MCP server. result: The result of the MCP list tools call. span_id: The ID of t...
mcp_tools_span
python
openai/openai-agents-python
src/agents/tracing/create.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/create.py
MIT
def __init__( self, api_key: str | None = None, organization: str | None = None, project: str | None = None, endpoint: str = "https://api.openai.com/v1/traces/ingest", max_retries: int = 3, base_delay: float = 1.0, max_delay: float = 30.0, ): "...
Args: api_key: The API key for the "Authorization" header. Defaults to `os.environ["OPENAI_API_KEY"]` if not provided. organization: The OpenAI organization to use. Defaults to `os.environ["OPENAI_ORG_ID"]` if not provided. project: The OpenAI...
__init__
python
openai/openai-agents-python
src/agents/tracing/processors.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/processors.py
MIT
def set_api_key(self, api_key: str): """Set the OpenAI API key for the exporter. Args: api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python client. """ # We're specifically setting the underlying cached property as well s...
Set the OpenAI API key for the exporter. Args: api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python client.
set_api_key
python
openai/openai-agents-python
src/agents/tracing/processors.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/processors.py
MIT
def __init__( self, exporter: TracingExporter, max_queue_size: int = 8192, max_batch_size: int = 128, schedule_delay: float = 5.0, export_trigger_ratio: float = 0.7, ): """ Args: exporter: The exporter to use. max_queue_size: Th...
Args: exporter: The exporter to use. max_queue_size: The maximum number of spans to store in the queue. After this, we will start dropping spans. max_batch_size: The maximum number of spans to export in a single batch. schedule_delay: The delay be...
__init__
python
openai/openai-agents-python
src/agents/tracing/processors.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/processors.py
MIT
def shutdown(self, timeout: float | None = None): """ Called when the application stops. We signal our thread to stop, then join it. """ self._shutdown_event.set() # Only join if we ever started the background thread; otherwise flush synchronously. if self._worker_thread...
Called when the application stops. We signal our thread to stop, then join it.
shutdown
python
openai/openai-agents-python
src/agents/tracing/processors.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/processors.py
MIT
def _export_batches(self, force: bool = False): """Drains the queue and exports in batches. If force=True, export everything. Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a certain threshold. """ while True: items_to_export: li...
Drains the queue and exports in batches. If force=True, export everything. Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a certain threshold.
_export_batches
python
openai/openai-agents-python
src/agents/tracing/processors.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/processors.py
MIT
def add_tracing_processor(self, tracing_processor: TracingProcessor): """ Add a processor to the list of processors. Each processor will receive all traces/spans. """ with self._lock: self._processors += (tracing_processor,)
Add a processor to the list of processors. Each processor will receive all traces/spans.
add_tracing_processor
python
openai/openai-agents-python
src/agents/tracing/setup.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/setup.py
MIT
def set_processors(self, processors: list[TracingProcessor]): """ Set the list of processors. This will replace the current list of processors. """ with self._lock: self._processors = tuple(processors)
Set the list of processors. This will replace the current list of processors.
set_processors
python
openai/openai-agents-python
src/agents/tracing/setup.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/tracing/setup.py
MIT
def to_base64(self) -> str: """Returns the audio data as a base64 encoded string.""" if self.buffer.dtype == np.float32: # convert to int16 self.buffer = np.clip(self.buffer, -1.0, 1.0) self.buffer = (self.buffer * 32767).astype(np.int16) elif self.buffer.dtyp...
Returns the audio data as a base64 encoded string.
to_base64
python
openai/openai-agents-python
src/agents/voice/input.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/input.py
MIT
async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]): """Adds more audio data to the stream. Args: audio: The audio data to add. Must be a numpy array of int16 or float32. """ await self.queue.put(audio)
Adds more audio data to the stream. Args: audio: The audio data to add. Must be a numpy array of int16 or float32.
add_audio
python
openai/openai-agents-python
src/agents/voice/input.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/input.py
MIT
async def transcribe( self, input: AudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool, ) -> str: """Given an audio input, produces a text transcription. Args: input: The audio input...
Given an audio input, produces a text transcription. Args: input: The audio input to transcribe. settings: The settings to use for the transcription. trace_include_sensitive_data: Whether to include sensitive data in traces. trace_include_sensitive_audio_data: Wh...
transcribe
python
openai/openai-agents-python
src/agents/voice/model.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/model.py
MIT
async def create_session( self, input: StreamedAudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool, ) -> StreamedTranscriptionSession: """Creates a new transcription session, which you can push audio to,...
Creates a new transcription session, which you can push audio to, and receive a stream of text transcriptions. Args: input: The audio input to transcribe. settings: The settings to use for the transcription. trace_include_sensitive_data: Whether to include sensitive ...
create_session
python
openai/openai-agents-python
src/agents/voice/model.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/model.py
MIT
def __init__( self, *, workflow: VoiceWorkflowBase, stt_model: STTModel | str | None = None, tts_model: TTSModel | str | None = None, config: VoicePipelineConfig | None = None, ): """Create a new voice pipeline. Args: workflow: The workflo...
Create a new voice pipeline. Args: workflow: The workflow to run. See `VoiceWorkflowBase`. stt_model: The speech-to-text model to use. If not provided, a default OpenAI model will be used. tts_model: The text-to-speech model to use. If not provided, a default...
__init__
python
openai/openai-agents-python
src/agents/voice/pipeline.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/pipeline.py
MIT
async def run(self, audio_input: AudioInput | StreamedAudioInput) -> StreamedAudioResult: """Run the voice pipeline. Args: audio_input: The audio input to process. This can either be an `AudioInput` instance, which is a single static buffer, or a `StreamedAudioInput` instanc...
Run the voice pipeline. Args: audio_input: The audio input to process. This can either be an `AudioInput` instance, which is a single static buffer, or a `StreamedAudioInput` instance, which is a stream of audio data that you can append to. Returns: ...
run
python
openai/openai-agents-python
src/agents/voice/pipeline.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/pipeline.py
MIT
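Putting the voice pieces together, a rough end-to-end sketch that feeds one static audio buffer through a `VoicePipeline` wrapping a single-agent workflow; the 24 kHz silent buffer is a placeholder and an `OPENAI_API_KEY` is assumed:

```python
import asyncio

import numpy as np

from agents import Agent
from agents.voice import AudioInput, SingleAgentVoiceWorkflow, VoicePipeline

agent = Agent(name="Assistant", instructions="Answer briefly.")
pipeline = VoicePipeline(workflow=SingleAgentVoiceWorkflow(agent))

async def main() -> None:
    buffer = np.zeros(24_000 * 3, dtype=np.int16)  # three seconds of silence
    result = await pipeline.run(AudioInput(buffer=buffer))
    async for event in result.stream():
        if event.type == "voice_stream_event_audio":
            ...  # play or accumulate event.data

asyncio.run(main())
```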
def __init__( self, tts_model: TTSModel, tts_settings: TTSModelSettings, voice_pipeline_config: VoicePipelineConfig, ): """Create a new `StreamedAudioResult` instance. Args: tts_model: The TTS model to use. tts_settings: The TTS settings to us...
Create a new `StreamedAudioResult` instance. Args: tts_model: The TTS model to use. tts_settings: The TTS settings to use. voice_pipeline_config: The voice pipeline config to use.
__init__
python
openai/openai-agents-python
src/agents/voice/result.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/result.py
MIT
async def stream(self) -> AsyncIterator[VoiceStreamEvent]: """Stream the events and audio data as they're generated.""" while True: try: event = await self._queue.get() except asyncio.CancelledError: break if isinstance(event, VoiceStre...
Stream the events and audio data as they're generated.
stream
python
openai/openai-agents-python
src/agents/voice/result.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/result.py
MIT
def get_sentence_based_splitter( min_sentence_length: int = 20, ) -> Callable[[str], tuple[str, str]]: """Returns a function that splits text into chunks based on sentence boundaries. Args: min_sentence_length: The minimum length of a sentence to be included in a chunk. Returns: A func...
Returns a function that splits text into chunks based on sentence boundaries. Args: min_sentence_length: The minimum length of a sentence to be included in a chunk. Returns: A function that splits text into chunks based on sentence boundaries.
get_sentence_based_splitter
python
openai/openai-agents-python
src/agents/voice/utils.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/utils.py
MIT
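A tiny illustration of the splitter factory described above; the sample text is made up, and the exact split point depends on the punctuation in the buffer:

```python
from agents.voice.utils import get_sentence_based_splitter

splitter = get_sentence_based_splitter(min_sentence_length=5)
ready, remainder = splitter("Hello there. How are you? I am fi")
# `ready` should contain the complete sentences that are long enough to send to TTS,
# while `remainder` keeps the trailing partial sentence for the next call.
```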
def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]: """ A function to split the text into chunks. This is useful if you want to split the text into chunks before sending it to the TTS model rather than waiting for the whole text to be processed. Args: ...
A function to split the text into chunks. This is useful if you want to split the text into chunks before sending it to the TTS model rather than waiting for the whole text to be processed. Args: text_buffer: The text to split. Returns: A tuple of the t...
sentence_based_text_splitter
python
openai/openai-agents-python
src/agents/voice/utils.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/utils.py
MIT
async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]: """Wraps a `RunResultStreaming` object and yields text events from the stream.""" async for event in result.stream_events(): if ( event.type == "raw_response_event" and event.da...
Wraps a `RunResultStreaming` object and yields text events from the stream.
stream_text_from
python
openai/openai-agents-python
src/agents/voice/workflow.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/workflow.py
MIT
def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None): """Create a new single agent voice workflow. Args: agent: The agent to run. callbacks: Optional callbacks to call during the workflow. """ self._input_history: list[TRes...
Create a new single agent voice workflow. Args: agent: The agent to run. callbacks: Optional callbacks to call during the workflow.
__init__
python
openai/openai-agents-python
src/agents/voice/workflow.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/workflow.py
MIT
def __init__( self, *, api_key: str | None = None, base_url: str | None = None, openai_client: AsyncOpenAI | None = None, organization: str | None = None, project: str | None = None, ) -> None: """Create a new OpenAI voice model provider. Args...
Create a new OpenAI voice model provider. Args: api_key: The API key to use for the OpenAI client. If not provided, we will use the default API key. base_url: The base URL to use for the OpenAI client. If not provided, we will use the default base URL. ...
__init__
python
openai/openai-agents-python
src/agents/voice/models/openai_model_provider.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_model_provider.py
MIT
async def _wait_for_event( event_queue: asyncio.Queue[dict[str, Any]], expected_types: list[str], timeout: float ): """ Wait for an event from event_queue whose type is in expected_types within the specified timeout. """ start_time = time.time() while True: remaining = timeout - (time.ti...
Wait for an event from event_queue whose type is in expected_types within the specified timeout.
_wait_for_event
python
openai/openai-agents-python
src/agents/voice/models/openai_stt.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_stt.py
MIT
def __init__( self, model: str, openai_client: AsyncOpenAI, ): """Create a new OpenAI speech-to-text model. Args: model: The name of the model to use. openai_client: The OpenAI client to use. """ self.model = model self._client...
Create a new OpenAI speech-to-text model. Args: model: The name of the model to use. openai_client: The OpenAI client to use.
__init__
python
openai/openai-agents-python
src/agents/voice/models/openai_stt.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_stt.py
MIT
async def transcribe( self, input: AudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool, ) -> str: """Transcribe an audio input. Args: input: The audio input to transcribe. ...
Transcribe an audio input. Args: input: The audio input to transcribe. settings: The settings to use for the transcription. Returns: The transcribed text.
transcribe
python
openai/openai-agents-python
src/agents/voice/models/openai_stt.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_stt.py
MIT
async def create_session( self, input: StreamedAudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool, ) -> StreamedTranscriptionSession: """Create a new transcription session. Args: in...
Create a new transcription session. Args: input: The audio input to transcribe. settings: The settings to use for the transcription. trace_include_sensitive_data: Whether to include sensitive data in traces. trace_include_sensitive_audio_data: Whether to include ...
create_session
python
openai/openai-agents-python
src/agents/voice/models/openai_stt.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_stt.py
MIT
def __init__( self, model: str, openai_client: AsyncOpenAI, ): """Create a new OpenAI text-to-speech model. Args: model: The name of the model to use. openai_client: The OpenAI client to use. """ self.model = model self._client...
Create a new OpenAI text-to-speech model. Args: model: The name of the model to use. openai_client: The OpenAI client to use.
__init__
python
openai/openai-agents-python
src/agents/voice/models/openai_tts.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_tts.py
MIT
async def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: """Run the text-to-speech model. Args: text: The text to convert to speech. settings: The settings to use for the text-to-speech model. Returns: An iterator of audio chunks. ...
Run the text-to-speech model. Args: text: The text to convert to speech. settings: The settings to use for the text-to-speech model. Returns: An iterator of audio chunks.
run
python
openai/openai-agents-python
src/agents/voice/models/openai_tts.py
https://github.com/openai/openai-agents-python/blob/master/src/agents/voice/models/openai_tts.py
MIT
async def test_previous_response_id_passed_between_runs(): """Test that previous_response_id is passed to the model on subsequent runs.""" model = FakeModel() model.set_next_output([get_text_message("done")]) agent = Agent(name="test", model=model) assert model.last_turn_args.get("previous_response...
Test that previous_response_id is passed to the model on subsequent runs.
test_previous_response_id_passed_between_runs
python
openai/openai-agents-python
tests/test_agent_runner.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_agent_runner.py
MIT
async def test_multi_turn_previous_response_id_passed_between_runs(): """Test that previous_response_id is passed to the model on subsequent runs.""" model = FakeModel() agent = Agent( name="test", model=model, tools=[get_function_tool("foo", "tool_result")], ) model.add_mu...
Test that previous_response_id is passed to the model on subsequent runs.
test_multi_turn_previous_response_id_passed_between_runs
python
openai/openai-agents-python
tests/test_agent_runner.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_agent_runner.py
MIT
async def test_previous_response_id_passed_between_runs_streamed(): """Test that previous_response_id is passed to the model on subsequent streamed runs.""" model = FakeModel() model.set_next_output([get_text_message("done")]) agent = Agent( name="test", model=model, ) assert mo...
Test that previous_response_id is passed to the model on subsequent streamed runs.
test_previous_response_id_passed_between_runs_streamed
python
openai/openai-agents-python
tests/test_agent_runner.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_agent_runner.py
MIT
async def test_previous_response_id_passed_between_runs_streamed_multi_turn(): """Test that previous_response_id is passed to the model on subsequent streamed runs.""" model = FakeModel() agent = Agent( name="test", model=model, tools=[get_function_tool("foo", "tool_result")], )...
Test that previous_response_id is passed to the model on subsequent streamed runs.
test_previous_response_id_passed_between_runs_streamed_multi_turn
python
openai/openai-agents-python
tests/test_agent_runner.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_agent_runner.py
MIT
async def test_dynamic_tool_addition_run() -> None: """Test that tools can be added to an agent during a run.""" model = FakeModel() executed: dict[str, bool] = {"called": False} agent = Agent(name="test", model=model, tool_use_behavior="run_llm_again") @function_tool(name_override="tool2") d...
Test that tools can be added to an agent during a run.
test_dynamic_tool_addition_run
python
openai/openai-agents-python
tests/test_agent_runner.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_agent_runner.py
MIT
async def test_get_screenshot_sync_executes_action_and_takes_screenshot( action: Any, expected_call: tuple[str, tuple[Any, ...]] ) -> None: """For each action type, assert that the corresponding computer method is invoked and that a screenshot is taken and returned.""" computer = LoggingComputer(screens...
For each action type, assert that the corresponding computer method is invoked and that a screenshot is taken and returned.
test_get_screenshot_sync_executes_action_and_takes_screenshot
python
openai/openai-agents-python
tests/test_computer_action.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_computer_action.py
MIT
async def test_get_screenshot_async_executes_action_and_takes_screenshot( action: Any, expected_call: tuple[str, tuple[Any, ...]] ) -> None: """For each action type on an `AsyncComputer`, the corresponding coroutine should be awaited and a screenshot taken.""" computer = LoggingAsyncComputer(screenshot_...
For each action type on an `AsyncComputer`, the corresponding coroutine should be awaited and a screenshot taken.
test_get_screenshot_async_executes_action_and_takes_screenshot
python
openai/openai-agents-python
tests/test_computer_action.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_computer_action.py
MIT
async def test_extra_headers_passed_to_openai_responses_model(): """ Ensure extra_headers in ModelSettings is passed to the OpenAIResponsesModel client. """ called_kwargs = {} class DummyResponses: async def create(self, **kwargs): nonlocal called_kwargs called_kwarg...
Ensure extra_headers in ModelSettings is passed to the OpenAIResponsesModel client.
test_extra_headers_passed_to_openai_responses_model
python
openai/openai-agents-python
tests/test_extra_headers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_extra_headers.py
MIT
async def test_extra_headers_passed_to_openai_client(): """ Ensure extra_headers in ModelSettings is passed to the OpenAI client. """ called_kwargs = {} class DummyCompletions: async def create(self, **kwargs): nonlocal called_kwargs called_kwargs = kwargs ...
Ensure extra_headers in ModelSettings is passed to the OpenAI client.
test_extra_headers_passed_to_openai_client
python
openai/openai-agents-python
tests/test_extra_headers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_extra_headers.py
MIT
def test_simple_function(): """Test a function that has simple typed parameters and defaults.""" func_schema = function_schema(simple_function) # Check that the JSON schema is a dictionary with title, type, etc. assert isinstance(func_schema.params_json_schema, dict) assert func_schema.params_json_...
Test a function that has simple typed parameters and defaults.
test_simple_function
python
openai/openai-agents-python
tests/test_function_schema.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_function_schema.py
MIT
def test_varargs_function(): """Test a function that uses *args and **kwargs.""" func_schema = function_schema(varargs_function, strict_json_schema=False) # Check JSON schema structure assert isinstance(func_schema.params_json_schema, dict) assert func_schema.params_json_schema.get("title") == "var...
Test a function that uses *args and **kwargs.
test_varargs_function
python
openai/openai-agents-python
tests/test_function_schema.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_function_schema.py
MIT
def test_schema_with_mapping_raises_strict_mode_error(): """A mapping type is not allowed in strict mode. Same for dicts. Ensure we raise a UserError.""" def func_with_mapping(test_one: Mapping[str, int]) -> str: return "foo" with pytest.raises(UserError): function_schema(func_with_mapping...
A mapping type is not allowed in strict mode. Same for dicts. Ensure we raise a UserError.
test_schema_with_mapping_raises_strict_mode_error
python
openai/openai-agents-python
tests/test_function_schema.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_function_schema.py
MIT
async def test_extract_descriptions_from_docstring(): """Ensure that we extract function and param descriptions from docstrings.""" tool = get_weather assert tool.description == "Get the weather for a given city." params_json_schema = tool.params_json_schema assert params_json_schema == snapshot( ...
Ensure that we extract function and param descriptions from docstrings.
test_extract_descriptions_from_docstring
python
openai/openai-agents-python
tests/test_function_tool_decorator.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_function_tool_decorator.py
MIT
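The test above exercises how the `function_tool` decorator lifts descriptions out of a docstring; a hedged sketch of declaring such a tool (the weather function body is a stand-in):

```python
from agents import function_tool

@function_tool
def get_weather(city: str) -> str:
    """Get the weather for a given city.

    Args:
        city: The city to get the weather for.
    """
    return f"The weather in {city} is sunny."

print(get_weather.description)         # taken from the docstring summary line
print(get_weather.params_json_schema)  # parameter descriptions come from the Args section
```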
def make_message( content_items: list[ResponseOutputText | ResponseOutputRefusal], ) -> ResponseOutputMessage: """ Helper to construct a ResponseOutputMessage with a single batch of content items, using a fixed id/status. """ return ResponseOutputMessage( id="msg123", content=con...
Helper to construct a ResponseOutputMessage with a single batch of content items, using a fixed id/status.
make_message
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_text_message_outputs_across_list_of_runitems() -> None: """ Compose several RunItem instances, including a non-message run item, and ensure that only MessageOutputItem instances contribute any text. The non-message (ReasoningItem) should be ignored by Helpers.text_message_outputs. """ m...
Compose several RunItem instances, including a non-message run item, and ensure that only MessageOutputItem instances contribute any text. The non-message (ReasoningItem) should be ignored by Helpers.text_message_outputs.
test_text_message_outputs_across_list_of_runitems
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_message() -> None: """An output message should convert into an input dict matching the message's own structure.""" content = ResponseOutputText(annotations=[], text="hello world", type="output_text") message = ResponseOutputMessage( id="m1", content=[content], role="assis...
An output message should convert into an input dict matching the message's own structure.
test_to_input_items_for_message
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_function_call() -> None: """A function tool call output should produce the same dict as a function tool call input.""" tool_call = ResponseFunctionToolCall( id="f1", arguments="{}", call_id="c1", name="func", type="function_call" ) resp = ModelResponse(output=[tool_ca...
A function tool call output should produce the same dict as a function tool call input.
test_to_input_items_for_function_call
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_file_search_call() -> None: """A file search tool call output should produce the same dict as a file search input.""" fs_call = ResponseFileSearchToolCall( id="fs1", queries=["query"], status="completed", type="file_search_call" ) resp = ModelResponse(output=[fs_call]...
A file search tool call output should produce the same dict as a file search input.
test_to_input_items_for_file_search_call
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_web_search_call() -> None: """A web search tool call output should produce the same dict as a web search input.""" ws_call = ResponseFunctionWebSearch(id="w1", status="completed", type="web_search_call") resp = ModelResponse(output=[ws_call], usage=Usage(), response_id=None) ...
A web search tool call output should produce the same dict as a web search input.
test_to_input_items_for_web_search_call
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_computer_call_click() -> None: """A computer call output should yield a dict whose shape matches the computer call input.""" action = ActionScreenshot(type="screenshot") comp_call = ResponseComputerToolCall( id="comp1", action=action, type="computer_call",...
A computer call output should yield a dict whose shape matches the computer call input.
test_to_input_items_for_computer_call_click
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
def test_to_input_items_for_reasoning() -> None: """A reasoning output should produce the same dict as a reasoning input item.""" rc = Summary(text="why", type="summary_text") reasoning = ResponseReasoningItem(id="rid1", summary=[rc], type="reasoning") resp = ModelResponse(output=[reasoning], usage=Usag...
A reasoning output should produce the same dict as a reasoning input item.
test_to_input_items_for_reasoning
python
openai/openai-agents-python
tests/test_items_helpers.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_items_helpers.py
MIT
async def test_get_response_with_text_message(monkeypatch) -> None: """ When the model returns a ChatCompletionMessage with plain text content, `get_response` should produce a single `ResponseOutputMessage` containing a `ResponseOutputText` with that content, and a `Usage` populated from the complet...
When the model returns a ChatCompletionMessage with plain text content, `get_response` should produce a single `ResponseOutputMessage` containing a `ResponseOutputText` with that content, and a `Usage` populated from the completion's usage.
test_get_response_with_text_message
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
async def test_get_response_with_refusal(monkeypatch) -> None: """ When the model returns a ChatCompletionMessage with a `refusal` instead of normal `content`, `get_response` should produce a single `ResponseOutputMessage` containing a `ResponseOutputRefusal` part. """ msg = ChatCompletionMessag...
When the model returns a ChatCompletionMessage with a `refusal` instead of normal `content`, `get_response` should produce a single `ResponseOutputMessage` containing a `ResponseOutputRefusal` part.
test_get_response_with_refusal
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
async def test_get_response_with_tool_call(monkeypatch) -> None: """ If the ChatCompletionMessage includes one or more tool_calls, `get_response` should append corresponding `ResponseFunctionToolCall` items after the assistant message item with matching name/arguments. """ tool_call = ChatComple...
If the ChatCompletionMessage includes one or more tool_calls, `get_response` should append corresponding `ResponseFunctionToolCall` items after the assistant message item with matching name/arguments.
test_get_response_with_tool_call
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
async def test_get_response_with_no_message(monkeypatch) -> None: """If the model returns no message, get_response should return an empty output.""" msg = ChatCompletionMessage(role="assistant", content="ignored") choice = Choice(index=0, finish_reason="content_filter", message=msg) choice.message = Non...
If the model returns no message, get_response should return an empty output.
test_get_response_with_no_message
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
async def test_fetch_response_non_stream(monkeypatch) -> None: """ Verify that `_fetch_response` builds the correct OpenAI API call when not streaming and returns the ChatCompletion object directly. We supply a dummy ChatCompletion through a stubbed OpenAI client and inspect the captured kwargs. ...
Verify that `_fetch_response` builds the correct OpenAI API call when not streaming and returns the ChatCompletion object directly. We supply a dummy ChatCompletion through a stubbed OpenAI client and inspect the captured kwargs.
test_fetch_response_non_stream
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
def test_store_param(): """Should default to True for OpenAI API calls, and False otherwise.""" model_settings = ModelSettings() client = AsyncOpenAI() assert ChatCmplHelpers.get_store_param(client, model_settings) is True, ( "Should default to True for OpenAI API calls" ) model_settin...
Should default to True for OpenAI API calls, and False otherwise.
test_store_param
python
openai/openai-agents-python
tests/test_openai_chatcompletions.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions.py
MIT
def test_message_to_output_items_with_text_only(): """ Make sure a simple ChatCompletionMessage with string content is converted into a single ResponseOutputMessage containing one ResponseOutputText. """ msg = ChatCompletionMessage(role="assistant", content="Hello") items = Converter.message_to_...
Make sure a simple ChatCompletionMessage with string content is converted into a single ResponseOutputMessage containing one ResponseOutputText.
test_message_to_output_items_with_text_only
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_message_to_output_items_with_refusal(): """ Make sure a message with a refusal string produces a ResponseOutputMessage with a ResponseOutputRefusal content part. """ msg = ChatCompletionMessage(role="assistant", refusal="I'm sorry") items = Converter.message_to_output_items(msg) ass...
Make sure a message with a refusal string produces a ResponseOutputMessage with a ResponseOutputRefusal content part.
test_message_to_output_items_with_refusal
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_message_to_output_items_with_tool_call(): """ If the ChatCompletionMessage contains one or more tool_calls, they should be reflected as separate `ResponseFunctionToolCall` items appended after the message item. """ tool_call = ChatCompletionMessageToolCall( id="tool1", t...
If the ChatCompletionMessage contains one or more tool_calls, they should be reflected as separate `ResponseFunctionToolCall` items appended after the message item.
test_message_to_output_items_with_tool_call
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_items_to_messages_with_string_user_content(): """ A simple string as the items argument should be converted into a user message param dict with the same content. """ result = Converter.items_to_messages("Ask me anything") assert isinstance(result, list) assert len(result) == 1 m...
A simple string as the items argument should be converted into a user message param dict with the same content.
test_items_to_messages_with_string_user_content
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_items_to_messages_with_easy_input_message(): """ Given an easy input message dict (just role/content), the converter should produce the appropriate ChatCompletionMessageParam with the same content. """ items: list[TResponseInputItem] = [ { "role": "user", "co...
Given an easy input message dict (just role/content), the converter should produce the appropriate ChatCompletionMessageParam with the same content.
test_items_to_messages_with_easy_input_message
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_items_to_messages_with_output_message_and_function_call(): """ Given a sequence of one ResponseOutputMessageParam followed by a ResponseFunctionToolCallParam, the converter should produce a single ChatCompletionAssistantMessageParam that includes both the assistant's textual content and a p...
Given a sequence of one ResponseOutputMessageParam followed by a ResponseFunctionToolCallParam, the converter should produce a single ChatCompletionAssistantMessageParam that includes both the assistant's textual content and a populated `tool_calls` reflecting the function call.
test_items_to_messages_with_output_message_and_function_call
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_convert_tool_choice_handles_standard_and_named_options() -> None: """ The `Converter.convert_tool_choice` method should return NOT_GIVEN if no choice is provided, pass through values like "auto", "required", or "none" unchanged, and translate any other string into a function selection dict....
The `Converter.convert_tool_choice` method should return NOT_GIVEN if no choice is provided, pass through values like "auto", "required", or "none" unchanged, and translate any other string into a function selection dict.
test_convert_tool_choice_handles_standard_and_named_options
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT
def test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas() -> None: """ The `Converter.convert_response_format` method should return NOT_GIVEN when no output schema is provided or if the output schema indicates plain text. For structured output schemas, it should return a d...
The `Converter.convert_response_format` method should return NOT_GIVEN when no output schema is provided or if the output schema indicates plain text. For structured output schemas, it should return a dict with type `json_schema` and include the generated JSON schema and strict flag from the provid...
test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas
python
openai/openai-agents-python
tests/test_openai_chatcompletions_converter.py
https://github.com/openai/openai-agents-python/blob/master/tests/test_openai_chatcompletions_converter.py
MIT