sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
openai/openai-python:src/openai/types/realtime/realtime_mcp_approval_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcpApprovalResponse"]
class RealtimeMcpApprovalResponse(BaseModel):
"""A Realtime item responding to an MCP approval request."""
id: str
"""The unique ID of the approval response."""
approval_request_id: str
"""The ID of the approval request being answered."""
approve: bool
"""Whether the request was approved."""
type: Literal["mcp_approval_response"]
"""The type of the item. Always `mcp_approval_response`."""
reason: Optional[str] = None
"""Optional reason for the decision."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_approval_response.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_approval_response_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcpApprovalResponseParam"]
class RealtimeMcpApprovalResponseParam(TypedDict, total=False):
"""A Realtime item responding to an MCP approval request."""
id: Required[str]
"""The unique ID of the approval response."""
approval_request_id: Required[str]
"""The ID of the approval request being answered."""
approve: Required[bool]
"""Whether the request was approved."""
type: Required[Literal["mcp_approval_response"]]
"""The type of the item. Always `mcp_approval_response`."""
reason: Optional[str]
"""Optional reason for the decision."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_approval_response_param.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_list_tools.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcpListTools", "Tool"]
class Tool(BaseModel):
"""A tool available on an MCP server."""
input_schema: object
"""The JSON schema describing the tool's input."""
name: str
"""The name of the tool."""
annotations: Optional[object] = None
"""Additional annotations about the tool."""
description: Optional[str] = None
"""The description of the tool."""
class RealtimeMcpListTools(BaseModel):
"""A Realtime item listing tools available on an MCP server."""
server_label: str
"""The label of the MCP server."""
tools: List[Tool]
"""The tools available on the server."""
type: Literal["mcp_list_tools"]
"""The type of the item. Always `mcp_list_tools`."""
id: Optional[str] = None
"""The unique ID of the list."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_list_tools.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_list_tools_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcpListToolsParam", "Tool"]
class Tool(TypedDict, total=False):
"""A tool available on an MCP server."""
input_schema: Required[object]
"""The JSON schema describing the tool's input."""
name: Required[str]
"""The name of the tool."""
annotations: Optional[object]
"""Additional annotations about the tool."""
description: Optional[str]
"""The description of the tool."""
class RealtimeMcpListToolsParam(TypedDict, total=False):
"""A Realtime item listing tools available on an MCP server."""
server_label: Required[str]
"""The label of the MCP server."""
tools: Required[Iterable[Tool]]
"""The tools available on the server."""
type: Required[Literal["mcp_list_tools"]]
"""The type of the item. Always `mcp_list_tools`."""
id: str
"""The unique ID of the list."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_list_tools_param.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_protocol_error.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcpProtocolError"]
class RealtimeMcpProtocolError(BaseModel):
code: int
message: str
type: Literal["protocol_error"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_protocol_error.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_protocol_error_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcpProtocolErrorParam"]
class RealtimeMcpProtocolErrorParam(TypedDict, total=False):
code: Required[int]
message: Required[str]
type: Required[Literal["protocol_error"]]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_protocol_error_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .realtime_mcphttp_error import RealtimeMcphttpError
from .realtime_mcp_protocol_error import RealtimeMcpProtocolError
from .realtime_mcp_tool_execution_error import RealtimeMcpToolExecutionError
__all__ = ["RealtimeMcpToolCall", "Error"]
Error: TypeAlias = Annotated[
Union[RealtimeMcpProtocolError, RealtimeMcpToolExecutionError, RealtimeMcphttpError, None],
PropertyInfo(discriminator="type"),
]
class RealtimeMcpToolCall(BaseModel):
"""A Realtime item representing an invocation of a tool on an MCP server."""
id: str
"""The unique ID of the tool call."""
arguments: str
"""A JSON string of the arguments passed to the tool."""
name: str
"""The name of the tool that was run."""
server_label: str
"""The label of the MCP server running the tool."""
type: Literal["mcp_call"]
"""The type of the item. Always `mcp_call`."""
approval_request_id: Optional[str] = None
"""The ID of an associated approval request, if any."""
error: Optional[Error] = None
"""The error from the tool call, if any."""
output: Optional[str] = None
"""The output from the tool call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_tool_call.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_tool_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .realtime_mcphttp_error_param import RealtimeMcphttpErrorParam
from .realtime_mcp_protocol_error_param import RealtimeMcpProtocolErrorParam
from .realtime_mcp_tool_execution_error_param import RealtimeMcpToolExecutionErrorParam
__all__ = ["RealtimeMcpToolCallParam", "Error"]
Error: TypeAlias = Union[RealtimeMcpProtocolErrorParam, RealtimeMcpToolExecutionErrorParam, RealtimeMcphttpErrorParam]
class RealtimeMcpToolCallParam(TypedDict, total=False):
"""A Realtime item representing an invocation of a tool on an MCP server."""
id: Required[str]
"""The unique ID of the tool call."""
arguments: Required[str]
"""A JSON string of the arguments passed to the tool."""
name: Required[str]
"""The name of the tool that was run."""
server_label: Required[str]
"""The label of the MCP server running the tool."""
type: Required[Literal["mcp_call"]]
"""The type of the item. Always `mcp_call`."""
approval_request_id: Optional[str]
"""The ID of an associated approval request, if any."""
error: Optional[Error]
"""The error from the tool call, if any."""
output: Optional[str]
"""The output from the tool call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_tool_call_param.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_tool_execution_error.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcpToolExecutionError"]
class RealtimeMcpToolExecutionError(BaseModel):
message: str
type: Literal["tool_execution_error"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_tool_execution_error.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_tool_execution_error_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcpToolExecutionErrorParam"]
class RealtimeMcpToolExecutionErrorParam(TypedDict, total=False):
message: Required[str]
type: Required[Literal["tool_execution_error"]]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_tool_execution_error_param.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcphttp_error.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcphttpError"]
class RealtimeMcphttpError(BaseModel):
code: int
message: str
type: Literal["http_error"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcphttp_error.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcphttp_error_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcphttpErrorParam"]
class RealtimeMcphttpErrorParam(TypedDict, total=False):
code: Required[int]
message: Required[str]
type: Required[Literal["http_error"]]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcphttp_error_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from ..shared.metadata import Metadata
from .conversation_item import ConversationItem
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_response_usage import RealtimeResponseUsage
from .realtime_response_status import RealtimeResponseStatus
__all__ = ["RealtimeResponse", "Audio", "AudioOutput"]
class AudioOutput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the output audio."""
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
] = None
"""The voice the model uses to respond.
Voice cannot be changed during the session once the model has responded with
audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
`coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend
`marin` and `cedar` for best quality.
"""
class Audio(BaseModel):
"""Configuration for audio output."""
output: Optional[AudioOutput] = None
class RealtimeResponse(BaseModel):
"""The response resource."""
id: Optional[str] = None
"""The unique ID of the response, will look like `resp_1234`."""
audio: Optional[Audio] = None
"""Configuration for audio output."""
conversation_id: Optional[str] = None
"""
Which conversation the response is added to, determined by the `conversation`
field in the `response.create` event. If `auto`, the response will be added to
the default conversation and the value of `conversation_id` will be an id like
`conv_1234`. If `none`, the response will not be added to any conversation and
the value of `conversation_id` will be `null`. If responses are being triggered
automatically by VAD the response will be added to the default conversation
"""
max_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls, that was used in this response.
"""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
object: Optional[Literal["realtime.response"]] = None
"""The object type, must be `realtime.response`."""
output: Optional[List[ConversationItem]] = None
"""The list of output items generated by the response."""
output_modalities: Optional[List[Literal["text", "audio"]]] = None
"""
The set of modalities the model used to respond, currently the only possible
values are `[\"audio\"]`, `[\"text\"]`. Audio output always include a text
transcript. Setting the output to mode `text` will disable audio output from the
model.
"""
status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None
"""
The final status of the response (`completed`, `cancelled`, `failed`, or
`incomplete`, `in_progress`).
"""
status_details: Optional[RealtimeResponseStatus] = None
"""Additional details about the status."""
usage: Optional[RealtimeResponseUsage] = None
"""Usage statistics for the Response, this will correspond to billing.
A Realtime API session will maintain a conversation context and append new Items
to the Conversation, thus output from previous turns (text and audio tokens)
will become the input for later turns.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_status.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeResponseStatus", "Error"]
class Error(BaseModel):
"""
A description of the error that caused the response to fail,
populated when the `status` is `failed`.
"""
code: Optional[str] = None
"""Error code, if any."""
type: Optional[str] = None
"""The type of error."""
class RealtimeResponseStatus(BaseModel):
"""Additional details about the status."""
error: Optional[Error] = None
"""
A description of the error that caused the response to fail, populated when the
`status` is `failed`.
"""
reason: Optional[Literal["turn_detected", "client_cancelled", "max_output_tokens", "content_filter"]] = None
"""The reason the Response did not complete.
For a `cancelled` Response, one of `turn_detected` (the server VAD detected a
new start of speech) or `client_cancelled` (the client sent a cancel event). For
an `incomplete` Response, one of `max_output_tokens` or `content_filter` (the
server-side safety filter activated and cut off the response).
"""
type: Optional[Literal["completed", "cancelled", "incomplete", "failed"]] = None
"""
The type of error that caused the response to fail, corresponding with the
`status` field (`completed`, `cancelled`, `incomplete`, `failed`).
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_status.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_usage.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .realtime_response_usage_input_token_details import RealtimeResponseUsageInputTokenDetails
from .realtime_response_usage_output_token_details import RealtimeResponseUsageOutputTokenDetails
__all__ = ["RealtimeResponseUsage"]
class RealtimeResponseUsage(BaseModel):
"""Usage statistics for the Response, this will correspond to billing.
A
Realtime API session will maintain a conversation context and append new
Items to the Conversation, thus output from previous turns (text and
audio tokens) will become the input for later turns.
"""
input_token_details: Optional[RealtimeResponseUsageInputTokenDetails] = None
"""Details about the input tokens used in the Response.
Cached tokens are tokens from previous turns in the conversation that are
included as context for the current response. Cached tokens here are counted as
a subset of input tokens, meaning input tokens will include cached and uncached
tokens.
"""
input_tokens: Optional[int] = None
"""
The number of input tokens used in the Response, including text and audio
tokens.
"""
output_token_details: Optional[RealtimeResponseUsageOutputTokenDetails] = None
"""Details about the output tokens used in the Response."""
output_tokens: Optional[int] = None
"""
The number of output tokens sent in the Response, including text and audio
tokens.
"""
total_tokens: Optional[int] = None
"""
The total number of tokens in the Response including input and output text and
audio tokens.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_usage.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_usage_input_token_details.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
__all__ = ["RealtimeResponseUsageInputTokenDetails", "CachedTokensDetails"]
class CachedTokensDetails(BaseModel):
"""Details about the cached tokens used as input for the Response."""
audio_tokens: Optional[int] = None
"""The number of cached audio tokens used as input for the Response."""
image_tokens: Optional[int] = None
"""The number of cached image tokens used as input for the Response."""
text_tokens: Optional[int] = None
"""The number of cached text tokens used as input for the Response."""
class RealtimeResponseUsageInputTokenDetails(BaseModel):
"""Details about the input tokens used in the Response.
Cached tokens are tokens from previous turns in the conversation that are included as context for the current response. Cached tokens here are counted as a subset of input tokens, meaning input tokens will include cached and uncached tokens.
"""
audio_tokens: Optional[int] = None
"""The number of audio tokens used as input for the Response."""
cached_tokens: Optional[int] = None
"""The number of cached tokens used as input for the Response."""
cached_tokens_details: Optional[CachedTokensDetails] = None
"""Details about the cached tokens used as input for the Response."""
image_tokens: Optional[int] = None
"""The number of image tokens used as input for the Response."""
text_tokens: Optional[int] = None
"""The number of text tokens used as input for the Response."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_usage_input_token_details.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_usage_output_token_details.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
__all__ = ["RealtimeResponseUsageOutputTokenDetails"]
class RealtimeResponseUsageOutputTokenDetails(BaseModel):
"""Details about the output tokens used in the Response."""
audio_tokens: Optional[int] = None
"""The number of audio tokens used in the Response."""
text_tokens: Optional[int] = None
"""The number of text tokens used in the Response."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_usage_output_token_details.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_server_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .conversation_item import ConversationItem
from .response_done_event import ResponseDoneEvent
from .realtime_error_event import RealtimeErrorEvent
from .mcp_list_tools_failed import McpListToolsFailed
from .session_created_event import SessionCreatedEvent
from .session_updated_event import SessionUpdatedEvent
from .conversation_item_done import ConversationItemDone
from .response_created_event import ResponseCreatedEvent
from .conversation_item_added import ConversationItemAdded
from .mcp_list_tools_completed import McpListToolsCompleted
from .response_mcp_call_failed import ResponseMcpCallFailed
from .response_text_done_event import ResponseTextDoneEvent
from .rate_limits_updated_event import RateLimitsUpdatedEvent
from .response_audio_done_event import ResponseAudioDoneEvent
from .response_text_delta_event import ResponseTextDeltaEvent
from .conversation_created_event import ConversationCreatedEvent
from .mcp_list_tools_in_progress import McpListToolsInProgress
from .response_audio_delta_event import ResponseAudioDeltaEvent
from .response_mcp_call_completed import ResponseMcpCallCompleted
from .response_mcp_call_in_progress import ResponseMcpCallInProgress
from .conversation_item_created_event import ConversationItemCreatedEvent
from .conversation_item_deleted_event import ConversationItemDeletedEvent
from .response_output_item_done_event import ResponseOutputItemDoneEvent
from .input_audio_buffer_cleared_event import InputAudioBufferClearedEvent
from .response_content_part_done_event import ResponseContentPartDoneEvent
from .response_mcp_call_arguments_done import ResponseMcpCallArgumentsDone
from .response_output_item_added_event import ResponseOutputItemAddedEvent
from .conversation_item_truncated_event import ConversationItemTruncatedEvent
from .response_content_part_added_event import ResponseContentPartAddedEvent
from .response_mcp_call_arguments_delta import ResponseMcpCallArgumentsDelta
from .input_audio_buffer_committed_event import InputAudioBufferCommittedEvent
from .input_audio_buffer_timeout_triggered import InputAudioBufferTimeoutTriggered
from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent
from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent
from .input_audio_buffer_speech_started_event import InputAudioBufferSpeechStartedEvent
from .input_audio_buffer_speech_stopped_event import InputAudioBufferSpeechStoppedEvent
from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
from .input_audio_buffer_dtmf_event_received_event import InputAudioBufferDtmfEventReceivedEvent
from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
from .conversation_item_input_audio_transcription_segment import ConversationItemInputAudioTranscriptionSegment
from .conversation_item_input_audio_transcription_delta_event import ConversationItemInputAudioTranscriptionDeltaEvent
from .conversation_item_input_audio_transcription_failed_event import ConversationItemInputAudioTranscriptionFailedEvent
from .conversation_item_input_audio_transcription_completed_event import (
ConversationItemInputAudioTranscriptionCompletedEvent,
)
__all__ = [
"RealtimeServerEvent",
"ConversationItemRetrieved",
"OutputAudioBufferStarted",
"OutputAudioBufferStopped",
"OutputAudioBufferCleared",
]
class ConversationItemRetrieved(BaseModel):
"""Returned when a conversation item is retrieved with `conversation.item.retrieve`.
This is provided as a way to fetch the server's representation of an item, for example to get access to the post-processed audio data after noise cancellation and VAD. It includes the full content of the Item, including audio data.
"""
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""A single item within a Realtime conversation."""
type: Literal["conversation.item.retrieved"]
"""The event type, must be `conversation.item.retrieved`."""
class OutputAudioBufferStarted(BaseModel):
"""
**WebRTC/SIP Only:** Emitted when the server begins streaming audio to the client. This event is
emitted after an audio content part has been added (`response.content_part.added`)
to the response.
[Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
"""
event_id: str
"""The unique ID of the server event."""
response_id: str
"""The unique ID of the response that produced the audio."""
type: Literal["output_audio_buffer.started"]
"""The event type, must be `output_audio_buffer.started`."""
class OutputAudioBufferStopped(BaseModel):
"""
**WebRTC/SIP Only:** Emitted when the output audio buffer has been completely drained on the server,
and no more audio is forthcoming. This event is emitted after the full response
data has been sent to the client (`response.done`).
[Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
"""
event_id: str
"""The unique ID of the server event."""
response_id: str
"""The unique ID of the response that produced the audio."""
type: Literal["output_audio_buffer.stopped"]
"""The event type, must be `output_audio_buffer.stopped`."""
class OutputAudioBufferCleared(BaseModel):
"""**WebRTC/SIP Only:** Emitted when the output audio buffer is cleared.
This happens either in VAD
mode when the user has interrupted (`input_audio_buffer.speech_started`),
or when the client has emitted the `output_audio_buffer.clear` event to manually
cut off the current audio response.
[Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
"""
event_id: str
"""The unique ID of the server event."""
response_id: str
"""The unique ID of the response that produced the audio."""
type: Literal["output_audio_buffer.cleared"]
"""The event type, must be `output_audio_buffer.cleared`."""
RealtimeServerEvent: TypeAlias = Annotated[
Union[
ConversationCreatedEvent,
ConversationItemCreatedEvent,
ConversationItemDeletedEvent,
ConversationItemInputAudioTranscriptionCompletedEvent,
ConversationItemInputAudioTranscriptionDeltaEvent,
ConversationItemInputAudioTranscriptionFailedEvent,
ConversationItemRetrieved,
ConversationItemTruncatedEvent,
RealtimeErrorEvent,
InputAudioBufferClearedEvent,
InputAudioBufferCommittedEvent,
InputAudioBufferDtmfEventReceivedEvent,
InputAudioBufferSpeechStartedEvent,
InputAudioBufferSpeechStoppedEvent,
RateLimitsUpdatedEvent,
ResponseAudioDeltaEvent,
ResponseAudioDoneEvent,
ResponseAudioTranscriptDeltaEvent,
ResponseAudioTranscriptDoneEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseDoneEvent,
ResponseFunctionCallArgumentsDeltaEvent,
ResponseFunctionCallArgumentsDoneEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
SessionCreatedEvent,
SessionUpdatedEvent,
OutputAudioBufferStarted,
OutputAudioBufferStopped,
OutputAudioBufferCleared,
ConversationItemAdded,
ConversationItemDone,
InputAudioBufferTimeoutTriggered,
ConversationItemInputAudioTranscriptionSegment,
McpListToolsInProgress,
McpListToolsCompleted,
McpListToolsFailed,
ResponseMcpCallArgumentsDelta,
ResponseMcpCallArgumentsDone,
ResponseMcpCallInProgress,
ResponseMcpCallCompleted,
ResponseMcpCallFailed,
],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_server_event.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_session_create_request.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_truncation import RealtimeTruncation
from .realtime_audio_config import RealtimeAudioConfig
from .realtime_tools_config import RealtimeToolsConfig
from .realtime_tracing_config import RealtimeTracingConfig
from ..responses.response_prompt import ResponsePrompt
from .realtime_tool_choice_config import RealtimeToolChoiceConfig
__all__ = ["RealtimeSessionCreateRequest"]
class RealtimeSessionCreateRequest(BaseModel):
"""Realtime session object configuration."""
type: Literal["realtime"]
"""The type of session to create. Always `realtime` for the Realtime API."""
audio: Optional[RealtimeAudioConfig] = None
"""Configuration for input and output audio."""
include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
instructions: Optional[str] = None
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
model: Union[
str,
Literal[
"gpt-realtime",
"gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
"gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
],
None,
] = None
"""The Realtime model used for this session."""
output_modalities: Optional[List[Literal["text", "audio"]]] = None
"""The set of modalities the model can respond with.
It defaults to `["audio"]`, indicating that the model will respond with audio
plus a transcript. `["text"]` can be used to make the model respond with text
only. It is not possible to request both `text` and `audio` at the same time.
"""
prompt: Optional[ResponsePrompt] = None
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
tool_choice: Optional[RealtimeToolChoiceConfig] = None
"""How the model chooses tools.
Provide one of the string modes or force a specific function/MCP tool.
"""
tools: Optional[RealtimeToolsConfig] = None
"""Tools available to the model."""
tracing: Optional[RealtimeTracingConfig] = None
"""
Realtime API can write session traces to the
[Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
tracing is enabled for a session, the configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
truncation: Optional[RealtimeTruncation] = None
"""
When the number of tokens in a conversation exceeds the model's input token
limit, the conversation be truncated, meaning messages (starting from the
oldest) will not be included in the model's context. A 32k context model with
4,096 max output tokens can only include 28,224 tokens in the context before
truncation occurs.
Clients can configure truncation behavior to truncate with a lower max token
limit, which is an effective way to control token usage and cost.
Truncation will reduce the number of cached tokens on the next turn (busting the
cache), since messages are dropped from the beginning of the context. However,
clients can also configure truncation to retain messages up to a fraction of the
maximum context size, which will reduce the need for future truncations and thus
improve the cache rate.
Truncation can be disabled entirely, which means the server will never truncate
but would instead return an error if the conversation exceeds the model's input
token limit.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_session_create_request.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_session_create_request_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Optional
from typing_extensions import Literal, Required, TypedDict
from .realtime_truncation_param import RealtimeTruncationParam
from .realtime_audio_config_param import RealtimeAudioConfigParam
from .realtime_tools_config_param import RealtimeToolsConfigParam
from .realtime_tracing_config_param import RealtimeTracingConfigParam
from ..responses.response_prompt_param import ResponsePromptParam
from .realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam
__all__ = ["RealtimeSessionCreateRequestParam"]
class RealtimeSessionCreateRequestParam(TypedDict, total=False):
"""Realtime session object configuration."""
type: Required[Literal["realtime"]]
"""The type of session to create. Always `realtime` for the Realtime API."""
audio: RealtimeAudioConfigParam
"""Configuration for input and output audio."""
include: List[Literal["item.input_audio_transcription.logprobs"]]
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
instructions: str
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_output_tokens: Union[int, Literal["inf"]]
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
model: Union[
str,
Literal[
"gpt-realtime",
"gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
"gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
],
]
"""The Realtime model used for this session."""
output_modalities: List[Literal["text", "audio"]]
"""The set of modalities the model can respond with.
It defaults to `["audio"]`, indicating that the model will respond with audio
plus a transcript. `["text"]` can be used to make the model respond with text
only. It is not possible to request both `text` and `audio` at the same time.
"""
prompt: Optional[ResponsePromptParam]
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
tool_choice: RealtimeToolChoiceConfigParam
"""How the model chooses tools.
Provide one of the string modes or force a specific function/MCP tool.
"""
tools: RealtimeToolsConfigParam
"""Tools available to the model."""
tracing: Optional[RealtimeTracingConfigParam]
"""
Realtime API can write session traces to the
[Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
tracing is enabled for a session, the configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
truncation: RealtimeTruncationParam
"""
When the number of tokens in a conversation exceeds the model's input token
limit, the conversation be truncated, meaning messages (starting from the
oldest) will not be included in the model's context. A 32k context model with
4,096 max output tokens can only include 28,224 tokens in the context before
truncation occurs.
Clients can configure truncation behavior to truncate with a lower max token
limit, which is an effective way to control token usage and cost.
Truncation will reduce the number of cached tokens on the next turn (busting the
cache), since messages are dropped from the beginning of the context. However,
clients can also configure truncation to retain messages up to a fraction of the
maximum context size, which will reduce the need for future truncations and thus
improve the cache rate.
Truncation can be disabled entirely, which means the server will never truncate
but would instead return an error if the conversation exceeds the model's input
token limit.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_session_create_request_param.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_session_create_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .audio_transcription import AudioTranscription
from .realtime_truncation import RealtimeTruncation
from .noise_reduction_type import NoiseReductionType
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_function_tool import RealtimeFunctionTool
from ..responses.response_prompt import ResponsePrompt
from ..responses.tool_choice_mcp import ToolChoiceMcp
from ..responses.tool_choice_options import ToolChoiceOptions
from .realtime_session_client_secret import RealtimeSessionClientSecret
from ..responses.tool_choice_function import ToolChoiceFunction
__all__ = [
"RealtimeSessionCreateResponse",
"Audio",
"AudioInput",
"AudioInputNoiseReduction",
"AudioInputTurnDetection",
"AudioInputTurnDetectionServerVad",
"AudioInputTurnDetectionSemanticVad",
"AudioOutput",
"ToolChoice",
"Tool",
"ToolMcpTool",
"ToolMcpToolAllowedTools",
"ToolMcpToolAllowedToolsMcpToolFilter",
"ToolMcpToolRequireApproval",
"ToolMcpToolRequireApprovalMcpToolApprovalFilter",
"ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways",
"ToolMcpToolRequireApprovalMcpToolApprovalFilterNever",
"Tracing",
"TracingTracingConfiguration",
]
class AudioInputNoiseReduction(BaseModel):
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off.
Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model.
Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio.
"""
type: Optional[NoiseReductionType] = None
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
class AudioInputTurnDetectionServerVad(BaseModel):
"""
Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence.
"""
type: Literal["server_vad"]
"""Type of turn detection, `server_vad` to turn on simple Server VAD."""
create_response: Optional[bool] = None
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
If `interrupt_response` is set to `false` this may fail to create a response if
the model is already responding.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
idle_timeout_ms: Optional[int] = None
"""Optional timeout after which a model response will be triggered automatically.
This is useful for situations in which a long pause from the user is unexpected,
such as a phone call. The model will effectively prompt the user to continue the
conversation based on the current context.
The timeout value will be applied after the last model response's audio has
finished playing, i.e. it's set to the `response.done` time plus audio playback
duration.
An `input_audio_buffer.timeout_triggered` event (plus events associated with the
Response) will be emitted when the timeout is reached. Idle timeout is currently
only supported for `server_vad` mode.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt (cancel) any ongoing response with
output to the default conversation (i.e. `conversation` of `auto`) when a VAD
start event occurs. If `true` then the response will be cancelled, otherwise it
will continue until complete.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
prefix_padding_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: Optional[float] = None
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
class AudioInputTurnDetectionSemanticVad(BaseModel):
"""
Server-side semantic turn detection which uses a model to determine when the user has finished speaking.
"""
type: Literal["semantic_vad"]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: Optional[bool] = None
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
AudioInputTurnDetection: TypeAlias = Annotated[
Union[AudioInputTurnDetectionServerVad, AudioInputTurnDetectionSemanticVad, None],
PropertyInfo(discriminator="type"),
]
class AudioInput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the input audio."""
noise_reduction: Optional[AudioInputNoiseReduction] = None
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off. Noise reduction filters audio added to
the input audio buffer before it is sent to VAD and the model. Filtering the
audio can improve VAD and turn detection accuracy (reducing false positives) and
model performance by improving perception of the input audio.
"""
transcription: Optional[AudioTranscription] = None
"""
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription, these offer additional guidance to the transcription service.
"""
turn_detection: Optional[AudioInputTurnDetection] = None
"""Configuration for turn detection, ether Server VAD or Semantic VAD.
This can be set to `null` to turn off, in which case the client must manually
trigger model response.
Server VAD means that the model will detect the start and end of speech based on
audio volume and respond at the end of user speech.
Semantic VAD is more advanced and uses a turn detection model (in conjunction
with VAD) to semantically estimate whether the user has finished speaking, then
dynamically sets a timeout based on this probability. For example, if user audio
trails off with "uhhm", the model will score a low probability of turn end and
wait longer for the user to continue speaking. This can be useful for more
natural conversations, but may have a higher latency.
"""
class AudioOutput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the output audio."""
speed: Optional[float] = None
"""
The speed of the model's spoken response as a multiple of the original speed.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
This parameter is a post-processing adjustment to the audio after it is
generated, it's also possible to prompt the model to speak faster or slower.
"""
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
] = None
"""The voice the model uses to respond.
Voice cannot be changed during the session once the model has responded with
audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
`coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. We recommend
`marin` and `cedar` for best quality.
"""
class Audio(BaseModel):
"""Configuration for input and output audio."""
input: Optional[AudioInput] = None
output: Optional[AudioOutput] = None
ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp]
class ToolMcpToolAllowedToolsMcpToolFilter(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
ToolMcpToolAllowedTools: TypeAlias = Union[List[str], ToolMcpToolAllowedToolsMcpToolFilter, None]
class ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
class ToolMcpToolRequireApprovalMcpToolApprovalFilterNever(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
class ToolMcpToolRequireApprovalMcpToolApprovalFilter(BaseModel):
"""Specify which of the MCP server's tools require approval.
Can be
`always`, `never`, or a filter object associated with tools
that require approval.
"""
always: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterAlways] = None
"""A filter object to specify which tools are allowed."""
never: Optional[ToolMcpToolRequireApprovalMcpToolApprovalFilterNever] = None
"""A filter object to specify which tools are allowed."""
ToolMcpToolRequireApproval: TypeAlias = Union[
ToolMcpToolRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None
]
class ToolMcpTool(BaseModel):
"""
Give the model access to additional tools via remote Model Context Protocol
(MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
"""
server_label: str
"""A label for this MCP server, used to identify it in tool calls."""
type: Literal["mcp"]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[ToolMcpToolAllowedTools] = None
"""List of allowed tool names or a filter object."""
authorization: Optional[str] = None
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Optional[
Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
] = None
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[ToolMcpToolRequireApproval] = None
"""Specify which of the MCP server's tools require approval."""
server_description: Optional[str] = None
"""Optional description of the MCP server, used to provide more context."""
server_url: Optional[str] = None
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
Tool: TypeAlias = Union[RealtimeFunctionTool, ToolMcpTool]
class TracingTracingConfiguration(BaseModel):
"""Granular configuration for tracing."""
group_id: Optional[str] = None
"""
The group id to attach to this trace to enable filtering and grouping in the
Traces Dashboard.
"""
metadata: Optional[object] = None
"""
The arbitrary metadata to attach to this trace to enable filtering in the Traces
Dashboard.
"""
workflow_name: Optional[str] = None
"""The name of the workflow to attach to this trace.
This is used to name the trace in the Traces Dashboard.
"""
Tracing: TypeAlias = Union[Literal["auto"], TracingTracingConfiguration, None]
class RealtimeSessionCreateResponse(BaseModel):
"""A new Realtime session configuration, with an ephemeral key.
Default TTL
for keys is one minute.
"""
client_secret: RealtimeSessionClientSecret
"""Ephemeral key returned by the API."""
type: Literal["realtime"]
"""The type of session to create. Always `realtime` for the Realtime API."""
audio: Optional[Audio] = None
"""Configuration for input and output audio."""
include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
instructions: Optional[str] = None
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
model: Union[
str,
Literal[
"gpt-realtime",
"gpt-realtime-1.5",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"gpt-realtime-mini-2025-12-15",
"gpt-audio-1.5",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-audio-mini-2025-12-15",
],
None,
] = None
"""The Realtime model used for this session."""
output_modalities: Optional[List[Literal["text", "audio"]]] = None
"""The set of modalities the model can respond with.
It defaults to `["audio"]`, indicating that the model will respond with audio
plus a transcript. `["text"]` can be used to make the model respond with text
only. It is not possible to request both `text` and `audio` at the same time.
"""
prompt: Optional[ResponsePrompt] = None
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
tool_choice: Optional[ToolChoice] = None
"""How the model chooses tools.
Provide one of the string modes or force a specific function/MCP tool.
"""
tools: Optional[List[Tool]] = None
"""Tools available to the model."""
tracing: Optional[Tracing] = None
"""
Realtime API can write session traces to the
[Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
tracing is enabled for a session, the configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
truncation: Optional[RealtimeTruncation] = None
"""
When the number of tokens in a conversation exceeds the model's input token
limit, the conversation be truncated, meaning messages (starting from the
oldest) will not be included in the model's context. A 32k context model with
4,096 max output tokens can only include 28,224 tokens in the context before
truncation occurs.
Clients can configure truncation behavior to truncate with a lower max token
limit, which is an effective way to control token usage and cost.
Truncation will reduce the number of cached tokens on the next turn (busting the
cache), since messages are dropped from the beginning of the context. However,
clients can also configure truncation to retain messages up to a fraction of the
maximum context size, which will reduce the need for future truncations and thus
improve the cache rate.
Truncation can be disabled entirely, which means the server will never truncate
but would instead return an error if the conversation exceeds the model's input
token limit.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_session_create_response.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_tool_choice_config.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import TypeAlias
from ..responses.tool_choice_mcp import ToolChoiceMcp
from ..responses.tool_choice_options import ToolChoiceOptions
from ..responses.tool_choice_function import ToolChoiceFunction
__all__ = ["RealtimeToolChoiceConfig"]
RealtimeToolChoiceConfig: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tool_choice_config.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_tool_choice_config_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypeAlias
from ..responses.tool_choice_options import ToolChoiceOptions
from ..responses.tool_choice_mcp_param import ToolChoiceMcpParam
from ..responses.tool_choice_function_param import ToolChoiceFunctionParam
__all__ = ["RealtimeToolChoiceConfigParam"]
RealtimeToolChoiceConfigParam: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunctionParam, ToolChoiceMcpParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tool_choice_config_param.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_tools_config.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import TypeAlias
from .realtime_tools_config_union import RealtimeToolsConfigUnion
__all__ = ["RealtimeToolsConfig"]
RealtimeToolsConfig: TypeAlias = List[RealtimeToolsConfigUnion]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tools_config.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_tools_config_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
from .realtime_function_tool_param import RealtimeFunctionToolParam
__all__ = [
"RealtimeToolsConfigParam",
"RealtimeToolsConfigUnionParam",
"Mcp",
"McpAllowedTools",
"McpAllowedToolsMcpToolFilter",
"McpRequireApproval",
"McpRequireApprovalMcpToolApprovalFilter",
"McpRequireApprovalMcpToolApprovalFilterAlways",
"McpRequireApprovalMcpToolApprovalFilterNever",
]
class McpAllowedToolsMcpToolFilter(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
McpAllowedTools: TypeAlias = Union[SequenceNotStr[str], McpAllowedToolsMcpToolFilter]
class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
"""Specify which of the MCP server's tools require approval.
Can be
`always`, `never`, or a filter object associated with tools
that require approval.
"""
always: McpRequireApprovalMcpToolApprovalFilterAlways
"""A filter object to specify which tools are allowed."""
never: McpRequireApprovalMcpToolApprovalFilterNever
"""A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
class Mcp(TypedDict, total=False):
"""
Give the model access to additional tools via remote Model Context Protocol
(MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
"""
server_label: Required[str]
"""A label for this MCP server, used to identify it in tool calls."""
type: Required[Literal["mcp"]]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[McpAllowedTools]
"""List of allowed tool names or a filter object."""
authorization: str
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[McpRequireApproval]
"""Specify which of the MCP server's tools require approval."""
server_description: str
"""Optional description of the MCP server, used to provide more context."""
server_url: str
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp]
RealtimeToolsConfigParam: TypeAlias = List[RealtimeToolsConfigUnionParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tools_config_param.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_tools_config_union.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .realtime_function_tool import RealtimeFunctionTool
__all__ = [
"RealtimeToolsConfigUnion",
"Mcp",
"McpAllowedTools",
"McpAllowedToolsMcpToolFilter",
"McpRequireApproval",
"McpRequireApprovalMcpToolApprovalFilter",
"McpRequireApprovalMcpToolApprovalFilterAlways",
"McpRequireApprovalMcpToolApprovalFilterNever",
]
class McpAllowedToolsMcpToolFilter(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None]
class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel):
"""A filter object to specify which tools are allowed."""
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilter(BaseModel):
"""Specify which of the MCP server's tools require approval.
Can be
`always`, `never`, or a filter object associated with tools
that require approval.
"""
always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None
"""A filter object to specify which tools are allowed."""
never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None
"""A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
class Mcp(BaseModel):
"""
Give the model access to additional tools via remote Model Context Protocol
(MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
"""
server_label: str
"""A label for this MCP server, used to identify it in tool calls."""
type: Literal["mcp"]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[McpAllowedTools] = None
"""List of allowed tool names or a filter object."""
authorization: Optional[str] = None
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Optional[
Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
] = None
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[McpRequireApproval] = None
"""Specify which of the MCP server's tools require approval."""
server_description: Optional[str] = None
"""Optional description of the MCP server, used to provide more context."""
server_url: Optional[str] = None
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
RealtimeToolsConfigUnion: TypeAlias = Annotated[Union[RealtimeFunctionTool, Mcp], PropertyInfo(discriminator="type")]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tools_config_union.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_tools_config_union_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
from .realtime_function_tool_param import RealtimeFunctionToolParam
__all__ = [
"RealtimeToolsConfigUnionParam",
"Mcp",
"McpAllowedTools",
"McpAllowedToolsMcpToolFilter",
"McpRequireApproval",
"McpRequireApprovalMcpToolApprovalFilter",
"McpRequireApprovalMcpToolApprovalFilterAlways",
"McpRequireApprovalMcpToolApprovalFilterNever",
]
class McpAllowedToolsMcpToolFilter(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
McpAllowedTools: TypeAlias = Union[SequenceNotStr[str], McpAllowedToolsMcpToolFilter]
class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
"""A filter object to specify which tools are allowed."""
read_only: bool
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: SequenceNotStr[str]
"""List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
"""Specify which of the MCP server's tools require approval.
Can be
`always`, `never`, or a filter object associated with tools
that require approval.
"""
always: McpRequireApprovalMcpToolApprovalFilterAlways
"""A filter object to specify which tools are allowed."""
never: McpRequireApprovalMcpToolApprovalFilterNever
"""A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
class Mcp(TypedDict, total=False):
"""
Give the model access to additional tools via remote Model Context Protocol
(MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
"""
server_label: Required[str]
"""A label for this MCP server, used to identify it in tool calls."""
type: Required[Literal["mcp"]]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[McpAllowedTools]
"""List of allowed tool names or a filter object."""
authorization: str
"""
An OAuth access token that can be used with a remote MCP server, either with a
custom MCP server URL or a service connector. Your application must handle the
OAuth authorization flow and provide the token here.
"""
connector_id: Literal[
"connector_dropbox",
"connector_gmail",
"connector_googlecalendar",
"connector_googledrive",
"connector_microsoftteams",
"connector_outlookcalendar",
"connector_outlookemail",
"connector_sharepoint",
]
"""Identifier for service connectors, like those available in ChatGPT.
One of `server_url` or `connector_id` must be provided. Learn more about service
connectors
[here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
Currently supported `connector_id` values are:
- Dropbox: `connector_dropbox`
- Gmail: `connector_gmail`
- Google Calendar: `connector_googlecalendar`
- Google Drive: `connector_googledrive`
- Microsoft Teams: `connector_microsoftteams`
- Outlook Calendar: `connector_outlookcalendar`
- Outlook Email: `connector_outlookemail`
- SharePoint: `connector_sharepoint`
"""
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
Use for authentication or other purposes.
"""
require_approval: Optional[McpRequireApproval]
"""Specify which of the MCP server's tools require approval."""
server_description: str
"""Optional description of the MCP server, used to provide more context."""
server_url: str
"""The URL for the MCP server.
One of `server_url` or `connector_id` must be provided.
"""
RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tools_config_union_param.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_tracing_config.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
__all__ = ["RealtimeTracingConfig", "TracingConfiguration"]
class TracingConfiguration(BaseModel):
"""Granular configuration for tracing."""
group_id: Optional[str] = None
"""
The group id to attach to this trace to enable filtering and grouping in the
Traces Dashboard.
"""
metadata: Optional[object] = None
"""
The arbitrary metadata to attach to this trace to enable filtering in the Traces
Dashboard.
"""
workflow_name: Optional[str] = None
"""The name of the workflow to attach to this trace.
This is used to name the trace in the Traces Dashboard.
"""
RealtimeTracingConfig: TypeAlias = Union[Literal["auto"], TracingConfiguration, None]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tracing_config.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_tracing_config_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias, TypedDict
__all__ = ["RealtimeTracingConfigParam", "TracingConfiguration"]
class TracingConfiguration(TypedDict, total=False):
"""Granular configuration for tracing."""
group_id: str
"""
The group id to attach to this trace to enable filtering and grouping in the
Traces Dashboard.
"""
metadata: object
"""
The arbitrary metadata to attach to this trace to enable filtering in the Traces
Dashboard.
"""
workflow_name: str
"""The name of the workflow to attach to this trace.
This is used to name the trace in the Traces Dashboard.
"""
RealtimeTracingConfigParam: TypeAlias = Union[Literal["auto"], TracingConfiguration]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_tracing_config_param.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_create_request.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_transcription_session_audio import RealtimeTranscriptionSessionAudio
__all__ = ["RealtimeTranscriptionSessionCreateRequest"]
class RealtimeTranscriptionSessionCreateRequest(BaseModel):
"""Realtime transcription session object configuration."""
type: Literal["transcription"]
"""The type of session to create.
Always `transcription` for transcription sessions.
"""
audio: Optional[RealtimeTranscriptionSessionAudio] = None
"""Configuration for input and output audio."""
include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_create_request.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_create_request_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal, Required, TypedDict
from .realtime_transcription_session_audio_param import RealtimeTranscriptionSessionAudioParam
__all__ = ["RealtimeTranscriptionSessionCreateRequestParam"]
class RealtimeTranscriptionSessionCreateRequestParam(TypedDict, total=False):
"""Realtime transcription session object configuration."""
type: Required[Literal["transcription"]]
"""The type of session to create.
Always `transcription` for transcription sessions.
"""
audio: RealtimeTranscriptionSessionAudioParam
"""Configuration for input and output audio."""
include: List[Literal["item.input_audio_transcription.logprobs"]]
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_create_request_param.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_truncation.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias
from .realtime_truncation_retention_ratio import RealtimeTruncationRetentionRatio
__all__ = ["RealtimeTruncation"]
RealtimeTruncation: TypeAlias = Union[Literal["auto", "disabled"], RealtimeTruncationRetentionRatio]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_truncation.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_truncation_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias
from .realtime_truncation_retention_ratio_param import RealtimeTruncationRetentionRatioParam
__all__ = ["RealtimeTruncationParam"]
RealtimeTruncationParam: TypeAlias = Union[Literal["auto", "disabled"], RealtimeTruncationRetentionRatioParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_truncation_param.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_audio_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseAudioDeltaEvent"]
class ResponseAudioDeltaEvent(BaseModel):
"""Returned when the model-generated audio is updated."""
content_index: int
"""The index of the content part in the item's content array."""
delta: str
"""Base64-encoded audio data delta."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.output_audio.delta"]
"""The event type, must be `response.output_audio.delta`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_audio_delta_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_audio_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseAudioDoneEvent"]
class ResponseAudioDoneEvent(BaseModel):
"""Returned when the model-generated audio is done.
Also emitted when a Response
is interrupted, incomplete, or cancelled.
"""
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.output_audio.done"]
"""The event type, must be `response.output_audio.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_audio_done_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_audio_transcript_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseAudioTranscriptDeltaEvent"]
class ResponseAudioTranscriptDeltaEvent(BaseModel):
"""Returned when the model-generated transcription of audio output is updated."""
content_index: int
"""The index of the content part in the item's content array."""
delta: str
"""The transcript delta."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.output_audio_transcript.delta"]
"""The event type, must be `response.output_audio_transcript.delta`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_audio_transcript_delta_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_audio_transcript_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseAudioTranscriptDoneEvent"]
class ResponseAudioTranscriptDoneEvent(BaseModel):
"""
Returned when the model-generated transcription of audio output is done
streaming. Also emitted when a Response is interrupted, incomplete, or
cancelled.
"""
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
transcript: str
"""The final transcript of the audio."""
type: Literal["response.output_audio_transcript.done"]
"""The event type, must be `response.output_audio_transcript.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_audio_transcript_done_event.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_cancel_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCancelEvent"]
class ResponseCancelEvent(BaseModel):
"""Send this event to cancel an in-progress response.
The server will respond
with a `response.done` event with a status of `response.status=cancelled`. If
there is no response to cancel, the server will respond with an error. It's safe
to call `response.cancel` even if no response is in progress, an error will be
returned the session will remain unaffected.
"""
type: Literal["response.cancel"]
"""The event type, must be `response.cancel`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
response_id: Optional[str] = None
"""
A specific response ID to cancel - if not provided, will cancel an in-progress
response in the default conversation.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_cancel_event.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_cancel_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseCancelEventParam"]
class ResponseCancelEventParam(TypedDict, total=False):
"""Send this event to cancel an in-progress response.
The server will respond
with a `response.done` event with a status of `response.status=cancelled`. If
there is no response to cancel, the server will respond with an error. It's safe
to call `response.cancel` even if no response is in progress, an error will be
returned the session will remain unaffected.
"""
type: Required[Literal["response.cancel"]]
"""The event type, must be `response.cancel`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
response_id: str
"""
A specific response ID to cancel - if not provided, will cancel an in-progress
response in the default conversation.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_cancel_event_param.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_content_part_added_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseContentPartAddedEvent", "Part"]
class Part(BaseModel):
"""The content part that was added."""
audio: Optional[str] = None
"""Base64-encoded audio data (if type is "audio")."""
text: Optional[str] = None
"""The text content (if type is "text")."""
transcript: Optional[str] = None
"""The transcript of the audio (if type is "audio")."""
type: Optional[Literal["text", "audio"]] = None
"""The content type ("text", "audio")."""
class ResponseContentPartAddedEvent(BaseModel):
"""
Returned when a new content part is added to an assistant message item during
response generation.
"""
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item to which the content part was added."""
output_index: int
"""The index of the output item in the response."""
part: Part
"""The content part that was added."""
response_id: str
"""The ID of the response."""
type: Literal["response.content_part.added"]
"""The event type, must be `response.content_part.added`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_content_part_added_event.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_content_part_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseContentPartDoneEvent", "Part"]
class Part(BaseModel):
"""The content part that is done."""
audio: Optional[str] = None
"""Base64-encoded audio data (if type is "audio")."""
text: Optional[str] = None
"""The text content (if type is "text")."""
transcript: Optional[str] = None
"""The transcript of the audio (if type is "audio")."""
type: Optional[Literal["text", "audio"]] = None
"""The content type ("text", "audio")."""
class ResponseContentPartDoneEvent(BaseModel):
"""
Returned when a content part is done streaming in an assistant message item.
Also emitted when a Response is interrupted, incomplete, or cancelled.
"""
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
part: Part
"""The content part that is done."""
response_id: str
"""The ID of the response."""
type: Literal["response.content_part.done"]
"""The event type, must be `response.content_part.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_content_part_done_event.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_create_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_response_create_params import RealtimeResponseCreateParams
__all__ = ["ResponseCreateEvent"]
class ResponseCreateEvent(BaseModel):
    """
    This event instructs the server to create a Response, which means triggering
    model inference. When in Server VAD mode, the server will create Responses
    automatically.

    A Response will include at least one Item, and may have two, in which case
    the second will be a function call. These Items will be appended to the
    conversation history by default.

    The server will respond with a `response.created` event, events for Items
    and content created, and finally a `response.done` event to indicate the
    Response is complete.

    The `response.create` event includes inference configuration like
    `instructions` and `tools`. If these are set, they will override the Session's
    configuration for this Response only.

    Responses can be created out-of-band of the default Conversation, meaning that they can
    have arbitrary input, and it's possible to disable writing the output to the Conversation.
    Only one Response can write to the default Conversation at a time, but otherwise multiple
    Responses can be created in parallel. The `metadata` field is a good way to disambiguate
    multiple simultaneous Responses.

    Clients can set `conversation` to `none` to create a Response that does not write to the default
    Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
    raw Items and references to existing Items.
    """

    # Fixed discriminator value identifying this client event.
    type: Literal["response.create"]
    """The event type, must be `response.create`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""

    response: Optional[RealtimeResponseCreateParams] = None
    """Create a new Realtime response with these parameters"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_create_event.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_create_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from .realtime_response_create_params_param import RealtimeResponseCreateParamsParam
__all__ = ["ResponseCreateEventParam"]
class ResponseCreateEventParam(TypedDict, total=False):
    """
    This event instructs the server to create a Response, which means triggering
    model inference. When in Server VAD mode, the server will create Responses
    automatically.

    A Response will include at least one Item, and may have two, in which case
    the second will be a function call. These Items will be appended to the
    conversation history by default.

    The server will respond with a `response.created` event, events for Items
    and content created, and finally a `response.done` event to indicate the
    Response is complete.

    The `response.create` event includes inference configuration like
    `instructions` and `tools`. If these are set, they will override the Session's
    configuration for this Response only.

    Responses can be created out-of-band of the default Conversation, meaning that they can
    have arbitrary input, and it's possible to disable writing the output to the Conversation.
    Only one Response can write to the default Conversation at a time, but otherwise multiple
    Responses can be created in parallel. The `metadata` field is a good way to disambiguate
    multiple simultaneous Responses.

    Clients can set `conversation` to `none` to create a Response that does not write to the default
    Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
    raw Items and references to existing Items.
    """

    # total=False on the class: only keys wrapped in Required[...] must be present.
    type: Required[Literal["response.create"]]
    """The event type, must be `response.create`."""

    event_id: str
    """Optional client-generated ID used to identify this event."""

    response: RealtimeResponseCreateParamsParam
    """Create a new Realtime response with these parameters"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_create_event_param.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_created_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_response import RealtimeResponse
__all__ = ["ResponseCreatedEvent"]
class ResponseCreatedEvent(BaseModel):
    """Returned when a new Response is created.

    The first event of response creation,
    where the response is in an initial state of `in_progress`.
    """

    event_id: str
    """The unique ID of the server event."""

    response: RealtimeResponse
    """The response resource."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.created"]
    """The event type, must be `response.created`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_created_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_response import RealtimeResponse
__all__ = ["ResponseDoneEvent"]
class ResponseDoneEvent(BaseModel):
    """Returned when a Response is done streaming.

    Always emitted, no matter the
    final state. The Response object included in the `response.done` event will
    include all output Items in the Response but will omit the raw audio data.

    Clients should check the `status` field of the Response to determine if it was successful
    (`completed`) or if there was another outcome: `cancelled`, `failed`, or `incomplete`.

    A response will contain all output items that were generated during the response, excluding
    any audio content.
    """

    event_id: str
    """The unique ID of the server event."""

    response: RealtimeResponse
    """The response resource."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.done"]
    """The event type, must be `response.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_done_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_function_call_arguments_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseFunctionCallArgumentsDeltaEvent"]
class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
    """Returned when the model-generated function call arguments are updated."""

    call_id: str
    """The ID of the function call."""

    # Incremental fragment; concatenate deltas to reconstruct the full JSON arguments.
    delta: str
    """The arguments delta as a JSON string."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the function call item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.function_call_arguments.delta"]
    """The event type, must be `response.function_call_arguments.delta`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_function_call_arguments_delta_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_function_call_arguments_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseFunctionCallArgumentsDoneEvent"]
class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
    """
    Returned when the model-generated function call arguments are done streaming.
    Also emitted when a Response is interrupted, incomplete, or cancelled.
    """

    # Complete JSON string (not a fragment), unlike the `.delta` event's payload.
    arguments: str
    """The final arguments as a JSON string."""

    call_id: str
    """The ID of the function call."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the function call item."""

    name: str
    """The name of the function that was called."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.function_call_arguments.done"]
    """The event type, must be `response.function_call_arguments.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_function_call_arguments_done_event.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_mcp_call_arguments_delta.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallArgumentsDelta"]
class ResponseMcpCallArgumentsDelta(BaseModel):
    """Returned when MCP tool call arguments are updated during response generation."""

    delta: str
    """The JSON-encoded arguments delta."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.mcp_call_arguments.delta"]
    """The event type, must be `response.mcp_call_arguments.delta`."""

    # Only present when obfuscation was applied to the delta text.
    obfuscation: Optional[str] = None
    """If present, indicates the delta text was obfuscated."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_mcp_call_arguments_delta.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_mcp_call_arguments_done.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallArgumentsDone"]
class ResponseMcpCallArgumentsDone(BaseModel):
    """Returned when MCP tool call arguments are finalized during response generation."""

    # Complete JSON string (not a fragment), unlike the `.delta` event's payload.
    arguments: str
    """The final JSON-encoded arguments string."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.mcp_call_arguments.done"]
    """The event type, must be `response.mcp_call_arguments.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_mcp_call_arguments_done.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_mcp_call_completed.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallCompleted"]
class ResponseMcpCallCompleted(BaseModel):
    """Returned when an MCP tool call has completed successfully."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.mcp_call.completed"]
    """The event type, must be `response.mcp_call.completed`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_mcp_call_completed.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_mcp_call_failed.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallFailed"]
class ResponseMcpCallFailed(BaseModel):
    """Returned when an MCP tool call has failed."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.mcp_call.failed"]
    """The event type, must be `response.mcp_call.failed`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_mcp_call_failed.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_mcp_call_in_progress.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallInProgress"]
class ResponseMcpCallInProgress(BaseModel):
    """Returned when an MCP tool call has started and is in progress."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.mcp_call.in_progress"]
    """The event type, must be `response.mcp_call.in_progress`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_mcp_call_in_progress.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_output_item_added_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ResponseOutputItemAddedEvent"]
class ResponseOutputItemAddedEvent(BaseModel):
    """Returned when a new Item is created during Response generation."""

    event_id: str
    """The unique ID of the server event."""

    item: ConversationItem
    """A single item within a Realtime conversation."""

    output_index: int
    """The index of the output item in the Response."""

    response_id: str
    """The ID of the Response to which the item belongs."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.output_item.added"]
    """The event type, must be `response.output_item.added`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_output_item_added_event.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_output_item_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ResponseOutputItemDoneEvent"]
class ResponseOutputItemDoneEvent(BaseModel):
    """Returned when an Item is done streaming.

    Also emitted when a Response is
    interrupted, incomplete, or cancelled.
    """

    event_id: str
    """The unique ID of the server event."""

    item: ConversationItem
    """A single item within a Realtime conversation."""

    output_index: int
    """The index of the output item in the Response."""

    response_id: str
    """The ID of the Response to which the item belongs."""

    # Fixed discriminator value identifying this server event.
    type: Literal["response.output_item.done"]
    """The event type, must be `response.output_item.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_output_item_done_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/response_text_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseTextDeltaEvent"]
class ResponseTextDeltaEvent(BaseModel):
    """Returned when the text value of an "output_text" content part is updated."""

    content_index: int
    """The index of the content part in the item's content array."""

    # Incremental fragment; concatenate deltas to reconstruct the full text.
    delta: str
    """The text delta."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    type: Literal["response.output_text.delta"]
    """The event type, must be `response.output_text.delta`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_text_delta_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/response_text_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseTextDoneEvent"]
class ResponseTextDoneEvent(BaseModel):
    """Returned when the text value of an "output_text" content part is done streaming.

    Also
    emitted when a Response is interrupted, incomplete, or cancelled.
    """

    content_index: int
    """The index of the content part in the item's content array."""

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the item."""

    output_index: int
    """The index of the output item in the response."""

    response_id: str
    """The ID of the response."""

    # Complete text (not a fragment), unlike the `.delta` event's payload.
    text: str
    """The final text content."""

    type: Literal["response.output_text.done"]
    """The event type, must be `response.output_text.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/response_text_done_event.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/session_created_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .realtime_session_create_request import RealtimeSessionCreateRequest
from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest
__all__ = ["SessionCreatedEvent", "Session"]
# A session is either a full realtime session or a transcription-only session.
Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest]


class SessionCreatedEvent(BaseModel):
    """Returned when a Session is created.

    Emitted automatically when a new
    connection is established as the first server event. This event will contain
    the default Session configuration.
    """

    event_id: str
    """The unique ID of the server event."""

    session: Session
    """The session configuration."""

    # Fixed discriminator value identifying this server event.
    type: Literal["session.created"]
    """The event type, must be `session.created`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/session_created_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/session_update_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .realtime_session_create_request import RealtimeSessionCreateRequest
from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest
__all__ = ["SessionUpdateEvent", "Session"]
# A session is either a full realtime session or a transcription-only session.
Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest]


class SessionUpdateEvent(BaseModel):
    """
    Send this event to update the session’s configuration.
    The client may send this event at any time to update any field
    except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet.

    When the server receives a `session.update`, it will respond
    with a `session.updated` event showing the full, effective configuration.
    Only the fields that are present in the `session.update` are updated. To clear a field like
    `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array.
    To clear a field like `turn_detection`, pass `null`.
    """

    session: Session
    """Update the Realtime session.

    Choose either a realtime session or a transcription session.
    """

    # Fixed discriminator value identifying this client event.
    type: Literal["session.update"]
    """The event type, must be `session.update`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event.

    This is an arbitrary string that a client may assign. It will be passed back if
    there is an error with the event, but the corresponding `session.updated` event
    will not include it.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/session_update_event.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/session_update_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam
from .realtime_transcription_session_create_request_param import RealtimeTranscriptionSessionCreateRequestParam
__all__ = ["SessionUpdateEventParam", "Session"]
# A session is either a full realtime session or a transcription-only session.
Session: TypeAlias = Union[RealtimeSessionCreateRequestParam, RealtimeTranscriptionSessionCreateRequestParam]


class SessionUpdateEventParam(TypedDict, total=False):
    """
    Send this event to update the session’s configuration.
    The client may send this event at any time to update any field
    except for `voice` and `model`. `voice` can be updated only if there have been no other audio outputs yet.

    When the server receives a `session.update`, it will respond
    with a `session.updated` event showing the full, effective configuration.
    Only the fields that are present in the `session.update` are updated. To clear a field like
    `instructions`, pass an empty string. To clear a field like `tools`, pass an empty array.
    To clear a field like `turn_detection`, pass `null`.
    """

    # total=False on the class: only keys wrapped in Required[...] must be present.
    session: Required[Session]
    """Update the Realtime session.

    Choose either a realtime session or a transcription session.
    """

    type: Required[Literal["session.update"]]
    """The event type, must be `session.update`."""

    event_id: str
    """Optional client-generated ID used to identify this event.

    This is an arbitrary string that a client may assign. It will be passed back if
    there is an error with the event, but the corresponding `session.updated` event
    will not include it.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/session_update_event_param.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/session_updated_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .realtime_session_create_request import RealtimeSessionCreateRequest
from .realtime_transcription_session_create_request import RealtimeTranscriptionSessionCreateRequest
__all__ = ["SessionUpdatedEvent", "Session"]
# A session is either a full realtime session or a transcription-only session.
Session: TypeAlias = Union[RealtimeSessionCreateRequest, RealtimeTranscriptionSessionCreateRequest]


class SessionUpdatedEvent(BaseModel):
    """
    Returned when a session is updated with a `session.update` event, unless
    there is an error.
    """

    event_id: str
    """The unique ID of the server event."""

    session: Session
    """The session configuration."""

    # Fixed discriminator value identifying this server event.
    type: Literal["session.updated"]
    """The event type, must be `session.updated`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/session_updated_event.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/web_search_preview_tool.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["WebSearchPreviewTool", "UserLocation"]
class UserLocation(BaseModel):
    """The user's location."""

    # `approximate` is the only supported approximation type.
    type: Literal["approximate"]
    """The type of location approximation. Always `approximate`."""

    city: Optional[str] = None
    """Free text input for the city of the user, e.g. `San Francisco`."""

    country: Optional[str] = None
    """
    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
    the user, e.g. `US`.
    """

    region: Optional[str] = None
    """Free text input for the region of the user, e.g. `California`."""

    timezone: Optional[str] = None
    """
    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
    user, e.g. `America/Los_Angeles`.
    """
class WebSearchPreviewTool(BaseModel):
    """This tool searches the web for relevant results to use in a response.

    Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    """

    type: Literal["web_search_preview", "web_search_preview_2025_03_11"]
    """The type of the web search tool.

    One of `web_search_preview` or `web_search_preview_2025_03_11`.
    """

    search_context_size: Optional[Literal["low", "medium", "high"]] = None
    """High level guidance for the amount of context window space to use for the
    search.

    One of `low`, `medium`, or `high`. `medium` is the default.
    """

    user_location: Optional[UserLocation] = None
    """The user's location."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/web_search_preview_tool.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/web_search_preview_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["WebSearchPreviewToolParam", "UserLocation"]
class UserLocation(TypedDict, total=False):
    """The user's location."""

    # total=False on the class: only keys wrapped in Required[...] must be present.
    type: Required[Literal["approximate"]]
    """The type of location approximation. Always `approximate`."""

    city: Optional[str]
    """Free text input for the city of the user, e.g. `San Francisco`."""

    country: Optional[str]
    """
    The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
    the user, e.g. `US`.
    """

    region: Optional[str]
    """Free text input for the region of the user, e.g. `California`."""

    timezone: Optional[str]
    """
    The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
    user, e.g. `America/Los_Angeles`.
    """
class WebSearchPreviewToolParam(TypedDict, total=False):
    """This tool searches the web for relevant results to use in a response.

    Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
    """

    # total=False on the class: only keys wrapped in Required[...] must be present.
    type: Required[Literal["web_search_preview", "web_search_preview_2025_03_11"]]
    """The type of the web search tool.

    One of `web_search_preview` or `web_search_preview_2025_03_11`.
    """

    search_context_size: Literal["low", "medium", "high"]
    """High level guidance for the amount of context window space to use for the
    search.

    One of `low`, `medium`, or `high`. `medium` is the default.
    """

    user_location: Optional[UserLocation]
    """The user's location."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/web_search_preview_tool_param.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/webhooks/realtime_call_incoming_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeCallIncomingWebhookEvent", "Data", "DataSipHeader"]
class DataSipHeader(BaseModel):
    """A header from the SIP Invite."""

    name: str
    """Name of the SIP Header."""

    value: str
    """Value of the SIP Header."""
class Data(BaseModel):
    """Event data payload."""

    call_id: str
    """The unique ID of this call."""

    sip_headers: List[DataSipHeader]
    """Headers from the SIP Invite."""
class RealtimeCallIncomingWebhookEvent(BaseModel):
    """Sent when the Realtime API receives an incoming SIP call."""

    id: str
    """The unique ID of the event."""

    # NOTE(review): the generated docstring below says "model response was completed",
    # which looks copied from another webhook — for this event it is presumably the
    # time the incoming call event was created. Confirm against the OpenAPI spec.
    created_at: int
    """The Unix timestamp (in seconds) of when the model response was completed."""

    data: Data
    """Event data payload."""

    type: Literal["realtime.call.incoming"]
    """The type of the event. Always `realtime.call.incoming`."""

    object: Optional[Literal["event"]] = None
    """The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/realtime_call_incoming_webhook_event.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:tests/api_resources/realtime/test_client_secrets.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types.realtime import ClientSecretCreateResponse
# Target for the tests; defaults to a local mock server when TEST_API_BASE_URL is unset.
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")


class TestClientSecrets:
    """Smoke tests for `client.realtime.client_secrets.create` with the sync client."""

    # Run each test twice via the `client` fixture: once "loose", once "strict".
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        # Minimal call: no arguments, only the response type is checked.
        client_secret = client.realtime.client_secrets.create()
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        # Exercise every optional parameter accepted by `create`.
        client_secret = client.realtime.client_secrets.create(
            expires_after={
                "anchor": "created_at",
                "seconds": 10,
            },
            session={
                "type": "realtime",
                "audio": {
                    "input": {
                        "format": {
                            "rate": 24000,
                            "type": "audio/pcm",
                        },
                        "noise_reduction": {"type": "near_field"},
                        "transcription": {
                            "language": "language",
                            "model": "string",
                            "prompt": "prompt",
                        },
                        "turn_detection": {
                            "type": "server_vad",
                            "create_response": True,
                            "idle_timeout_ms": 5000,
                            "interrupt_response": True,
                            "prefix_padding_ms": 0,
                            "silence_duration_ms": 0,
                            "threshold": 0,
                        },
                    },
                    "output": {
                        "format": {
                            "rate": 24000,
                            "type": "audio/pcm",
                        },
                        "speed": 0.25,
                        "voice": "ash",
                    },
                },
                "include": ["item.input_audio_transcription.logprobs"],
                "instructions": "instructions",
                "max_output_tokens": 0,
                "model": "string",
                "output_modalities": ["text"],
                "prompt": {
                    "id": "id",
                    "variables": {"foo": "string"},
                    "version": "version",
                },
                "tool_choice": "none",
                "tools": [
                    {
                        "description": "description",
                        "name": "name",
                        "parameters": {},
                        "type": "function",
                    }
                ],
                "tracing": "auto",
                "truncation": "auto",
            },
        )
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        # Raw-response variant: the HTTP layer is exposed alongside the parsed body.
        response = client.realtime.client_secrets.with_raw_response.create()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        client_secret = response.parse()
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        # Streaming variant: the response stays open inside the context manager
        # and must be closed once the `with` block exits.
        with client.realtime.client_secrets.with_streaming_response.create() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            client_secret = response.parse()
            assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

        assert cast(Any, response.is_closed) is True
class TestAsyncClientSecrets:
    """Generated smoke tests for the asynchronous `realtime.client_secrets` resource.

    Mirrors `TestClientSecrets`; the extra `aiohttp` parametrization additionally
    runs every test against an aiohttp-backed async client.
    """

    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        client_secret = await async_client.realtime.client_secrets.create()
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        # Exercises every optional request parameter with representative values.
        client_secret = await async_client.realtime.client_secrets.create(
            expires_after={
                "anchor": "created_at",
                "seconds": 10,
            },
            session={
                "type": "realtime",
                "audio": {
                    "input": {
                        "format": {
                            "rate": 24000,
                            "type": "audio/pcm",
                        },
                        "noise_reduction": {"type": "near_field"},
                        "transcription": {
                            "language": "language",
                            "model": "string",
                            "prompt": "prompt",
                        },
                        "turn_detection": {
                            "type": "server_vad",
                            "create_response": True,
                            "idle_timeout_ms": 5000,
                            "interrupt_response": True,
                            "prefix_padding_ms": 0,
                            "silence_duration_ms": 0,
                            "threshold": 0,
                        },
                    },
                    "output": {
                        "format": {
                            "rate": 24000,
                            "type": "audio/pcm",
                        },
                        "speed": 0.25,
                        "voice": "ash",
                    },
                },
                "include": ["item.input_audio_transcription.logprobs"],
                "instructions": "instructions",
                "max_output_tokens": 0,
                "model": "string",
                "output_modalities": ["text"],
                "prompt": {
                    "id": "id",
                    "variables": {"foo": "string"},
                    "version": "version",
                },
                "tool_choice": "none",
                "tools": [
                    {
                        "description": "description",
                        "name": "name",
                        "parameters": {},
                        "type": "function",
                    }
                ],
                "tracing": "auto",
                "truncation": "auto",
            },
        )
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.realtime.client_secrets.with_raw_response.create()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        client_secret = response.parse()
        assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.realtime.client_secrets.with_streaming_response.create() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            client_secret = await response.parse()
            assert_matches_type(ClientSecretCreateResponse, client_secret, path=["response"])

        assert cast(Any, response.is_closed) is True
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/realtime/test_client_secrets.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/test_realtime.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
import pytest
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestRealtime:
    """Generated scaffold for the sync realtime resource; currently defines only the client parametrization."""

    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
class TestAsyncRealtime:
    """Generated scaffold for the async realtime resource; currently defines only the client parametrization."""

    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_realtime.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/resources/conversations/conversations.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
import httpx
from ... import _legacy_response
from .items import (
Items,
AsyncItems,
ItemsWithRawResponse,
AsyncItemsWithRawResponse,
ItemsWithStreamingResponse,
AsyncItemsWithStreamingResponse,
)
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.conversations import conversation_create_params, conversation_update_params
from ...types.shared_params.metadata import Metadata
from ...types.conversations.conversation import Conversation
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource
__all__ = ["Conversations", "AsyncConversations"]
class Conversations(SyncAPIResource):
    """Synchronous resource for the `/conversations` endpoints (create/retrieve/update/delete),
    with conversation items nested under `.items`."""

    @cached_property
    def items(self) -> Items:
        return Items(self._client)

    @cached_property
    def with_raw_response(self) -> ConversationsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ConversationsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ConversationsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ConversationsWithStreamingResponse(self)

    def create(
        self,
        *,
        items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Create a conversation.

        Args:
          items: Initial items to include in the conversation context. You may add up to 20 items
              at a time.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/conversations",
            body=maybe_transform(
                {
                    "items": items,
                    "metadata": metadata,
                },
                conversation_create_params.ConversationCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    def retrieve(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Get a conversation

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would hit the wrong URL.
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._get(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    def update(
        self,
        conversation_id: str,
        *,
        metadata: Optional[Metadata],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Update a conversation

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._post(
            f"/conversations/{conversation_id}",
            body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    def delete(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationDeletedResource:
        """Delete a conversation.

        Items in the conversation will not be deleted.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._delete(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ConversationDeletedResource,
        )
class AsyncConversations(AsyncAPIResource):
    """Asynchronous counterpart of `Conversations`; same endpoints, awaitable methods."""

    @cached_property
    def items(self) -> AsyncItems:
        return AsyncItems(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncConversationsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncConversationsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncConversationsWithStreamingResponse(self)

    async def create(
        self,
        *,
        items: Optional[Iterable[ResponseInputItemParam]] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Create a conversation.

        Args:
          items: Initial items to include in the conversation context. You may add up to 20 items
              at a time.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/conversations",
            body=await async_maybe_transform(
                {
                    "items": items,
                    "metadata": metadata,
                },
                conversation_create_params.ConversationCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    async def retrieve(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Get a conversation

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment, which would hit the wrong URL.
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._get(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    async def update(
        self,
        conversation_id: str,
        *,
        metadata: Optional[Metadata],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Update a conversation

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._post(
            f"/conversations/{conversation_id}",
            body=await async_maybe_transform(
                {"metadata": metadata}, conversation_update_params.ConversationUpdateParams
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )

    async def delete(
        self,
        conversation_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationDeletedResource:
        """Delete a conversation.

        Items in the conversation will not be deleted.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return await self._delete(
            f"/conversations/{conversation_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ConversationDeletedResource,
        )
class ConversationsWithRawResponse:
    """Wraps each `Conversations` method so calls return the raw HTTP response."""

    def __init__(self, conversations: Conversations) -> None:
        self._conversations = conversations

        self.create = _legacy_response.to_raw_response_wrapper(
            conversations.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            conversations.retrieve,
        )
        self.update = _legacy_response.to_raw_response_wrapper(
            conversations.update,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            conversations.delete,
        )

    @cached_property
    def items(self) -> ItemsWithRawResponse:
        return ItemsWithRawResponse(self._conversations.items)
class AsyncConversationsWithRawResponse:
    """Wraps each `AsyncConversations` method so calls return the raw HTTP response."""

    def __init__(self, conversations: AsyncConversations) -> None:
        self._conversations = conversations

        self.create = _legacy_response.async_to_raw_response_wrapper(
            conversations.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            conversations.retrieve,
        )
        self.update = _legacy_response.async_to_raw_response_wrapper(
            conversations.update,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            conversations.delete,
        )

    @cached_property
    def items(self) -> AsyncItemsWithRawResponse:
        return AsyncItemsWithRawResponse(self._conversations.items)
class ConversationsWithStreamingResponse:
    """Wraps each `Conversations` method so calls return a streamed-response context manager."""

    def __init__(self, conversations: Conversations) -> None:
        self._conversations = conversations

        self.create = to_streamed_response_wrapper(
            conversations.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            conversations.retrieve,
        )
        self.update = to_streamed_response_wrapper(
            conversations.update,
        )
        self.delete = to_streamed_response_wrapper(
            conversations.delete,
        )

    @cached_property
    def items(self) -> ItemsWithStreamingResponse:
        return ItemsWithStreamingResponse(self._conversations.items)
class AsyncConversationsWithStreamingResponse:
    """Wraps each `AsyncConversations` method so calls return a streamed-response context manager."""

    def __init__(self, conversations: AsyncConversations) -> None:
        self._conversations = conversations

        self.create = async_to_streamed_response_wrapper(
            conversations.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            conversations.retrieve,
        )
        self.update = async_to_streamed_response_wrapper(
            conversations.update,
        )
        self.delete = async_to_streamed_response_wrapper(
            conversations.delete,
        )

    @cached_property
    def items(self) -> AsyncItemsWithStreamingResponse:
        return AsyncItemsWithStreamingResponse(self._conversations.items)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/conversations/conversations.py",
"license": "Apache License 2.0",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/conversations/items.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, List, Iterable, cast
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.conversations import item_list_params, item_create_params, item_retrieve_params
from ...types.conversations.conversation import Conversation
from ...types.responses.response_includable import ResponseIncludable
from ...types.conversations.conversation_item import ConversationItem
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.conversations.conversation_item_list import ConversationItemList
__all__ = ["Items", "AsyncItems"]
class Items(SyncAPIResource):
    """Synchronous resource for `/conversations/{conversation_id}/items` (create/retrieve/list/delete)."""

    @cached_property
    def with_raw_response(self) -> ItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ItemsWithStreamingResponse(self)

    def create(
        self,
        conversation_id: str,
        *,
        items: Iterable[ResponseInputItemParam],
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItemList:
        """
        Create items in a conversation with the given ID.

        Args:
          items: The items to add to the conversation. You may add up to 20 items at a time.

          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        # `items` travels in the request body; `include` is a query parameter.
        return self._post(
            f"/conversations/{conversation_id}/items",
            body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
            ),
            cast_to=ConversationItemList,
        )

    def retrieve(
        self,
        item_id: str,
        *,
        conversation_id: str,
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItem:
        """
        Get a single item from a conversation with the given IDs.

        Args:
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return cast(
            ConversationItem,
            self._get(
                f"/conversations/{conversation_id}/items/{item_id}",
                options=make_request_options(
                    extra_headers=extra_headers,
                    extra_query=extra_query,
                    extra_body=extra_body,
                    timeout=timeout,
                    query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
                ),
                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
            ),
        )

    def list(
        self,
        conversation_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ConversationItem]:
        """
        List all items for a conversation with the given ID.

        Args:
          after: An item ID to list items after, used in pagination.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: The order to return the input items in. Default is `desc`.

              - `asc`: Return the input items in ascending order.
              - `desc`: Return the input items in descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        # Returns a cursor page that lazily fetches subsequent pages on iteration.
        return self._get_api_list(
            f"/conversations/{conversation_id}/items",
            page=SyncConversationCursorPage[ConversationItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    item_list_params.ItemListParams,
                ),
            ),
            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
        )

    def delete(
        self,
        item_id: str,
        *,
        conversation_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Delete an item from a conversation with the given IDs.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        # NOTE(review): the API returns the updated parent Conversation (not the deleted
        # item) per the declared cast_to — confirm against the API reference.
        return self._delete(
            f"/conversations/{conversation_id}/items/{item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
class AsyncItems(AsyncAPIResource):
    """Async resource for managing the items that make up a Conversation."""

    @cached_property
    def with_raw_response(self) -> AsyncItemsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncItemsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncItemsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncItemsWithStreamingResponse(self)

    async def create(
        self,
        conversation_id: str,
        *,
        items: Iterable[ResponseInputItemParam],
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItemList:
        """
        Create items in a conversation with the given ID.

        Args:
          items: The items to add to the conversation. You may add up to 20 items at a time.

          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        # `items` travels in the JSON body; `include` is sent as a query parameter.
        return await self._post(
            f"/conversations/{conversation_id}/items",
            body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams),
            ),
            cast_to=ConversationItemList,
        )

    async def retrieve(
        self,
        item_id: str,
        *,
        conversation_id: str,
        include: List[ResponseIncludable] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ConversationItem:
        """
        Get a single item from a conversation with the given IDs.

        Args:
          include: Additional fields to include in the response. See the `include` parameter for
              [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        # Outer cast gives callers the union type; the inner cast works around
        # the client not accepting union types as `cast_to` arguments directly.
        return cast(
            ConversationItem,
            await self._get(
                f"/conversations/{conversation_id}/items/{item_id}",
                options=make_request_options(
                    extra_headers=extra_headers,
                    extra_query=extra_query,
                    extra_body=extra_body,
                    timeout=timeout,
                    query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
                ),
                cast_to=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
            ),
        )

    # NOTE: intentionally not `async def` — `_get_api_list` returns an async
    # paginator object immediately; pages are fetched lazily as it is iterated.
    def list(
        self,
        conversation_id: str,
        *,
        after: str | Omit = omit,
        include: List[ResponseIncludable] | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]:
        """
        List all items for a conversation with the given ID.

        Args:
          after: An item ID to list items after, used in pagination.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: The order to return the input items in. Default is `desc`.

              - `asc`: Return the input items in ascending order.
              - `desc`: Return the input items in descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        return self._get_api_list(
            f"/conversations/{conversation_id}/items",
            page=AsyncConversationCursorPage[ConversationItem],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "include": include,
                        "limit": limit,
                        "order": order,
                    },
                    item_list_params.ItemListParams,
                ),
            ),
            model=cast(Any, ConversationItem),  # Union types cannot be passed in as arguments in the type system
        )

    async def delete(
        self,
        item_id: str,
        *,
        conversation_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Conversation:
        """
        Delete an item from a conversation with the given IDs.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not conversation_id:
            raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
        if not item_id:
            raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
        return await self._delete(
            f"/conversations/{conversation_id}/items/{item_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Conversation,
        )
class ItemsWithRawResponse:
    """Mirror of `Items` whose methods return the raw HTTP response."""

    def __init__(self, items: Items) -> None:
        self._items = items
        # Wrap each public API method once so calls surface the raw response.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class AsyncItemsWithRawResponse:
    """Mirror of `AsyncItems` whose methods return the raw HTTP response."""

    def __init__(self, items: AsyncItems) -> None:
        self._items = items
        # Wrap each public API method once so calls surface the raw response.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class ItemsWithStreamingResponse:
    """Mirror of `Items` whose methods stream the response body instead of eagerly reading it."""

    def __init__(self, items: Items) -> None:
        self._items = items
        # Wrap each public API method once with the streaming-response adapter.
        wrap = to_streamed_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
class AsyncItemsWithStreamingResponse:
    """Mirror of `AsyncItems` whose methods stream the response body instead of eagerly reading it."""

    def __init__(self, items: AsyncItems) -> None:
        self._items = items
        # Wrap each public API method once with the async streaming-response adapter.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(items.create)
        self.retrieve = wrap(items.retrieve)
        self.list = wrap(items.list)
        self.delete = wrap(items.delete)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/conversations/items.py",
"license": "Apache License 2.0",
"lines": 472,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/conversations/computer_screenshot_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ComputerScreenshotContent"]
class ComputerScreenshotContent(BaseModel):
    """A screenshot of a computer."""

    file_id: Optional[str] = None
    """The identifier of an uploaded file that contains the screenshot."""

    image_url: Optional[str] = None
    """The URL of the screenshot image."""

    type: Literal["computer_screenshot"]
    """Specifies the event type.

    For a computer screenshot, this property is always set to `computer_screenshot`.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/computer_screenshot_content.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/conversation.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["Conversation"]
class Conversation(BaseModel):
    """A conversation resource as returned by the API."""

    id: str
    """The unique ID of the conversation."""

    created_at: int
    """
    The time at which the conversation was created, measured in seconds since the
    Unix epoch.
    """

    metadata: object
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard. Keys are
    strings with a maximum length of 64 characters. Values are strings with a
    maximum length of 512 characters.
    """

    object: Literal["conversation"]
    """The object type, which is always `conversation`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/conversations/conversation_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable, Optional
from typing_extensions import TypedDict
from ..shared_params.metadata import Metadata
from ..responses.response_input_item_param import ResponseInputItemParam
__all__ = ["ConversationCreateParams"]
class ConversationCreateParams(TypedDict, total=False):
    """Parameters accepted when creating a conversation."""

    items: Optional[Iterable[ResponseInputItemParam]]
    """Initial items to include in the conversation context.

    You may add up to 20 items at a time.
    """

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation_create_params.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/conversations/conversation_deleted_resource.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationDeletedResource"]
class ConversationDeletedResource(BaseModel):
    """Confirmation payload returned after a conversation is deleted."""

    id: str  # ID of the conversation the deletion applies to.

    deleted: bool  # Deletion flag reported by the API.

    object: Literal["conversation.deleted"]  # Always `conversation.deleted`.
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation_deleted_resource.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/conversation_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from .message import Message
from ..._utils import PropertyInfo
from ..._models import BaseModel
from ..responses.response_reasoning_item import ResponseReasoningItem
from ..responses.response_custom_tool_call import ResponseCustomToolCall
from ..responses.response_computer_tool_call import ResponseComputerToolCall
from ..responses.response_function_web_search import ResponseFunctionWebSearch
from ..responses.response_apply_patch_tool_call import ResponseApplyPatchToolCall
from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall
from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput
from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem
from ..responses.response_function_shell_tool_call import ResponseFunctionShellToolCall
from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from ..responses.response_apply_patch_tool_call_output import ResponseApplyPatchToolCallOutput
from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
from ..responses.response_function_shell_tool_call_output import ResponseFunctionShellToolCallOutput
__all__ = [
"ConversationItem",
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
"LocalShellCallOutput",
"McpListTools",
"McpListToolsTool",
"McpApprovalRequest",
"McpApprovalResponse",
"McpCall",
]
class ImageGenerationCall(BaseModel):
    """An image generation request made by the model."""

    id: str
    """The unique ID of the image generation call."""

    result: Optional[str] = None
    """The generated image encoded in base64."""

    status: Literal["in_progress", "completed", "generating", "failed"]
    """The status of the image generation call."""

    type: Literal["image_generation_call"]
    """The type of the image generation call. Always `image_generation_call`."""
class LocalShellCallAction(BaseModel):
    """Execute a shell command on the server."""

    command: List[str]
    """The command to run."""

    env: Dict[str, str]
    """Environment variables to set for the command."""

    type: Literal["exec"]
    """The type of the local shell action. Always `exec`."""

    timeout_ms: Optional[int] = None
    """Optional timeout in milliseconds for the command."""

    user: Optional[str] = None
    """Optional user to run the command as."""

    working_directory: Optional[str] = None
    """Optional working directory to run the command in."""
class LocalShellCall(BaseModel):
    """A tool call to run a command on the local shell."""

    id: str
    """The unique ID of the local shell call."""

    action: LocalShellCallAction
    """Execute a shell command on the server."""

    call_id: str
    """The unique ID of the local shell tool call generated by the model."""

    status: Literal["in_progress", "completed", "incomplete"]
    """The status of the local shell call."""

    type: Literal["local_shell_call"]
    """The type of the local shell call. Always `local_shell_call`."""
class LocalShellCallOutput(BaseModel):
    """The output of a local shell tool call."""

    id: str
    """The unique ID of the local shell tool call generated by the model."""

    output: str
    """A JSON string of the output of the local shell tool call."""

    type: Literal["local_shell_call_output"]
    """The type of the local shell tool call output. Always `local_shell_call_output`."""

    status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
    """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
class McpListToolsTool(BaseModel):
    """A tool available on an MCP server."""

    input_schema: object
    """The JSON schema describing the tool's input."""

    name: str
    """The name of the tool."""

    annotations: Optional[object] = None
    """Additional annotations about the tool."""

    description: Optional[str] = None
    """The description of the tool."""
class McpListTools(BaseModel):
    """A list of tools available on an MCP server."""

    id: str
    """The unique ID of the list."""

    server_label: str
    """The label of the MCP server."""

    tools: List[McpListToolsTool]
    """The tools available on the server."""

    type: Literal["mcp_list_tools"]
    """The type of the item. Always `mcp_list_tools`."""

    error: Optional[str] = None
    """Error message if the server could not list tools."""
class McpApprovalRequest(BaseModel):
    """A request for human approval of a tool invocation."""

    id: str
    """The unique ID of the approval request."""

    arguments: str
    """A JSON string of arguments for the tool."""

    name: str
    """The name of the tool to run."""

    server_label: str
    """The label of the MCP server making the request."""

    type: Literal["mcp_approval_request"]
    """The type of the item. Always `mcp_approval_request`."""
class McpApprovalResponse(BaseModel):
    """A response to an MCP approval request."""

    id: str
    """The unique ID of the approval response."""

    approval_request_id: str
    """The ID of the approval request being answered."""

    approve: bool
    """Whether the request was approved."""

    type: Literal["mcp_approval_response"]
    """The type of the item. Always `mcp_approval_response`."""

    reason: Optional[str] = None
    """Optional reason for the decision."""
class McpCall(BaseModel):
    """An invocation of a tool on an MCP server."""

    id: str
    """The unique ID of the tool call."""

    arguments: str
    """A JSON string of the arguments passed to the tool."""

    name: str
    """The name of the tool that was run."""

    server_label: str
    """The label of the MCP server running the tool."""

    type: Literal["mcp_call"]
    """The type of the item. Always `mcp_call`."""

    approval_request_id: Optional[str] = None
    """
    Unique identifier for the MCP tool call approval request. Include this value in
    a subsequent `mcp_approval_response` input to approve or reject the
    corresponding tool call.
    """

    error: Optional[str] = None
    """The error from the tool call, if any."""

    output: Optional[str] = None
    """The output from the tool call."""

    status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None
    """The status of the tool call.

    One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`.
    """
# Discriminated union over every conversation item variant; the `type` field
# (via the PropertyInfo discriminator) selects which model is deserialized.
ConversationItem: TypeAlias = Annotated[
    Union[
        Message,
        ResponseFunctionToolCallItem,
        ResponseFunctionToolCallOutputItem,
        ResponseFileSearchToolCall,
        ResponseFunctionWebSearch,
        ImageGenerationCall,
        ResponseComputerToolCall,
        ResponseComputerToolCallOutputItem,
        ResponseReasoningItem,
        ResponseCodeInterpreterToolCall,
        LocalShellCall,
        LocalShellCallOutput,
        ResponseFunctionShellToolCall,
        ResponseFunctionShellToolCallOutput,
        ResponseApplyPatchToolCall,
        ResponseApplyPatchToolCallOutput,
        McpListTools,
        McpApprovalRequest,
        McpApprovalResponse,
        McpCall,
        ResponseCustomToolCall,
        ResponseCustomToolCallOutput,
    ],
    PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation_item.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/conversation_item_list.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ConversationItemList"]
class ConversationItemList(BaseModel):
    """A list of Conversation items."""

    data: List[ConversationItem]
    """A list of conversation items."""

    first_id: str
    """The ID of the first item in the list."""

    has_more: bool
    """Whether there are more items available."""

    last_id: str
    """The ID of the last item in the list."""

    object: Literal["list"]
    """The type of object returned, must be `list`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation_item_list.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/conversation_update_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Required, TypedDict
from ..shared_params.metadata import Metadata
__all__ = ["ConversationUpdateParams"]
class ConversationUpdateParams(TypedDict, total=False):
    """Parameters accepted when updating a conversation."""

    metadata: Required[Optional[Metadata]]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/conversation_update_params.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/conversations/item_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Iterable
from typing_extensions import Required, TypedDict
from ..responses.response_includable import ResponseIncludable
from ..responses.response_input_item_param import ResponseInputItemParam
__all__ = ["ItemCreateParams"]
class ItemCreateParams(TypedDict, total=False):
    """Parameters accepted when adding items to a conversation."""

    items: Required[Iterable[ResponseInputItemParam]]
    """The items to add to the conversation. You may add up to 20 items at a time."""

    include: List[ResponseIncludable]
    """Additional fields to include in the response.

    See the `include` parameter for
    [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
    for more information.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/item_create_params.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/item_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Literal, TypedDict
from ..responses.response_includable import ResponseIncludable
__all__ = ["ItemListParams"]
class ItemListParams(TypedDict, total=False):
    """Query parameters accepted when listing conversation items."""

    after: str
    """An item ID to list items after, used in pagination."""

    include: List[ResponseIncludable]
    """Specify additional output data to include in the model response.

    Currently supported values are:

    - `web_search_call.action.sources`: Include the sources of the web search tool
      call.
    - `code_interpreter_call.outputs`: Includes the outputs of python code execution
      in code interpreter tool call items.
    - `computer_call_output.output.image_url`: Include image urls from the computer
      call output.
    - `file_search_call.results`: Include the search results of the file search tool
      call.
    - `message.input_image.image_url`: Include image urls from the input message.
    - `message.output_text.logprobs`: Include logprobs with assistant messages.
    - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
      tokens in reasoning item outputs. This enables reasoning items to be used in
      multi-turn conversations when using the Responses API statelessly (like when
      the `store` parameter is set to `false`, or when an organization is enrolled
      in the zero data retention program).
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """

    order: Literal["asc", "desc"]
    """The order to return the input items in. Default is `desc`.

    - `asc`: Return the input items in ascending order.
    - `desc`: Return the input items in descending order.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/item_list_params.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/conversations/item_retrieve_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List
from typing_extensions import Required, TypedDict
from ..responses.response_includable import ResponseIncludable
__all__ = ["ItemRetrieveParams"]
class ItemRetrieveParams(TypedDict, total=False):
    """Parameters accepted when retrieving a single conversation item."""

    conversation_id: Required[str]  # ID of the conversation that contains the item.

    include: List[ResponseIncludable]
    """Additional fields to include in the response.

    See the `include` parameter for
    [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
    for more information.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/item_retrieve_params.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/message.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .text_content import TextContent
from .summary_text_content import SummaryTextContent
from .computer_screenshot_content import ComputerScreenshotContent
from ..responses.response_input_file import ResponseInputFile
from ..responses.response_input_text import ResponseInputText
from ..responses.response_input_image import ResponseInputImage
from ..responses.response_output_text import ResponseOutputText
from ..responses.response_output_refusal import ResponseOutputRefusal
__all__ = ["Message", "Content", "ContentReasoningText"]
class ContentReasoningText(BaseModel):
    """Reasoning text from the model."""

    text: str
    """The reasoning text from the model."""

    type: Literal["reasoning_text"]
    """The type of the reasoning text. Always `reasoning_text`."""
# Discriminated union of every message content part; the `type` field selects
# which model is deserialized.
Content: TypeAlias = Annotated[
    Union[
        ResponseInputText,
        ResponseOutputText,
        TextContent,
        SummaryTextContent,
        ContentReasoningText,
        ResponseOutputRefusal,
        ResponseInputImage,
        ComputerScreenshotContent,
        ResponseInputFile,
    ],
    PropertyInfo(discriminator="type"),
]
class Message(BaseModel):
    """A message to or from the model."""

    id: str
    """The unique ID of the message."""

    content: List[Content]
    """The content of the message"""

    role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"]
    """The role of the message.

    One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`,
    `developer`, or `tool`.
    """

    status: Literal["in_progress", "completed", "incomplete"]
    """The status of item.

    One of `in_progress`, `completed`, or `incomplete`. Populated when items are
    returned via API.
    """

    type: Literal["message"]
    """The type of the message. Always set to `message`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/message.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/summary_text_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["SummaryTextContent"]
class SummaryTextContent(BaseModel):
    """A summary text from the model."""

    text: str
    """A summary of the reasoning output from the model so far."""

    type: Literal["summary_text"]
    """The type of the object. Always `summary_text`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/summary_text_content.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/conversations/text_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["TextContent"]
class TextContent(BaseModel):
    """A text content."""

    text: str  # The text payload of this content part.

    type: Literal["text"]  # Discriminator value; always `text`.
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/conversations/text_content.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_conversation_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ..._models import BaseModel
__all__ = ["ResponseConversationParam"]
class ResponseConversationParam(BaseModel):
    """The conversation that this response belongs to."""

    id: str
    """The unique ID of the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_conversation_param.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/conversations/test_items.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from openai.types.conversations import (
Conversation,
ConversationItem,
ConversationItemList,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestItems:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
item = client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"phase": "commentary",
"type": "message",
}
],
include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.create(
conversation_id="",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
item = client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
client.conversations.items.with_raw_response.retrieve(
item_id="",
conversation_id="conv_123",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
item = client.conversations.items.list(
conversation_id="conv_123",
)
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
item = client.conversations.items.list(
conversation_id="conv_123",
after="after",
include=["file_search_call.results"],
limit=0,
order="asc",
)
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.list(
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.list(
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
item = client.conversations.items.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(Conversation, item, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.conversations.items.with_streaming_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
client.conversations.items.with_raw_response.delete(
item_id="",
conversation_id="conv_123",
)
class TestAsyncItems:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"phase": "commentary",
"type": "message",
}
],
include=["file_search_call.results"],
)
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.create(
conversation_id="conv_123",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(ConversationItemList, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.create(
conversation_id="",
items=[
{
"content": "string",
"role": "user",
"type": "message",
}
],
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
include=["file_search_call.results"],
)
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.retrieve(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(ConversationItem, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.retrieve(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
await async_client.conversations.items.with_raw_response.retrieve(
item_id="",
conversation_id="conv_123",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.list(
conversation_id="conv_123",
)
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.list(
conversation_id="conv_123",
after="after",
include=["file_search_call.results"],
limit=0,
order="asc",
)
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.list(
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.list(
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.list(
conversation_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
item = await async_client.conversations.items.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert_matches_type(Conversation, item, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = response.parse()
assert_matches_type(Conversation, item, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.items.with_streaming_response.delete(
item_id="msg_abc",
conversation_id="conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
item = await response.parse()
assert_matches_type(Conversation, item, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.items.with_raw_response.delete(
item_id="msg_abc",
conversation_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
await async_client.conversations.items.with_raw_response.delete(
item_id="",
conversation_id="conv_123",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/conversations/test_items.py",
"license": "Apache License 2.0",
"lines": 426,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/test_conversations.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types.conversations import (
Conversation,
ConversationDeletedResource,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestConversations:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
conversation = client.conversations.create()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
conversation = client.conversations.create(
items=[
{
"content": "string",
"role": "user",
"phase": "commentary",
"type": "message",
}
],
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.conversations.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.conversations.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
conversation = client.conversations.retrieve(
"conv_123",
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.conversations.with_raw_response.retrieve(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.conversations.with_streaming_response.retrieve(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.retrieve(
"",
)
@parametrize
def test_method_update(self, client: OpenAI) -> None:
conversation = client.conversations.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_raw_response_update(self, client: OpenAI) -> None:
response = client.conversations.with_raw_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
def test_streaming_response_update(self, client: OpenAI) -> None:
with client.conversations.with_streaming_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_update(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.update(
conversation_id="",
metadata={"foo": "string"},
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
conversation = client.conversations.delete(
"conv_123",
)
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.conversations.with_raw_response.delete(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.conversations.with_streaming_response.delete(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
client.conversations.with_raw_response.delete(
"",
)
class TestAsyncConversations:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.create()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.create(
items=[
{
"content": "string",
"role": "user",
"phase": "commentary",
"type": "message",
}
],
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.retrieve(
"conv_123",
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.retrieve(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.retrieve(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.update(
conversation_id="conv_123",
metadata={"foo": "string"},
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(Conversation, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.update(
conversation_id="",
metadata={"foo": "string"},
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
conversation = await async_client.conversations.delete(
"conv_123",
)
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.conversations.with_raw_response.delete(
"conv_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.conversations.with_streaming_response.delete(
"conv_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
conversation = await response.parse()
assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
await async_client.conversations.with_raw_response.delete(
"",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_conversations.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:scripts/detect-breaking-changes.py | from __future__ import annotations
import sys
from typing import Iterator
from pathlib import Path
import rich
import griffe
from rich.text import Text
from rich.style import Style
def public_members(obj: griffe.Object | griffe.Alias) -> dict[str, griffe.Object | griffe.Alias]:
if isinstance(obj, griffe.Alias):
# ignore imports for now, they're technically part of the public API
# but we don't have good preventative measures in place to prevent
# changing them
return {}
return {name: value for name, value in obj.all_members.items() if not name.startswith("_")}
def find_breaking_changes(
new_obj: griffe.Object | griffe.Alias,
old_obj: griffe.Object | griffe.Alias,
*,
path: list[str],
) -> Iterator[Text | str]:
new_members = public_members(new_obj)
old_members = public_members(old_obj)
for name, old_member in old_members.items():
if isinstance(old_member, griffe.Alias) and len(path) > 2:
# ignore imports in `/types/` for now, they're technically part of the public API
# but we don't have good preventative measures in place to prevent changing them
continue
new_member = new_members.get(name)
if new_member is None:
cls_name = old_member.__class__.__name__
yield Text(f"({cls_name})", style=Style(color="rgb(119, 119, 119)"))
yield from [" " for _ in range(10 - len(cls_name))]
yield f" {'.'.join(path)}.{name}"
yield "\n"
continue
yield from find_breaking_changes(new_member, old_member, path=[*path, name])
def main() -> None:
try:
against_ref = sys.argv[1]
except IndexError as err:
raise RuntimeError("You must specify a base ref to run breaking change detection against") from err
package = griffe.load(
"openai",
search_paths=[Path(__file__).parent.parent.joinpath("src")],
)
old_package = griffe.load_git(
"openai",
ref=against_ref,
search_paths=["src"],
)
assert isinstance(package, griffe.Module)
assert isinstance(old_package, griffe.Module)
output = list(find_breaking_changes(package, old_package, path=["openai"]))
if output:
rich.print(Text("Breaking changes detected!", style=Style(color="rgb(165, 79, 87)")))
rich.print()
for text in output:
rich.print(text, end="")
sys.exit(1)
main()
| {
"repo_id": "openai/openai-python",
"file_path": "scripts/detect-breaking-changes.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:tests/lib/snapshots.py | from __future__ import annotations
import os
import json
from typing import Any, Callable, Awaitable
from typing_extensions import TypeVar
import httpx
from respx import MockRouter
from inline_snapshot import get_snapshot_value
from openai import OpenAI, AsyncOpenAI
_T = TypeVar("_T")
def make_snapshot_request(
func: Callable[[OpenAI], _T],
*,
content_snapshot: Any,
respx_mock: MockRouter,
mock_client: OpenAI,
path: str,
) -> _T:
live = os.environ.get("OPENAI_LIVE") == "1"
if live:
def _on_response(response: httpx.Response) -> None:
# update the content snapshot
assert json.dumps(json.loads(response.read())) == content_snapshot
respx_mock.stop()
client = OpenAI(
http_client=httpx.Client(
event_hooks={
"response": [_on_response],
}
)
)
else:
respx_mock.post(path).mock(
return_value=httpx.Response(
200,
content=get_snapshot_value(content_snapshot),
headers={"content-type": "application/json"},
)
)
client = mock_client
result = func(client)
if live:
client.close()
return result
async def make_async_snapshot_request(
func: Callable[[AsyncOpenAI], Awaitable[_T]],
*,
content_snapshot: Any,
respx_mock: MockRouter,
mock_client: AsyncOpenAI,
path: str,
) -> _T:
live = os.environ.get("OPENAI_LIVE") == "1"
if live:
async def _on_response(response: httpx.Response) -> None:
# update the content snapshot
assert json.dumps(json.loads(await response.aread())) == content_snapshot
respx_mock.stop()
client = AsyncOpenAI(
http_client=httpx.AsyncClient(
event_hooks={
"response": [_on_response],
}
)
)
else:
respx_mock.post(path).mock(
return_value=httpx.Response(
200,
content=get_snapshot_value(content_snapshot),
headers={"content-type": "application/json"},
)
)
client = mock_client
result = await func(client)
if live:
await client.close()
return result
| {
"repo_id": "openai/openai-python",
"file_path": "tests/lib/snapshots.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/compat/test_tool_param.py | from openai.types.chat import ChatCompletionToolParam
def test_tool_param_can_be_instantiated() -> None:
assert ChatCompletionToolParam(type="function", function={"name": "test"}) == {
"function": {"name": "test"},
"type": "function",
}
| {
"repo_id": "openai/openai-python",
"file_path": "tests/compat/test_tool_param.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/chat/chat_completion_allowed_tool_choice_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam
__all__ = ["ChatCompletionAllowedToolChoiceParam"]
class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False):
"""Constrains the tools available to the model to a pre-defined set."""
allowed_tools: Required[ChatCompletionAllowedToolsParam]
"""Constrains the tools available to the model to a pre-defined set."""
type: Required[Literal["allowed_tools"]]
"""Allowed tool configuration type. Always `allowed_tools`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_allowed_tool_choice_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_allowed_tools_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Iterable
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatCompletionAllowedToolsParam"]
class ChatCompletionAllowedToolsParam(TypedDict, total=False):
"""Constrains the tools available to the model to a pre-defined set."""
mode: Required[Literal["auto", "required"]]
"""Constrains the tools available to the model to a pre-defined set.
`auto` allows the model to pick from among the allowed tools and generate a
message.
`required` requires the model to call one or more of the allowed tools.
"""
tools: Required[Iterable[Dict[str, object]]]
"""A list of tool definitions that the model should be allowed to call.
For the Chat Completions API, the list of tool definitions might look like:
```json
[
{ "type": "function", "function": { "name": "get_weather" } },
{ "type": "function", "function": { "name": "get_time" } }
]
```
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_allowed_tools_param.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/chat/chat_completion_custom_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = [
"ChatCompletionCustomToolParam",
"Custom",
"CustomFormat",
"CustomFormatText",
"CustomFormatGrammar",
"CustomFormatGrammarGrammar",
]
class CustomFormatText(TypedDict, total=False):
"""Unconstrained free-form text."""
type: Required[Literal["text"]]
"""Unconstrained text format. Always `text`."""
class CustomFormatGrammarGrammar(TypedDict, total=False):
"""Your chosen grammar."""
definition: Required[str]
"""The grammar definition."""
syntax: Required[Literal["lark", "regex"]]
"""The syntax of the grammar definition. One of `lark` or `regex`."""
class CustomFormatGrammar(TypedDict, total=False):
"""A grammar defined by the user."""
grammar: Required[CustomFormatGrammarGrammar]
"""Your chosen grammar."""
type: Required[Literal["grammar"]]
"""Grammar format. Always `grammar`."""
CustomFormat: TypeAlias = Union[CustomFormatText, CustomFormatGrammar]
class Custom(TypedDict, total=False):
"""Properties of the custom tool."""
name: Required[str]
"""The name of the custom tool, used to identify it in tool calls."""
description: str
"""Optional description of the custom tool, used to provide more context."""
format: CustomFormat
"""The input format for the custom tool. Default is unconstrained text."""
class ChatCompletionCustomToolParam(TypedDict, total=False):
"""A custom tool that processes input using a specified format."""
custom: Required[Custom]
"""Properties of the custom tool."""
type: Required[Literal["custom"]]
"""The type of the custom tool. Always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_custom_tool_param.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_function_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from ..shared_params.function_definition import FunctionDefinition
__all__ = ["ChatCompletionFunctionToolParam"]
class ChatCompletionFunctionToolParam(TypedDict, total=False):
"""A function tool that can be used to generate a response."""
function: Required[FunctionDefinition]
type: Required[Literal["function"]]
"""The type of the tool. Currently, only `function` is supported."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_function_tool_param.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_message_custom_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ChatCompletionMessageCustomToolCall", "Custom"]
class Custom(BaseModel):
"""The custom tool that the model called."""
input: str
"""The input for the custom tool call generated by the model."""
name: str
"""The name of the custom tool to call."""
class ChatCompletionMessageCustomToolCall(BaseModel):
"""A call to a custom tool created by the model."""
id: str
"""The ID of the tool call."""
custom: Custom
"""The custom tool that the model called."""
type: Literal["custom"]
"""The type of the tool. Always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_message_custom_tool_call.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_message_custom_tool_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatCompletionMessageCustomToolCallParam", "Custom"]
class Custom(TypedDict, total=False):
"""The custom tool that the model called."""
input: Required[str]
"""The input for the custom tool call generated by the model."""
name: Required[str]
"""The name of the custom tool to call."""
class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False):
"""A call to a custom tool created by the model."""
id: Required[str]
"""The ID of the tool call."""
custom: Required[Custom]
"""The custom tool that the model called."""
type: Required[Literal["custom"]]
"""The type of the tool. Always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_message_custom_tool_call_param.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_message_function_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ChatCompletionMessageFunctionToolCall", "Function"]
class Function(BaseModel):
"""The function that the model called."""
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
class ChatCompletionMessageFunctionToolCall(BaseModel):
"""A call to a function tool created by the model."""
id: str
"""The ID of the tool call."""
function: Function
"""The function that the model called."""
type: Literal["function"]
"""The type of the tool. Currently, only `function` is supported."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_message_function_tool_call.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/chat/chat_completion_message_function_tool_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatCompletionMessageFunctionToolCallParam", "Function"]
class Function(TypedDict, total=False):
"""The function that the model called."""
arguments: Required[str]
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: Required[str]
"""The name of the function to call."""
class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False):
"""A call to a function tool created by the model."""
id: Required[str]
"""The ID of the tool call."""
function: Required[Function]
"""The function that the model called."""
type: Required[Literal["function"]]
"""The type of the tool. Currently, only `function` is supported."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_message_function_tool_call_param.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatCompletionNamedToolChoiceCustomParam", "Custom"]
class Custom(TypedDict, total=False):
name: Required[str]
"""The name of the custom tool to call."""
class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False):
"""Specifies a tool the model should use.
Use to force the model to call a specific custom tool.
"""
custom: Required[Custom]
type: Required[Literal["custom"]]
"""For custom tool calling, the type is always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/custom_tool.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from ..shared.custom_tool_input_format import CustomToolInputFormat
__all__ = ["CustomTool"]
class CustomTool(BaseModel):
"""A custom tool that processes input using a specified format.
Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
"""
name: str
"""The name of the custom tool, used to identify it in tool calls."""
type: Literal["custom"]
"""The type of the custom tool. Always `custom`."""
description: Optional[str] = None
"""Optional description of the custom tool, used to provide more context."""
format: Optional[CustomToolInputFormat] = None
"""The input format for the custom tool. Default is unconstrained text."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/custom_tool.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/custom_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from ..shared_params.custom_tool_input_format import CustomToolInputFormat
__all__ = ["CustomToolParam"]
class CustomToolParam(TypedDict, total=False):
"""A custom tool that processes input using a specified format.
Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
"""
name: Required[str]
"""The name of the custom tool, used to identify it in tool calls."""
type: Required[Literal["custom"]]
"""The type of the custom tool. Always `custom`."""
description: str
"""Optional description of the custom tool, used to provide more context."""
format: CustomToolInputFormat
"""The input format for the custom tool. Default is unconstrained text."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/custom_tool_param.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_custom_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCustomToolCall"]
class ResponseCustomToolCall(BaseModel):
"""A call to a custom tool created by the model."""
call_id: str
"""An identifier used to map this custom tool call to a tool call output."""
input: str
"""The input for the custom tool call generated by the model."""
name: str
"""The name of the custom tool being called."""
type: Literal["custom_tool_call"]
"""The type of the custom tool call. Always `custom_tool_call`."""
id: Optional[str] = None
"""The unique ID of the custom tool call in the OpenAI platform."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_custom_tool_call_input_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCustomToolCallInputDeltaEvent"]
class ResponseCustomToolCallInputDeltaEvent(BaseModel):
"""Event representing a delta (partial update) to the input of a custom tool call."""
delta: str
"""The incremental input data (delta) for the custom tool call."""
item_id: str
"""Unique identifier for the API item associated with this event."""
output_index: int
"""The index of the output this delta applies to."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.custom_tool_call_input.delta"]
"""The event type identifier."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call_input_delta_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_custom_tool_call_input_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCustomToolCallInputDoneEvent"]
class ResponseCustomToolCallInputDoneEvent(BaseModel):
"""Event indicating that input for a custom tool call is complete."""
input: str
"""The complete input data for the custom tool call."""
item_id: str
"""Unique identifier for the API item associated with this event."""
output_index: int
"""The index of the output this event applies to."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.custom_tool_call_input.done"]
"""The event type identifier."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call_input_done_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.