sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
openai/openai-python:src/openai/types/responses/response_input_file_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseInputFileContent"]
class ResponseInputFileContent(BaseModel):
"""A file input to the model."""
type: Literal["input_file"]
"""The type of the input item. Always `input_file`."""
file_data: Optional[str] = None
"""The base64-encoded data of the file to be sent to the model."""
file_id: Optional[str] = None
"""The ID of the file to be sent to the model."""
file_url: Optional[str] = None
"""The URL of the file to be sent to the model."""
filename: Optional[str] = None
"""The name of the file to be sent to the model."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_file_content.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_input_file_content_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseInputFileContentParam"]
class ResponseInputFileContentParam(TypedDict, total=False):
"""A file input to the model."""
type: Required[Literal["input_file"]]
"""The type of the input item. Always `input_file`."""
file_data: Optional[str]
"""The base64-encoded data of the file to be sent to the model."""
file_id: Optional[str]
"""The ID of the file to be sent to the model."""
file_url: Optional[str]
"""The URL of the file to be sent to the model."""
filename: Optional[str]
"""The name of the file to be sent to the model."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_file_content_param.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_input_image_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseInputImageContent"]
class ResponseInputImageContent(BaseModel):
"""An image input to the model.
Learn about [image inputs](https://platform.openai.com/docs/guides/vision)
"""
type: Literal["input_image"]
"""The type of the input item. Always `input_image`."""
detail: Optional[Literal["low", "high", "auto"]] = None
"""The detail level of the image to be sent to the model.
One of `high`, `low`, or `auto`. Defaults to `auto`.
"""
file_id: Optional[str] = None
"""The ID of the file to be sent to the model."""
image_url: Optional[str] = None
"""The URL of the image to be sent to the model.
A fully qualified URL or base64 encoded image in a data URL.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_image_content.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_input_image_content_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseInputImageContentParam"]
class ResponseInputImageContentParam(TypedDict, total=False):
"""An image input to the model.
Learn about [image inputs](https://platform.openai.com/docs/guides/vision)
"""
type: Required[Literal["input_image"]]
"""The type of the input item. Always `input_image`."""
detail: Optional[Literal["low", "high", "auto"]]
"""The detail level of the image to be sent to the model.
One of `high`, `low`, or `auto`. Defaults to `auto`.
"""
file_id: Optional[str]
"""The ID of the file to be sent to the model."""
image_url: Optional[str]
"""The URL of the image to be sent to the model.
A fully qualified URL or base64 encoded image in a data URL.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_image_content_param.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_input_text_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseInputTextContent"]
class ResponseInputTextContent(BaseModel):
"""A text input to the model."""
text: str
"""The text input to the model."""
type: Literal["input_text"]
"""The type of the input item. Always `input_text`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_text_content.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_input_text_content_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseInputTextContentParam"]
class ResponseInputTextContentParam(TypedDict, total=False):
"""A text input to the model."""
text: Required[str]
"""The text input to the model."""
type: Required[Literal["input_text"]]
"""The type of the input item. Always `input_text`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_text_content_param.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/audio_transcription.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["AudioTranscription"]
class AudioTranscription(BaseModel):
language: Optional[str] = None
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
model: Union[
str,
Literal[
"whisper-1",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-transcribe-2025-12-15",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
],
None,
] = None
"""The model to use for transcription.
Current options are `whisper-1`, `gpt-4o-mini-transcribe`,
`gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and
`gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need
diarization with speaker labels.
"""
prompt: Optional[str] = None
"""
An optional text to guide the model's style or continue a previous audio
segment. For `whisper-1`, the
[prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
prompt is a free text string, for example "expect words related to technology".
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/audio_transcription.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/audio_transcription_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
__all__ = ["AudioTranscriptionParam"]
class AudioTranscriptionParam(TypedDict, total=False):
language: str
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
model: Union[
str,
Literal[
"whisper-1",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-transcribe-2025-12-15",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
],
]
"""The model to use for transcription.
Current options are `whisper-1`, `gpt-4o-mini-transcribe`,
`gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and
`gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need
diarization with speaker labels.
"""
prompt: str
"""
An optional text to guide the model's style or continue a previous audio
segment. For `whisper-1`, the
[prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
prompt is a free text string, for example "expect words related to technology".
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/audio_transcription_param.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_config_input.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .audio_transcription import AudioTranscription
from .noise_reduction_type import NoiseReductionType
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_audio_input_turn_detection import RealtimeAudioInputTurnDetection
__all__ = ["RealtimeAudioConfigInput", "NoiseReduction"]
class NoiseReduction(BaseModel):
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off.
Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model.
Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio.
"""
type: Optional[NoiseReductionType] = None
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
class RealtimeAudioConfigInput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the input audio."""
noise_reduction: Optional[NoiseReduction] = None
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off. Noise reduction filters audio added to
the input audio buffer before it is sent to VAD and the model. Filtering the
audio can improve VAD and turn detection accuracy (reducing false positives) and
model performance by improving perception of the input audio.
"""
transcription: Optional[AudioTranscription] = None
"""
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription, these offer additional guidance to the transcription service.
"""
turn_detection: Optional[RealtimeAudioInputTurnDetection] = None
"""Configuration for turn detection, ether Server VAD or Semantic VAD.
This can be set to `null` to turn off, in which case the client must manually
trigger model response.
Server VAD means that the model will detect the start and end of speech based on
audio volume and respond at the end of user speech.
Semantic VAD is more advanced and uses a turn detection model (in conjunction
with VAD) to semantically estimate whether the user has finished speaking, then
dynamically sets a timeout based on this probability. For example, if user audio
trails off with "uhhm", the model will score a low probability of turn end and
wait longer for the user to continue speaking. This can be useful for more
natural conversations, but may have a higher latency.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config_input.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_config_input_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import TypedDict
from .noise_reduction_type import NoiseReductionType
from .audio_transcription_param import AudioTranscriptionParam
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
from .realtime_audio_input_turn_detection_param import RealtimeAudioInputTurnDetectionParam
__all__ = ["RealtimeAudioConfigInputParam", "NoiseReduction"]
class NoiseReduction(TypedDict, total=False):
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off.
Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model.
Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio.
"""
type: NoiseReductionType
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
class RealtimeAudioConfigInputParam(TypedDict, total=False):
format: RealtimeAudioFormatsParam
"""The format of the input audio."""
noise_reduction: NoiseReduction
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off. Noise reduction filters audio added to
the input audio buffer before it is sent to VAD and the model. Filtering the
audio can improve VAD and turn detection accuracy (reducing false positives) and
model performance by improving perception of the input audio.
"""
transcription: AudioTranscriptionParam
"""
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription, these offer additional guidance to the transcription service.
"""
turn_detection: Optional[RealtimeAudioInputTurnDetectionParam]
"""Configuration for turn detection, ether Server VAD or Semantic VAD.
This can be set to `null` to turn off, in which case the client must manually
trigger model response.
Server VAD means that the model will detect the start and end of speech based on
audio volume and respond at the end of user speech.
Semantic VAD is more advanced and uses a turn detection model (in conjunction
with VAD) to semantically estimate whether the user has finished speaking, then
dynamically sets a timeout based on this probability. For example, if user audio
trails off with "uhhm", the model will score a low probability of turn end and
wait longer for the user to continue speaking. This can be useful for more
natural conversations, but may have a higher latency.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config_input_param.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_config_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_audio_formats import RealtimeAudioFormats
__all__ = ["RealtimeAudioConfigOutput"]
class RealtimeAudioConfigOutput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The format of the output audio."""
speed: Optional[float] = None
"""
The speed of the model's spoken response as a multiple of the original speed.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
This parameter is a post-processing adjustment to the audio after it is
generated, it's also possible to prompt the model to speak faster or slower.
"""
voice: Union[
str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"], None
] = None
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
`shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
session once the model has responded with audio at least once. We recommend
`marin` and `cedar` for best quality.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config_output.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_config_output_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
__all__ = ["RealtimeAudioConfigOutputParam"]
class RealtimeAudioConfigOutputParam(TypedDict, total=False):
format: RealtimeAudioFormatsParam
"""The format of the output audio."""
speed: float
"""
The speed of the model's spoken response as a multiple of the original speed.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
This parameter is a post-processing adjustment to the audio after it is
generated, it's also possible to prompt the model to speak faster or slower.
"""
voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
"""The voice the model uses to respond.
Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
`shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the
session once the model has responded with audio at least once. We recommend
`marin` and `cedar` for best quality.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config_output_param.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_formats.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = ["RealtimeAudioFormats", "AudioPCM", "AudioPCMU", "AudioPCMA"]
class AudioPCM(BaseModel):
"""The PCM audio format. Only a 24kHz sample rate is supported."""
rate: Optional[Literal[24000]] = None
"""The sample rate of the audio. Always `24000`."""
type: Optional[Literal["audio/pcm"]] = None
"""The audio format. Always `audio/pcm`."""
class AudioPCMU(BaseModel):
"""The G.711 μ-law format."""
type: Optional[Literal["audio/pcmu"]] = None
"""The audio format. Always `audio/pcmu`."""
class AudioPCMA(BaseModel):
"""The G.711 A-law format."""
type: Optional[Literal["audio/pcma"]] = None
"""The audio format. Always `audio/pcma`."""
RealtimeAudioFormats: TypeAlias = Annotated[Union[AudioPCM, AudioPCMU, AudioPCMA], PropertyInfo(discriminator="type")]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_formats.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_audio_formats_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias, TypedDict
__all__ = ["RealtimeAudioFormatsParam", "AudioPCM", "AudioPCMU", "AudioPCMA"]
class AudioPCM(TypedDict, total=False):
"""The PCM audio format. Only a 24kHz sample rate is supported."""
rate: Literal[24000]
"""The sample rate of the audio. Always `24000`."""
type: Literal["audio/pcm"]
"""The audio format. Always `audio/pcm`."""
class AudioPCMU(TypedDict, total=False):
"""The G.711 μ-law format."""
type: Literal["audio/pcmu"]
"""The audio format. Always `audio/pcmu`."""
class AudioPCMA(TypedDict, total=False):
"""The G.711 A-law format."""
type: Literal["audio/pcma"]
"""The audio format. Always `audio/pcma`."""
RealtimeAudioFormatsParam: TypeAlias = Union[AudioPCM, AudioPCMU, AudioPCMA]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_formats_param.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_audio_input_turn_detection.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = ["RealtimeAudioInputTurnDetection", "ServerVad", "SemanticVad"]
class ServerVad(BaseModel):
"""
Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence.
"""
type: Literal["server_vad"]
"""Type of turn detection, `server_vad` to turn on simple Server VAD."""
create_response: Optional[bool] = None
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
If `interrupt_response` is set to `false` this may fail to create a response if
the model is already responding.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
idle_timeout_ms: Optional[int] = None
"""Optional timeout after which a model response will be triggered automatically.
This is useful for situations in which a long pause from the user is unexpected,
such as a phone call. The model will effectively prompt the user to continue the
conversation based on the current context.
The timeout value will be applied after the last model response's audio has
finished playing, i.e. it's set to the `response.done` time plus audio playback
duration.
An `input_audio_buffer.timeout_triggered` event (plus events associated with the
Response) will be emitted when the timeout is reached. Idle timeout is currently
only supported for `server_vad` mode.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt (cancel) any ongoing response with
output to the default conversation (i.e. `conversation` of `auto`) when a VAD
start event occurs. If `true` then the response will be cancelled, otherwise it
will continue until complete.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
prefix_padding_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: Optional[float] = None
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
class SemanticVad(BaseModel):
"""
Server-side semantic turn detection which uses a model to determine when the user has finished speaking.
"""
type: Literal["semantic_vad"]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: Optional[bool] = None
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeAudioInputTurnDetection: TypeAlias = Annotated[
Union[ServerVad, SemanticVad, None], PropertyInfo(discriminator="type")
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_input_turn_detection.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_input_turn_detection_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = ["RealtimeAudioInputTurnDetectionParam", "ServerVad", "SemanticVad"]
class ServerVad(TypedDict, total=False):
"""
Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence.
"""
type: Required[Literal["server_vad"]]
"""Type of turn detection, `server_vad` to turn on simple Server VAD."""
create_response: bool
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
If `interrupt_response` is set to `false` this may fail to create a response if
the model is already responding.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
idle_timeout_ms: Optional[int]
"""Optional timeout after which a model response will be triggered automatically.
This is useful for situations in which a long pause from the user is unexpected,
such as a phone call. The model will effectively prompt the user to continue the
conversation based on the current context.
The timeout value will be applied after the last model response's audio has
finished playing, i.e. it's set to the `response.done` time plus audio playback
duration.
An `input_audio_buffer.timeout_triggered` event (plus events associated with the
Response) will be emitted when the timeout is reached. Idle timeout is currently
only supported for `server_vad` mode.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt (cancel) any ongoing response with
output to the default conversation (i.e. `conversation` of `auto`) when a VAD
start event occurs. If `true` then the response will be cancelled, otherwise it
will continue until complete.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
prefix_padding_ms: int
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: int
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: float
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
class SemanticVad(TypedDict, total=False):
"""
Server-side semantic turn detection which uses a model to determine when the user has finished speaking.
"""
type: Required[Literal["semantic_vad"]]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: bool
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Literal["low", "medium", "high", "auto"]
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_input_turn_detection_param.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_create_audio_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_audio_formats import RealtimeAudioFormats
__all__ = ["RealtimeResponseCreateAudioOutput", "Output"]
class Output(BaseModel):
    """Audio-output settings carried by a response's `audio` configuration."""

    format: Union[RealtimeAudioFormats, None] = None
    """Audio format used for the model's output."""

    voice: Optional[
        Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
    ] = None
    """Voice used for the model's spoken replies.

    Built-in options are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
    `shimmer`, `verse`, `marin`, and `cedar`. Once the session has produced
    audio at least once, the voice can no longer be changed.
    """
class RealtimeResponseCreateAudioOutput(BaseModel):
    """Configuration for audio input and output on a created response."""

    output: Union[Output, None] = None
    """Settings for the audio the model produces."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_audio_output.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_response_create_audio_output_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
__all__ = ["RealtimeResponseCreateAudioOutputParam", "Output"]
class Output(TypedDict, total=False):
    """Audio-output settings carried by a response's `audio` configuration."""

    format: RealtimeAudioFormatsParam
    """Audio format used for the model's output."""

    voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]]
    """Voice used for the model's spoken replies.

    Built-in options are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`,
    `shimmer`, `verse`, `marin`, and `cedar`. Once the session has produced
    audio at least once, the voice can no longer be changed.
    """
class RealtimeResponseCreateAudioOutputParam(TypedDict, total=False):
    """Configuration for audio input and output on a created response."""

    output: Output
    """Settings for the audio the model produces."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_audio_output_param.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_response_create_mcp_tool.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
__all__ = [
"RealtimeResponseCreateMcpTool",
"AllowedTools",
"AllowedToolsMcpToolFilter",
"RequireApproval",
"RequireApprovalMcpToolApprovalFilter",
"RequireApprovalMcpToolApprovalFilterAlways",
"RequireApprovalMcpToolApprovalFilterNever",
]
class AllowedToolsMcpToolFilter(BaseModel):
    """A filter object selecting which MCP tools are allowed."""

    read_only: Union[bool, None] = None
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: Union[List[str], None] = None
    """Names of the tools that are allowed."""
# Either an explicit list of tool names, a filter object, or unset.
AllowedTools: TypeAlias = Union[List[str], AllowedToolsMcpToolFilter, None]
class RequireApprovalMcpToolApprovalFilterAlways(BaseModel):
    """A filter object selecting a subset of the server's tools."""

    read_only: Union[bool, None] = None
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: Union[List[str], None] = None
    """Names of the tools that are allowed."""
class RequireApprovalMcpToolApprovalFilterNever(BaseModel):
    """A filter object selecting a subset of the server's tools."""

    read_only: Union[bool, None] = None
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: Union[List[str], None] = None
    """Names of the tools that are allowed."""
class RequireApprovalMcpToolApprovalFilter(BaseModel):
    """Per-tool approval policy for the MCP server.

    Holds one filter for tools that `always` require approval and one for
    tools that `never` do; the policy may also be a blanket `always`/`never`
    string instead of this object.
    """

    always: Union[RequireApprovalMcpToolApprovalFilterAlways, None] = None
    """Filter selecting the tools covered by the `always` policy."""

    never: Union[RequireApprovalMcpToolApprovalFilterNever, None] = None
    """Filter selecting the tools covered by the `never` policy."""
# Approval policy: a per-tool filter object, a blanket "always"/"never", or unset.
RequireApproval: TypeAlias = Union[RequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
class RealtimeResponseCreateMcpTool(BaseModel):
    """Remote MCP tool configuration.

    Gives the model access to extra tools exposed by remote Model Context
    Protocol (MCP) servers.
    [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    """

    server_label: str
    """Label identifying this MCP server in tool calls."""

    type: Literal["mcp"]
    """Tool type discriminator; always `mcp`."""

    allowed_tools: Union[AllowedTools, None] = None
    """Either a list of permitted tool names or a filter object."""

    authorization: Union[str, None] = None
    """OAuth access token for the remote MCP server (custom server URL or
    service connector).

    The application is responsible for running the OAuth authorization flow
    and supplying the resulting token here.
    """

    connector_id: Union[
        Literal[
            "connector_dropbox",
            "connector_gmail",
            "connector_googlecalendar",
            "connector_googledrive",
            "connector_microsoftteams",
            "connector_outlookcalendar",
            "connector_outlookemail",
            "connector_sharepoint",
        ],
        None,
    ] = None
    """Identifier for service connectors, like those available in ChatGPT.

    One of `server_url` or `connector_id` must be provided. Learn more about
    service connectors
    [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).

    Currently supported `connector_id` values are:

    - Dropbox: `connector_dropbox`
    - Gmail: `connector_gmail`
    - Google Calendar: `connector_googlecalendar`
    - Google Drive: `connector_googledrive`
    - Microsoft Teams: `connector_microsoftteams`
    - Outlook Calendar: `connector_outlookcalendar`
    - Outlook Email: `connector_outlookemail`
    - SharePoint: `connector_sharepoint`
    """

    headers: Union[Dict[str, str], None] = None
    """Optional HTTP headers sent to the MCP server, e.g. for
    authentication.
    """

    require_approval: Union[RequireApproval, None] = None
    """Which of the server's tools require approval before use."""

    server_description: Union[str, None] = None
    """Optional human-readable description of the MCP server."""

    server_url: Union[str, None] = None
    """URL of the MCP server.

    One of `server_url` or `connector_id` must be provided.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_mcp_tool.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_create_mcp_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
__all__ = [
"RealtimeResponseCreateMcpToolParam",
"AllowedTools",
"AllowedToolsMcpToolFilter",
"RequireApproval",
"RequireApprovalMcpToolApprovalFilter",
"RequireApprovalMcpToolApprovalFilterAlways",
"RequireApprovalMcpToolApprovalFilterNever",
]
class AllowedToolsMcpToolFilter(TypedDict, total=False):
    """A filter object selecting which MCP tools are allowed."""

    read_only: bool
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: SequenceNotStr[str]
    """Names of the tools that are allowed."""
# Either an explicit list of tool names or a filter object.
AllowedTools: TypeAlias = Union[SequenceNotStr[str], AllowedToolsMcpToolFilter]
class RequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
    """A filter object selecting a subset of the server's tools."""

    read_only: bool
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: SequenceNotStr[str]
    """Names of the tools that are allowed."""
class RequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
    """A filter object selecting a subset of the server's tools."""

    read_only: bool
    """Whether the tool is read-only (does not modify data).

    MCP servers
    [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint)
    match this filter.
    """

    tool_names: SequenceNotStr[str]
    """Names of the tools that are allowed."""
class RequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
    """Per-tool approval policy for the MCP server.

    Holds one filter for tools that `always` require approval and one for
    tools that `never` do; the policy may also be a blanket `always`/`never`
    string instead of this object.
    """

    always: RequireApprovalMcpToolApprovalFilterAlways
    """Filter selecting the tools covered by the `always` policy."""

    never: RequireApprovalMcpToolApprovalFilterNever
    """Filter selecting the tools covered by the `never` policy."""
# Approval policy: a per-tool filter object or a blanket "always"/"never".
RequireApproval: TypeAlias = Union[RequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
class RealtimeResponseCreateMcpToolParam(TypedDict, total=False):
    """Remote MCP tool configuration.

    Gives the model access to extra tools exposed by remote Model Context
    Protocol (MCP) servers.
    [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    """

    server_label: Required[str]
    """Label identifying this MCP server in tool calls."""

    type: Required[Literal["mcp"]]
    """Tool type discriminator; always `mcp`."""

    allowed_tools: Optional[AllowedTools]
    """Either a list of permitted tool names or a filter object."""

    authorization: str
    """OAuth access token for the remote MCP server (custom server URL or
    service connector).

    The application is responsible for running the OAuth authorization flow
    and supplying the resulting token here.
    """

    connector_id: Literal[
        "connector_dropbox",
        "connector_gmail",
        "connector_googlecalendar",
        "connector_googledrive",
        "connector_microsoftteams",
        "connector_outlookcalendar",
        "connector_outlookemail",
        "connector_sharepoint",
    ]
    """Identifier for service connectors, like those available in ChatGPT.

    One of `server_url` or `connector_id` must be provided. Learn more about
    service connectors
    [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).

    Currently supported `connector_id` values are:

    - Dropbox: `connector_dropbox`
    - Gmail: `connector_gmail`
    - Google Calendar: `connector_googlecalendar`
    - Google Drive: `connector_googledrive`
    - Microsoft Teams: `connector_microsoftteams`
    - Outlook Calendar: `connector_outlookcalendar`
    - Outlook Email: `connector_outlookemail`
    - SharePoint: `connector_sharepoint`
    """

    headers: Optional[Dict[str, str]]
    """Optional HTTP headers sent to the MCP server, e.g. for
    authentication.
    """

    require_approval: Optional[RequireApproval]
    """Which of the server's tools require approval before use."""

    server_description: str
    """Optional human-readable description of the MCP server."""

    server_url: str
    """URL of the MCP server.

    One of `server_url` or `connector_id` must be provided.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_mcp_tool_param.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from ..shared.metadata import Metadata
from .conversation_item import ConversationItem
from .realtime_function_tool import RealtimeFunctionTool
from ..responses.response_prompt import ResponsePrompt
from ..responses.tool_choice_mcp import ToolChoiceMcp
from ..responses.tool_choice_options import ToolChoiceOptions
from ..responses.tool_choice_function import ToolChoiceFunction
from .realtime_response_create_mcp_tool import RealtimeResponseCreateMcpTool
from .realtime_response_create_audio_output import RealtimeResponseCreateAudioOutput
__all__ = ["RealtimeResponseCreateParams", "ToolChoice", "Tool"]
# A string tool-choice mode, or an object forcing a specific function/MCP tool.
ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunction, ToolChoiceMcp]
# A tool available to the model: a realtime function tool or a remote MCP tool.
Tool: TypeAlias = Union[RealtimeFunctionTool, RealtimeResponseCreateMcpTool]
class RealtimeResponseCreateParams(BaseModel):
    """Parameters used to create a new Realtime response."""

    audio: Union[RealtimeResponseCreateAudioOutput, None] = None
    """Audio input/output configuration for this response."""

    conversation: Union[str, Literal["auto", "none"], None] = None
    """Which conversation the response is appended to.

    Currently supports `auto` and `none`, with `auto` as the default. `auto`
    appends the response contents to the default conversation; `none`
    produces an out-of-band response whose items are not added to the
    default conversation.
    """

    input: Union[List[ConversationItem], None] = None
    """Input items used as the model prompt.

    Supplying this field gives the Response its own context instead of the
    default conversation; an empty array `[]` clears that context. Items
    that already appeared in the session may be referenced here by id.
    """

    instructions: Union[str, None] = None
    """Default system instructions (i.e. system message) prepended to model
    calls.

    Lets the client steer response content and format (e.g. "be extremely
    succinct", "act friendly", "here are examples of good responses") as
    well as audio behavior (e.g. "talk quickly", "inject emotion into your
    voice", "laugh frequently"). The model is not guaranteed to follow
    them, but they guide it toward the desired behavior. If unset, the
    server applies default instructions, visible in the `session.created`
    event at the start of the session.
    """

    max_output_tokens: Union[int, Literal["inf"], None] = None
    """Cap on output tokens for a single assistant response, tool calls
    included.

    An integer between 1 and 4096 limits output tokens; `inf` (the default)
    allows the maximum available for the model.
    """

    metadata: Optional[Metadata] = None
    """Set of up to 16 key-value pairs attached to the object.

    Useful for storing extra structured information and for querying
    objects via the API or dashboard. Keys are strings of at most 64
    characters; values are strings of at most 512 characters.
    """

    output_modalities: Union[List[Literal["text", "audio"]], None] = None
    """Modalities the model responds with; the only possible values are
    `[\"audio\"]` and `[\"text\"]`.

    Audio output always includes a text transcript. Setting the output mode
    to `text` disables audio output from the model.
    """

    prompt: Optional[ResponsePrompt] = None
    """Reference to a prompt template and its variables.

    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    """

    tool_choice: Union[ToolChoice, None] = None
    """How the model picks tools: one of the string modes, or a forced
    function/MCP tool.
    """

    tools: Union[List[Tool], None] = None
    """Tools the model may call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_params.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_response_create_params_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Iterable, Optional
from typing_extensions import Literal, TypeAlias, TypedDict
from ..shared_params.metadata import Metadata
from .conversation_item_param import ConversationItemParam
from .realtime_function_tool_param import RealtimeFunctionToolParam
from ..responses.tool_choice_options import ToolChoiceOptions
from ..responses.response_prompt_param import ResponsePromptParam
from ..responses.tool_choice_mcp_param import ToolChoiceMcpParam
from ..responses.tool_choice_function_param import ToolChoiceFunctionParam
from .realtime_response_create_mcp_tool_param import RealtimeResponseCreateMcpToolParam
from .realtime_response_create_audio_output_param import RealtimeResponseCreateAudioOutputParam
__all__ = ["RealtimeResponseCreateParamsParam", "ToolChoice", "Tool"]
# A string tool-choice mode, or an object forcing a specific function/MCP tool.
ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceFunctionParam, ToolChoiceMcpParam]
# A tool available to the model: a realtime function tool or a remote MCP tool.
Tool: TypeAlias = Union[RealtimeFunctionToolParam, RealtimeResponseCreateMcpToolParam]
class RealtimeResponseCreateParamsParam(TypedDict, total=False):
    """Parameters used to create a new Realtime response."""

    audio: RealtimeResponseCreateAudioOutputParam
    """Audio input/output configuration for this response."""

    conversation: Union[str, Literal["auto", "none"]]
    """Which conversation the response is appended to.

    Currently supports `auto` and `none`, with `auto` as the default. `auto`
    appends the response contents to the default conversation; `none`
    produces an out-of-band response whose items are not added to the
    default conversation.
    """

    input: Iterable[ConversationItemParam]
    """Input items used as the model prompt.

    Supplying this field gives the Response its own context instead of the
    default conversation; an empty array `[]` clears that context. Items
    that already appeared in the session may be referenced here by id.
    """

    instructions: str
    """Default system instructions (i.e. system message) prepended to model
    calls.

    Lets the client steer response content and format (e.g. "be extremely
    succinct", "act friendly", "here are examples of good responses") as
    well as audio behavior (e.g. "talk quickly", "inject emotion into your
    voice", "laugh frequently"). The model is not guaranteed to follow
    them, but they guide it toward the desired behavior. If unset, the
    server applies default instructions, visible in the `session.created`
    event at the start of the session.
    """

    max_output_tokens: Union[int, Literal["inf"]]
    """Cap on output tokens for a single assistant response, tool calls
    included.

    An integer between 1 and 4096 limits output tokens; `inf` (the default)
    allows the maximum available for the model.
    """

    metadata: Optional[Metadata]
    """Set of up to 16 key-value pairs attached to the object.

    Useful for storing extra structured information and for querying
    objects via the API or dashboard. Keys are strings of at most 64
    characters; values are strings of at most 512 characters.
    """

    output_modalities: List[Literal["text", "audio"]]
    """Modalities the model responds with; the only possible values are
    `[\"audio\"]` and `[\"text\"]`.

    Audio output always includes a text transcript. Setting the output mode
    to `text` disables audio output from the model.
    """

    prompt: Optional[ResponsePromptParam]
    """Reference to a prompt template and its variables.

    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    """

    tool_choice: ToolChoice
    """How the model picks tools: one of the string modes, or a forced
    function/MCP tool.
    """

    tools: Iterable[Tool]
    """Tools the model may call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_response_create_params_param.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_session_client_secret.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ..._models import BaseModel
__all__ = ["RealtimeSessionClientSecret"]
class RealtimeSessionClientSecret(BaseModel):
    """Ephemeral client key returned by the API."""

    expires_at: int
    """Unix timestamp at which the token stops working.

    All tokens currently expire one minute after creation.
    """

    value: str
    """The ephemeral key itself.

    Intended for authenticating Realtime API connections from client-side
    environments; server-side code should keep using a standard API token
    instead.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_session_client_secret.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .realtime_transcription_session_audio_input import RealtimeTranscriptionSessionAudioInput
__all__ = ["RealtimeTranscriptionSessionAudio"]
class RealtimeTranscriptionSessionAudio(BaseModel):
    """Audio input and output configuration for a transcription session."""

    input: Optional[RealtimeTranscriptionSessionAudioInput] = None
    """Input-audio settings."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio_input.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .audio_transcription import AudioTranscription
from .noise_reduction_type import NoiseReductionType
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_transcription_session_audio_input_turn_detection import (
RealtimeTranscriptionSessionAudioInputTurnDetection,
)
__all__ = ["RealtimeTranscriptionSessionAudioInput", "NoiseReduction"]
class NoiseReduction(BaseModel):
    """Input-audio noise-reduction settings; set to `null` to disable.

    Noise reduction runs on audio added to the input buffer before it
    reaches VAD and the model. Filtering can improve VAD and turn-detection
    accuracy (fewer false positives) and help model performance by cleaning
    up the audio the model perceives.
    """

    type: Optional[NoiseReductionType] = None
    """Kind of noise reduction to apply.

    `near_field` suits close-talking microphones such as headphones;
    `far_field` suits far-field microphones such as laptop or
    conference-room microphones.
    """
class RealtimeTranscriptionSessionAudioInput(BaseModel):
    """Input-audio configuration for a transcription session."""

    format: Optional[RealtimeAudioFormats] = None
    """PCM audio format; only a 24kHz sample rate is supported."""

    noise_reduction: Optional[NoiseReduction] = None
    """Input-audio noise-reduction settings; set to `null` to disable.

    Noise reduction runs on audio added to the input buffer before it
    reaches VAD and the model. Filtering can improve VAD and turn-detection
    accuracy (fewer false positives) and help model performance by cleaning
    up the audio the model perceives.
    """

    transcription: Optional[AudioTranscription] = None
    """Input-audio transcription settings; off by default, and can be set
    back to `null` to turn off once enabled.

    Transcription is not native to the model, which consumes audio
    directly; it runs asynchronously through
    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
    and should be treated as a guide to the input audio's content rather
    than exactly what the model heard. The client may optionally set a
    language and prompt to give the transcription service extra guidance.
    """

    turn_detection: Optional[RealtimeTranscriptionSessionAudioInputTurnDetection] = None
    """Turn-detection settings: Server VAD or Semantic VAD, or `null` to
    disable, in which case the client must trigger model responses
    manually.

    Server VAD detects the start and end of speech from audio volume and
    responds at the end of user speech. Semantic VAD is more advanced: it
    combines a turn-detection model with VAD to estimate whether the user
    has finished speaking, then sets a timeout dynamically from that
    probability — e.g. audio trailing off with "uhhm" scores a low
    probability of turn end, so the model waits longer. This gives more
    natural conversations but may add latency.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio_input.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio_input_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import TypedDict
from .noise_reduction_type import NoiseReductionType
from .audio_transcription_param import AudioTranscriptionParam
from .realtime_audio_formats_param import RealtimeAudioFormatsParam
from .realtime_transcription_session_audio_input_turn_detection_param import (
RealtimeTranscriptionSessionAudioInputTurnDetectionParam,
)
__all__ = ["RealtimeTranscriptionSessionAudioInputParam", "NoiseReduction"]
class NoiseReduction(TypedDict, total=False):
    """Input-audio noise-reduction settings; set to `null` to disable.

    Noise reduction runs on audio added to the input buffer before it
    reaches VAD and the model. Filtering can improve VAD and turn-detection
    accuracy (fewer false positives) and help model performance by cleaning
    up the audio the model perceives.
    """

    type: NoiseReductionType
    """Kind of noise reduction to apply.

    `near_field` suits close-talking microphones such as headphones;
    `far_field` suits far-field microphones such as laptop or
    conference-room microphones.
    """
class RealtimeTranscriptionSessionAudioInputParam(TypedDict, total=False):
    """Input-audio configuration for a transcription session."""

    format: RealtimeAudioFormatsParam
    """PCM audio format; only a 24kHz sample rate is supported."""

    noise_reduction: NoiseReduction
    """Input-audio noise-reduction settings; set to `null` to disable.

    Noise reduction runs on audio added to the input buffer before it
    reaches VAD and the model. Filtering can improve VAD and turn-detection
    accuracy (fewer false positives) and help model performance by cleaning
    up the audio the model perceives.
    """

    transcription: AudioTranscriptionParam
    """Input-audio transcription settings; off by default, and can be set
    back to `null` to turn off once enabled.

    Transcription is not native to the model, which consumes audio
    directly; it runs asynchronously through
    [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
    and should be treated as a guide to the input audio's content rather
    than exactly what the model heard. The client may optionally set a
    language and prompt to give the transcription service extra guidance.
    """

    turn_detection: Optional[RealtimeTranscriptionSessionAudioInputTurnDetectionParam]
    """Turn-detection settings: Server VAD or Semantic VAD, or `null` to
    disable, in which case the client must trigger model responses
    manually.

    Server VAD detects the start and end of speech from audio volume and
    responds at the end of user speech. Semantic VAD is more advanced: it
    combines a turn-detection model with VAD to estimate whether the user
    has finished speaking, then sets a timeout dynamically from that
    probability — e.g. audio trailing off with "uhhm" scores a low
    probability of turn end, so the model waits longer. This gives more
    natural conversations but may add latency.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio_input_param.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = ["RealtimeTranscriptionSessionAudioInputTurnDetection", "ServerVad", "SemanticVad"]
class ServerVad(BaseModel):
"""
Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence.
"""
type: Literal["server_vad"]
"""Type of turn detection, `server_vad` to turn on simple Server VAD."""
create_response: Optional[bool] = None
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
If `interrupt_response` is set to `false` this may fail to create a response if
the model is already responding.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
idle_timeout_ms: Optional[int] = None
"""Optional timeout after which a model response will be triggered automatically.
This is useful for situations in which a long pause from the user is unexpected,
such as a phone call. The model will effectively prompt the user to continue the
conversation based on the current context.
The timeout value will be applied after the last model response's audio has
finished playing, i.e. it's set to the `response.done` time plus audio playback
duration.
An `input_audio_buffer.timeout_triggered` event (plus events associated with the
Response) will be emitted when the timeout is reached. Idle timeout is currently
only supported for `server_vad` mode.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt (cancel) any ongoing response with
output to the default conversation (i.e. `conversation` of `auto`) when a VAD
start event occurs. If `true` then the response will be cancelled, otherwise it
will continue until complete.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
prefix_padding_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: Optional[float] = None
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
class SemanticVad(BaseModel):
"""
Server-side semantic turn detection which uses a model to determine when the user has finished speaking.
"""
type: Literal["semantic_vad"]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: Optional[bool] = None
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeTranscriptionSessionAudioInputTurnDetection: TypeAlias = Annotated[
Union[ServerVad, SemanticVad, None], PropertyInfo(discriminator="type")
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = ["RealtimeTranscriptionSessionAudioInputTurnDetectionParam", "ServerVad", "SemanticVad"]
class ServerVad(TypedDict, total=False):
"""
Server-side voice activity detection (VAD) which flips on when user speech is detected and off after a period of silence.
"""
type: Required[Literal["server_vad"]]
"""Type of turn detection, `server_vad` to turn on simple Server VAD."""
create_response: bool
"""Whether or not to automatically generate a response when a VAD stop event
occurs.
If `interrupt_response` is set to `false` this may fail to create a response if
the model is already responding.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
idle_timeout_ms: Optional[int]
"""Optional timeout after which a model response will be triggered automatically.
This is useful for situations in which a long pause from the user is unexpected,
such as a phone call. The model will effectively prompt the user to continue the
conversation based on the current context.
The timeout value will be applied after the last model response's audio has
finished playing, i.e. it's set to the `response.done` time plus audio playback
duration.
An `input_audio_buffer.timeout_triggered` event (plus events associated with the
Response) will be emitted when the timeout is reached. Idle timeout is currently
only supported for `server_vad` mode.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt (cancel) any ongoing response with
output to the default conversation (i.e. `conversation` of `auto`) when a VAD
start event occurs. If `true` then the response will be cancelled, otherwise it
will continue until complete.
If both `create_response` and `interrupt_response` are set to `false`, the model
will never respond automatically but VAD events will still be emitted.
"""
prefix_padding_ms: int
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: int
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: float
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
class SemanticVad(TypedDict, total=False):
"""
Server-side semantic turn detection which uses a model to determine when the user has finished speaking.
"""
type: Required[Literal["semantic_vad"]]
"""Type of turn detection, `semantic_vad` to turn on Semantic VAD."""
create_response: bool
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Literal["low", "medium", "high", "auto"]
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`. `low`, `medium`, and `high` have max timeouts of 8s,
4s, and 2s respectively.
"""
interrupt_response: bool
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
RealtimeTranscriptionSessionAudioInputTurnDetectionParam: TypeAlias = Union[ServerVad, SemanticVad]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_audio_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
from .realtime_transcription_session_audio_input_param import RealtimeTranscriptionSessionAudioInputParam
__all__ = ["RealtimeTranscriptionSessionAudioParam"]
class RealtimeTranscriptionSessionAudioParam(TypedDict, total=False):
"""Configuration for input and output audio."""
input: RealtimeTranscriptionSessionAudioInputParam
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_audio_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_create_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .audio_transcription import AudioTranscription
from .noise_reduction_type import NoiseReductionType
from .realtime_audio_formats import RealtimeAudioFormats
from .realtime_transcription_session_turn_detection import RealtimeTranscriptionSessionTurnDetection
__all__ = ["RealtimeTranscriptionSessionCreateResponse", "Audio", "AudioInput", "AudioInputNoiseReduction"]
class AudioInputNoiseReduction(BaseModel):
"""Configuration for input audio noise reduction."""
type: Optional[NoiseReductionType] = None
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
class AudioInput(BaseModel):
format: Optional[RealtimeAudioFormats] = None
"""The PCM audio format. Only a 24kHz sample rate is supported."""
noise_reduction: Optional[AudioInputNoiseReduction] = None
"""Configuration for input audio noise reduction."""
transcription: Optional[AudioTranscription] = None
"""Configuration of the transcription model."""
turn_detection: Optional[RealtimeTranscriptionSessionTurnDetection] = None
"""Configuration for turn detection.
Can be set to `null` to turn off. Server VAD means that the model will detect
the start and end of speech based on audio volume and respond at the end of user
speech.
"""
class Audio(BaseModel):
"""Configuration for input audio for the session."""
input: Optional[AudioInput] = None
class RealtimeTranscriptionSessionCreateResponse(BaseModel):
"""A Realtime transcription session configuration object."""
id: str
"""Unique identifier for the session that looks like `sess_1234567890abcdef`."""
object: str
"""The object type. Always `realtime.transcription_session`."""
type: Literal["transcription"]
"""The type of session. Always `transcription` for transcription sessions."""
audio: Optional[Audio] = None
"""Configuration for input audio for the session."""
expires_at: Optional[int] = None
"""Expiration timestamp for the session, in seconds since epoch."""
include: Optional[List[Literal["item.input_audio_transcription.logprobs"]]] = None
"""Additional fields to include in server outputs.
- `item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_create_response.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_transcription_session_turn_detection.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
__all__ = ["RealtimeTranscriptionSessionTurnDetection"]
class RealtimeTranscriptionSessionTurnDetection(BaseModel):
"""Configuration for turn detection.
Can be set to `null` to turn off. Server
VAD means that the model will detect the start and end of speech based on
audio volume and respond at the end of user speech.
"""
prefix_padding_ms: Optional[int] = None
"""Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Duration of silence to detect speech stop (in milliseconds).
Defaults to 500ms. With shorter values the model will respond more quickly, but
may jump in on short pauses from the user.
"""
threshold: Optional[float] = None
"""Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5.
A higher threshold will require louder audio to activate the model, and thus
might perform better in noisy environments.
"""
type: Optional[str] = None
"""Type of turn detection, only `server_vad` is currently supported."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_transcription_session_turn_detection.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_truncation_retention_ratio.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeTruncationRetentionRatio", "TokenLimits"]
class TokenLimits(BaseModel):
    """Optional custom token limits for this truncation strategy.

    If not provided, the model's default token limits will be used.
    """

    post_instructions: Optional[int] = None
    """
    Maximum tokens allowed in the conversation after instructions (which include
    tool definitions). For example, setting this to 5,000 would mean that truncation
    would occur when the conversation exceeds 5,000 tokens after instructions. This
    cannot be higher than the model's context window size minus the maximum output
    tokens.
    """
class RealtimeTruncationRetentionRatio(BaseModel):
"""
Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage.
"""
retention_ratio: float
"""
Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when
the conversation exceeds the input token limit. Setting this to `0.8` means that
messages will be dropped until 80% of the maximum allowed tokens are used. This
helps reduce the frequency of truncations and improve cache rates.
"""
type: Literal["retention_ratio"]
"""Use retention ratio truncation."""
token_limits: Optional[TokenLimits] = None
"""Optional custom token limits for this truncation strategy.
If not provided, the model's default token limits will be used.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_truncation_retention_ratio.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_truncation_retention_ratio_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeTruncationRetentionRatioParam", "TokenLimits"]
class TokenLimits(TypedDict, total=False):
    """Optional custom token limits for this truncation strategy.

    If not provided, the model's default token limits will be used.
    """

    post_instructions: int
    """
    Maximum tokens allowed in the conversation after instructions (which include
    tool definitions). For example, setting this to 5,000 would mean that truncation
    would occur when the conversation exceeds 5,000 tokens after instructions. This
    cannot be higher than the model's context window size minus the maximum output
    tokens.
    """
class RealtimeTruncationRetentionRatioParam(TypedDict, total=False):
"""
Retain a fraction of the conversation tokens when the conversation exceeds the input token limit. This allows you to amortize truncations across multiple turns, which can help improve cached token usage.
"""
retention_ratio: Required[float]
"""
Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when
the conversation exceeds the input token limit. Setting this to `0.8` means that
messages will be dropped until 80% of the maximum allowed tokens are used. This
helps reduce the frequency of truncations and improve cache rates.
"""
type: Required[Literal["retention_ratio"]]
"""Use retention ratio truncation."""
token_limits: TokenLimits
"""Optional custom token limits for this truncation strategy.
If not provided, the model's default token limits will be used.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_truncation_retention_ratio_param.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/_utils/_compat.py | from __future__ import annotations
import sys
import typing_extensions
from typing import Any, Type, Union, Literal, Optional
from datetime import date, datetime
from typing_extensions import get_args as _get_args, get_origin as _get_origin
from .._types import StrBytesIntFloat
from ._datetime_parse import parse_date as _parse_date, parse_datetime as _parse_datetime
_LITERAL_TYPES = {Literal, typing_extensions.Literal}
def get_args(tp: type[Any]) -> tuple[Any, ...]:
    """Return the type arguments of *tp* (thin wrapper over ``typing_extensions.get_args``)."""
    return _get_args(tp)
def get_origin(tp: type[Any]) -> type[Any] | None:
    """Return the unsubscripted origin of *tp*, or ``None`` for plain types (wraps ``typing_extensions.get_origin``)."""
    return _get_origin(tp)
def is_union(tp: Optional[Type[Any]]) -> bool:
    """Return whether *tp* is the ``Union`` special form.

    On Python 3.10+ this also recognises ``types.UnionType``, the runtime
    type of PEP 604 ``X | Y`` unions.
    """
    if sys.version_info >= (3, 10):
        import types

        return tp is Union or tp is types.UnionType  # type: ignore[comparison-overlap]
    return tp is Union  # type: ignore[comparison-overlap]
def is_typeddict(tp: Type[Any]) -> bool:
    """Return whether *tp* was declared as a ``TypedDict`` (delegates to ``typing_extensions``)."""
    return typing_extensions.is_typeddict(tp)
def is_literal_type(tp: Type[Any]) -> bool:
    """Return whether *tp* is a ``Literal[...]`` type from either ``typing`` or ``typing_extensions``."""
    return get_origin(tp) in _LITERAL_TYPES
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """Parse *value* into a ``date``; delegates to the vendored pydantic-v1 parser."""
    return _parse_date(value)
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """Parse *value* into a ``datetime``; delegates to the vendored pydantic-v1 parser."""
    return _parse_datetime(value)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/_utils/_compat.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/_utils/_datetime_parse.py | """
This file contains code from https://github.com/pydantic/pydantic/blob/main/pydantic/v1/datetime_parse.py
without the Pydantic v1 specific errors.
"""
from __future__ import annotations
import re
from typing import Dict, Union, Optional
from datetime import date, datetime, timezone, timedelta
from .._types import StrBytesIntFloat
date_expr = r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
time_expr = (
r"(?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
date_re = re.compile(f"{date_expr}$")
datetime_re = re.compile(f"{date_expr}[T ]{time_expr}")
EPOCH = datetime(1970, 1, 1)
# if greater than this, the number is in ms, if less than or equal it's in seconds
# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
MS_WATERSHED = int(2e10)
# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
MAX_NUMBER = int(3e20)
def _get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
if isinstance(value, (int, float)):
return value
try:
return float(value)
except ValueError:
return None
except TypeError:
raise TypeError(f"invalid type; expected {native_expected_type}, string, bytes, int or float") from None
def _from_unix_seconds(seconds: Union[int, float]) -> datetime:
    """Convert a unix timestamp into an aware UTC datetime.

    Magnitudes beyond ``MAX_NUMBER`` clamp to ``datetime.max`` /
    ``datetime.min``; values too large to be seconds are repeatedly divided
    by 1000, so millisecond/microsecond/nanosecond stamps are accepted too.
    """
    if seconds > MAX_NUMBER:
        return datetime.max
    if seconds < -MAX_NUMBER:
        return datetime.min
    while abs(seconds) > MS_WATERSHED:
        seconds /= 1000
    moment = EPOCH + timedelta(seconds=seconds)
    return moment.replace(tzinfo=timezone.utc)
def _parse_timezone(value: Optional[str]) -> Union[None, int, timezone]:
if value == "Z":
return timezone.utc
elif value is not None:
offset_mins = int(value[-2:]) if len(value) > 3 else 0
offset = 60 * int(value[1:3]) + offset_mins
if value[0] == "-":
offset = -offset
return timezone(timedelta(minutes=offset))
else:
return None
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
    """
    Parse a datetime/int/float/string and return a datetime.datetime.
    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raise ValueError if the input is well formatted but not a valid datetime.
    Raise ValueError if the input isn't well formatted.
    """
    if isinstance(value, datetime):
        return value
    numeric = _get_numeric(value, "datetime")
    if numeric is not None:
        # Numeric input is a unix timestamp (seconds or milliseconds).
        return _from_unix_seconds(numeric)
    if isinstance(value, bytes):
        value = value.decode()
    assert not isinstance(value, (float, int))
    match = datetime_re.match(value)
    if match is None:
        raise ValueError("invalid datetime format")
    groups = match.groupdict()
    if groups["microsecond"]:
        # Right-pad so e.g. ".4" is read as 400000us rather than 4us.
        groups["microsecond"] = groups["microsecond"].ljust(6, "0")
    tzinfo = _parse_timezone(groups.pop("tzinfo"))
    fields: Dict[str, Union[None, int, timezone]] = {name: int(raw) for name, raw in groups.items() if raw is not None}
    fields["tzinfo"] = tzinfo
    return datetime(**fields)  # type: ignore
def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
    """
    Parse a date/int/float/string and return a datetime.date.
    Raise ValueError if the input is well formatted but not a valid date.
    Raise ValueError if the input isn't well formatted.
    """
    if isinstance(value, datetime):
        # datetime is a date subclass; truncate it rather than return as-is.
        return value.date()
    if isinstance(value, date):
        return value
    numeric = _get_numeric(value, "date")
    if numeric is not None:
        return _from_unix_seconds(numeric).date()
    if isinstance(value, bytes):
        value = value.decode()
    assert not isinstance(value, (float, int))
    match = date_re.match(value)
    if match is None:
        raise ValueError("invalid date format")
    fields = {name: int(raw) for name, raw in match.groupdict().items()}
    try:
        return date(**fields)
    except ValueError:
        raise ValueError("invalid date format") from None
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/_utils/_datetime_parse.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:tests/test_utils/test_datetime_parse.py | """
Copied from https://github.com/pydantic/pydantic/blob/v1.10.22/tests/test_datetime_parse.py
with modifications so it works without pydantic v1 imports.
"""
from typing import Type, Union
from datetime import date, datetime, timezone, timedelta
import pytest
from openai._utils import parse_date, parse_datetime
def create_tz(minutes: int) -> timezone:
    """Return a fixed-offset timezone *minutes* east of UTC (negative = west)."""
    offset = timedelta(minutes=minutes)
    return timezone(offset)
@pytest.mark.parametrize(
"value,result",
[
# Valid inputs
("1494012444.883309", date(2017, 5, 5)),
(b"1494012444.883309", date(2017, 5, 5)),
(1_494_012_444.883_309, date(2017, 5, 5)),
("1494012444", date(2017, 5, 5)),
(1_494_012_444, date(2017, 5, 5)),
(0, date(1970, 1, 1)),
("2012-04-23", date(2012, 4, 23)),
(b"2012-04-23", date(2012, 4, 23)),
("2012-4-9", date(2012, 4, 9)),
(date(2012, 4, 9), date(2012, 4, 9)),
(datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
# Invalid inputs
("x20120423", ValueError),
("2012-04-56", ValueError),
(19_999_999_999, date(2603, 10, 11)), # just before watershed
(20_000_000_001, date(1970, 8, 20)), # just after watershed
(1_549_316_052, date(2019, 2, 4)), # nowish in s
(1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
(1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
(1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
("infinity", date(9999, 12, 31)),
("inf", date(9999, 12, 31)),
(float("inf"), date(9999, 12, 31)),
("infinity ", date(9999, 12, 31)),
(int("1" + "0" * 100), date(9999, 12, 31)),
(1e1000, date(9999, 12, 31)),
("-infinity", date(1, 1, 1)),
("-inf", date(1, 1, 1)),
("nan", ValueError),
],
)
def test_date_parsing(value: Union[str, bytes, int, float], result: Union[date, Type[Exception]]) -> None:
if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
with pytest.raises(result):
parse_date(value)
else:
assert parse_date(value) == result
@pytest.mark.parametrize(
"value,result",
[
# Valid inputs
# values in seconds
("1494012444.883309", datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
(1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
("1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(b"1494012444", datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
# values in ms
("1494012444000.883309", datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
("-1494012444000.883309", datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
(1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
("2012-04-23T09:15:00", datetime(2012, 4, 23, 9, 15)),
("2012-4-9 4:8:16", datetime(2012, 4, 9, 4, 8, 16)),
("2012-04-23T09:15:00Z", datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
("2012-4-9 4:8:16-0320", datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
("2012-04-23T10:20:30.400+02:30", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
("2012-04-23T10:20:30.400+02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
("2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(b"2012-04-23T10:20:30.400-02", datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(datetime(2017, 5, 5), datetime(2017, 5, 5)),
(0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
# Invalid inputs
("x20120423091500", ValueError),
("2012-04-56T09:15:90", ValueError),
("2012-04-23T11:05:00-25:00", ValueError),
(19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
(20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
(1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
(1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
(1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
(1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
("infinity", datetime(9999, 12, 31, 23, 59, 59, 999999)),
("inf", datetime(9999, 12, 31, 23, 59, 59, 999999)),
("inf ", datetime(9999, 12, 31, 23, 59, 59, 999999)),
(1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
(float("inf"), datetime(9999, 12, 31, 23, 59, 59, 999999)),
("-infinity", datetime(1, 1, 1, 0, 0)),
("-inf", datetime(1, 1, 1, 0, 0)),
("nan", ValueError),
],
)
def test_datetime_parsing(value: Union[str, bytes, int, float], result: Union[datetime, Type[Exception]]) -> None:
if type(result) == type and issubclass(result, Exception): # pyright: ignore[reportUnnecessaryIsInstance]
with pytest.raises(result):
parse_datetime(value)
else:
assert parse_datetime(value) == result
| {
"repo_id": "openai/openai-python",
"file_path": "tests/test_utils/test_datetime_parse.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/responses/response_input_audio.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseInputAudio", "InputAudio"]
class InputAudio(BaseModel):
data: str
"""Base64-encoded audio data."""
format: Literal["mp3", "wav"]
"""The format of the audio data. Currently supported formats are `mp3` and `wav`."""
class ResponseInputAudio(BaseModel):
"""An audio input to the model."""
input_audio: InputAudio
type: Literal["input_audio"]
"""The type of the input item. Always `input_audio`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_audio.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_input_audio_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseInputAudioParam", "InputAudio"]
class InputAudio(TypedDict, total=False):
data: Required[str]
"""Base64-encoded audio data."""
format: Required[Literal["mp3", "wav"]]
"""The format of the audio data. Currently supported formats are `mp3` and `wav`."""
class ResponseInputAudioParam(TypedDict, total=False):
"""An audio input to the model."""
input_audio: Required[InputAudio]
type: Required[Literal["input_audio"]]
"""The type of the input item. Always `input_audio`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_audio_param.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:examples/realtime/realtime.py | #!/usr/bin/env rye run python
import asyncio
from openai import AsyncOpenAI
# Azure OpenAI Realtime Docs
# How-to: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio
# Supported models and API versions: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio#supported-models
# Entra ID auth: https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity
async def main() -> None:
    """The following example demonstrates how to configure OpenAI to use the Realtime API.

    For an audio example, see push_to_talk_app.py and update the client and model parameter accordingly.

    When prompted for user input, type a message and hit enter to send it to the model.
    Enter "q" to quit the conversation.
    """
    client = AsyncOpenAI()

    async with client.realtime.connect(
        model="gpt-realtime",
    ) as connection:
        # Restrict the session to text output; audio is covered by the
        # push-to-talk example.
        await connection.session.update(
            session={
                "output_modalities": ["text"],
                "model": "gpt-realtime",
                "type": "realtime",
            }
        )
        while True:
            user_input = input("Enter a message: ")
            if user_input == "q":
                break

            # Queue the user's message on the conversation, then ask the
            # model to produce a response to it.
            await connection.conversation.item.create(
                item={
                    "type": "message",
                    "role": "user",
                    "content": [{"type": "input_text", "text": user_input}],
                }
            )
            await connection.response.create()

            # Stream text deltas until this response finishes.
            async for event in connection:
                if event.type == "response.output_text.delta":
                    print(event.delta, flush=True, end="")
                elif event.type == "response.output_text.done":
                    print()
                elif event.type == "response.done":
                    break
asyncio.run(main())
| {
"repo_id": "openai/openai-python",
"file_path": "examples/realtime/realtime.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/realtime/client_secrets.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.realtime import client_secret_create_params
from ...types.realtime.client_secret_create_response import ClientSecretCreateResponse
__all__ = ["ClientSecrets", "AsyncClientSecrets"]
class ClientSecrets(SyncAPIResource):
    """Synchronous resource for minting Realtime API client secrets (`ek_...` tokens)."""

    @cached_property
    def with_raw_response(self) -> ClientSecretsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ClientSecretsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ClientSecretsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ClientSecretsWithStreamingResponse(self)

    def create(
        self,
        *,
        expires_after: client_secret_create_params.ExpiresAfter | Omit = omit,
        session: client_secret_create_params.Session | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ClientSecretCreateResponse:
        """
        Create a Realtime client secret with an associated session configuration.

        Client secrets are short-lived tokens that can be passed to a client app, such
        as a web frontend or mobile client, which grants access to the Realtime API
        without leaking your main API key. You can configure a custom TTL for each
        client secret.

        You can also attach session configuration options to the client secret, which
        will be applied to any sessions created using that client secret, but these can
        also be overridden by the client connection.

        [Learn more about authentication with client secrets over WebRTC](https://platform.openai.com/docs/guides/realtime-webrtc).

        Returns the created client secret and the effective session object. The client
        secret is a string that looks like `ek_1234`.

        Args:
          expires_after: Configuration for the client secret expiration. Expiration refers to the time
              after which a client secret will no longer be valid for creating sessions. The
              session itself may continue after that time once started. A secret can be used
              to create multiple sessions until it expires.

          session: Session configuration to use for the client secret. Choose either a realtime
              session or a transcription session.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /realtime/client_secrets. `maybe_transform` converts the typed params
        # into the API's wire format; fields left as the `omit` sentinel are
        # presumably dropped before serialization — confirm in `maybe_transform`.
        return self._post(
            "/realtime/client_secrets",
            body=maybe_transform(
                {
                    "expires_after": expires_after,
                    "session": session,
                },
                client_secret_create_params.ClientSecretCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ClientSecretCreateResponse,
        )
class AsyncClientSecrets(AsyncAPIResource):
    """Asynchronous resource for minting Realtime API client secrets (`ek_...` tokens)."""

    @cached_property
    def with_raw_response(self) -> AsyncClientSecretsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncClientSecretsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncClientSecretsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncClientSecretsWithStreamingResponse(self)

    async def create(
        self,
        *,
        expires_after: client_secret_create_params.ExpiresAfter | Omit = omit,
        session: client_secret_create_params.Session | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ClientSecretCreateResponse:
        """
        Create a Realtime client secret with an associated session configuration.

        Client secrets are short-lived tokens that can be passed to a client app, such
        as a web frontend or mobile client, which grants access to the Realtime API
        without leaking your main API key. You can configure a custom TTL for each
        client secret.

        You can also attach session configuration options to the client secret, which
        will be applied to any sessions created using that client secret, but these can
        also be overridden by the client connection.

        [Learn more about authentication with client secrets over WebRTC](https://platform.openai.com/docs/guides/realtime-webrtc).

        Returns the created client secret and the effective session object. The client
        secret is a string that looks like `ek_1234`.

        Args:
          expires_after: Configuration for the client secret expiration. Expiration refers to the time
              after which a client secret will no longer be valid for creating sessions. The
              session itself may continue after that time once started. A secret can be used
              to create multiple sessions until it expires.

          session: Session configuration to use for the client secret. Choose either a realtime
              session or a transcription session.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /realtime/client_secrets — async twin of `ClientSecrets.create`;
        # uses the awaitable transform helper but is otherwise identical.
        return await self._post(
            "/realtime/client_secrets",
            body=await async_maybe_transform(
                {
                    "expires_after": expires_after,
                    "session": session,
                },
                client_secret_create_params.ClientSecretCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ClientSecretCreateResponse,
        )
class ClientSecretsWithRawResponse:
    """Raw-response variant of `ClientSecrets`: methods return the unparsed HTTP response."""

    def __init__(self, client_secrets: ClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = _legacy_response.to_raw_response_wrapper(client_secrets.create)
class AsyncClientSecretsWithRawResponse:
    """Raw-response variant of `AsyncClientSecrets`: methods return the unparsed HTTP response."""

    def __init__(self, client_secrets: AsyncClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = _legacy_response.async_to_raw_response_wrapper(client_secrets.create)
class ClientSecretsWithStreamingResponse:
    """Streaming-response variant of `ClientSecrets`: response bodies are not read eagerly."""

    def __init__(self, client_secrets: ClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = to_streamed_response_wrapper(client_secrets.create)
class AsyncClientSecretsWithStreamingResponse:
    """Streaming-response variant of `AsyncClientSecrets`: response bodies are not read eagerly."""

    def __init__(self, client_secrets: AsyncClientSecrets) -> None:
        self._client_secrets = client_secrets
        self.create = async_to_streamed_response_wrapper(client_secrets.create)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/realtime/client_secrets.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/realtime/realtime.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import json
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Any, Iterator, cast
from typing_extensions import AsyncIterator
import httpx
from pydantic import BaseModel
from .calls import (
Calls,
AsyncCalls,
CallsWithRawResponse,
AsyncCallsWithRawResponse,
CallsWithStreamingResponse,
AsyncCallsWithStreamingResponse,
)
from ..._types import Omit, Query, Headers, omit
from ..._utils import (
is_azure_client,
maybe_transform,
strip_not_given,
async_maybe_transform,
is_async_azure_client,
)
from ..._compat import cached_property
from ..._models import construct_type_unchecked
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._exceptions import OpenAIError
from ..._base_client import _merge_mappings
from .client_secrets import (
ClientSecrets,
AsyncClientSecrets,
ClientSecretsWithRawResponse,
AsyncClientSecretsWithRawResponse,
ClientSecretsWithStreamingResponse,
AsyncClientSecretsWithStreamingResponse,
)
from ...types.realtime import session_update_event_param
from ...types.websocket_connection_options import WebsocketConnectionOptions
from ...types.realtime.realtime_client_event import RealtimeClientEvent
from ...types.realtime.realtime_server_event import RealtimeServerEvent
from ...types.realtime.conversation_item_param import ConversationItemParam
from ...types.realtime.realtime_client_event_param import RealtimeClientEventParam
from ...types.realtime.realtime_response_create_params_param import RealtimeResponseCreateParamsParam
if TYPE_CHECKING:
from websockets.sync.client import ClientConnection as WebsocketConnection
from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection
from ..._client import OpenAI, AsyncOpenAI
__all__ = ["Realtime", "AsyncRealtime"]
log: logging.Logger = logging.getLogger(__name__)
class Realtime(SyncAPIResource):
    """Synchronous entry point for the Realtime API: WebSocket sessions, SIP calls, and client secrets."""

    @cached_property
    def client_secrets(self) -> ClientSecrets:
        return ClientSecrets(self._client)

    @cached_property
    def calls(self) -> Calls:
        # NOTE(review): imported lazily from the private lib module — presumably to
        # avoid a circular import; confirm before restructuring.
        from ...lib._realtime import _Calls

        return _Calls(self._client)

    @cached_property
    def with_raw_response(self) -> RealtimeWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return RealtimeWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> RealtimeWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return RealtimeWithStreamingResponse(self)

    def connect(
        self,
        *,
        call_id: str | Omit = omit,
        model: str | Omit = omit,
        # NOTE(review): dict defaults are shared across calls (mutable-default caveat);
        # downstream code only rebinds/reads them, never mutates — verify if that changes.
        extra_query: Query = {},
        extra_headers: Headers = {},
        websocket_connection_options: WebsocketConnectionOptions = {},
    ) -> RealtimeConnectionManager:
        """
        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.

        Some notable benefits of the API include:

        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.

        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
        """
        # Returns a context manager; the WebSocket is only opened on __enter__/.enter().
        return RealtimeConnectionManager(
            client=self._client,
            extra_query=extra_query,
            extra_headers=extra_headers,
            websocket_connection_options=websocket_connection_options,
            call_id=call_id,
            model=model,
        )
class AsyncRealtime(AsyncAPIResource):
    """Asynchronous entry point for the Realtime API: WebSocket sessions, SIP calls, and client secrets."""

    @cached_property
    def client_secrets(self) -> AsyncClientSecrets:
        return AsyncClientSecrets(self._client)

    @cached_property
    def calls(self) -> AsyncCalls:
        # NOTE(review): imported lazily from the private lib module — presumably to
        # avoid a circular import; confirm before restructuring.
        from ...lib._realtime import _AsyncCalls

        return _AsyncCalls(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncRealtimeWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncRealtimeWithStreamingResponse(self)

    def connect(
        self,
        *,
        call_id: str | Omit = omit,
        model: str | Omit = omit,
        # NOTE(review): dict defaults are shared across calls (mutable-default caveat);
        # downstream code only rebinds/reads them, never mutates — verify if that changes.
        extra_query: Query = {},
        extra_headers: Headers = {},
        websocket_connection_options: WebsocketConnectionOptions = {},
    ) -> AsyncRealtimeConnectionManager:
        """
        The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.

        Some notable benefits of the API include:

        - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
        - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
        - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.

        The Realtime API is a stateful, event-based API that communicates over a WebSocket.
        """
        # Returns a context manager; the WebSocket is only opened on __aenter__/.enter().
        return AsyncRealtimeConnectionManager(
            client=self._client,
            extra_query=extra_query,
            extra_headers=extra_headers,
            websocket_connection_options=websocket_connection_options,
            call_id=call_id,
            model=model,
        )
class RealtimeWithRawResponse:
    """Raw-response variant of `Realtime`; sub-resources are wrapped lazily on first access."""

    def __init__(self, realtime: Realtime) -> None:
        self._realtime = realtime

    @cached_property
    def calls(self) -> CallsWithRawResponse:
        return CallsWithRawResponse(self._realtime.calls)

    @cached_property
    def client_secrets(self) -> ClientSecretsWithRawResponse:
        return ClientSecretsWithRawResponse(self._realtime.client_secrets)
class AsyncRealtimeWithRawResponse:
    """Raw-response variant of `AsyncRealtime`; sub-resources are wrapped lazily on first access."""

    def __init__(self, realtime: AsyncRealtime) -> None:
        self._realtime = realtime

    @cached_property
    def calls(self) -> AsyncCallsWithRawResponse:
        return AsyncCallsWithRawResponse(self._realtime.calls)

    @cached_property
    def client_secrets(self) -> AsyncClientSecretsWithRawResponse:
        return AsyncClientSecretsWithRawResponse(self._realtime.client_secrets)
class RealtimeWithStreamingResponse:
    """Streaming-response variant of `Realtime`; sub-resources are wrapped lazily on first access."""

    def __init__(self, realtime: Realtime) -> None:
        self._realtime = realtime

    @cached_property
    def calls(self) -> CallsWithStreamingResponse:
        return CallsWithStreamingResponse(self._realtime.calls)

    @cached_property
    def client_secrets(self) -> ClientSecretsWithStreamingResponse:
        return ClientSecretsWithStreamingResponse(self._realtime.client_secrets)
class AsyncRealtimeWithStreamingResponse:
    """Streaming-response variant of `AsyncRealtime`; sub-resources are wrapped lazily on first access."""

    def __init__(self, realtime: AsyncRealtime) -> None:
        self._realtime = realtime

    @cached_property
    def calls(self) -> AsyncCallsWithStreamingResponse:
        return AsyncCallsWithStreamingResponse(self._realtime.calls)

    @cached_property
    def client_secrets(self) -> AsyncClientSecretsWithStreamingResponse:
        return AsyncClientSecretsWithStreamingResponse(self._realtime.client_secrets)
class AsyncRealtimeConnection:
    """Represents a live WebSocket connection to the Realtime API."""

    session: AsyncRealtimeSessionResource
    response: AsyncRealtimeResponseResource
    input_audio_buffer: AsyncRealtimeInputAudioBufferResource
    conversation: AsyncRealtimeConversationResource
    output_audio_buffer: AsyncRealtimeOutputAudioBufferResource

    _connection: AsyncWebsocketConnection

    def __init__(self, connection: AsyncWebsocketConnection) -> None:
        self._connection = connection

        self.session = AsyncRealtimeSessionResource(self)
        self.response = AsyncRealtimeResponseResource(self)
        self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self)
        self.conversation = AsyncRealtimeConversationResource(self)
        self.output_audio_buffer = AsyncRealtimeOutputAudioBufferResource(self)

    async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]:
        """
        An infinite-iterator that will continue to yield events until
        the connection is closed.
        """
        from websockets.exceptions import ConnectionClosedOK

        try:
            while True:
                yield await self.recv()
        except ConnectionClosedOK:
            # A clean close from the peer simply ends iteration.
            return

    async def recv(self) -> RealtimeServerEvent:
        """
        Receive the next message from the connection and parses it into a `RealtimeServerEvent` object.

        Canceling this method is safe. There's no risk of losing data.
        """
        return self.parse_event(await self.recv_bytes())

    async def recv_bytes(self) -> bytes:
        """Receive the next message from the connection as raw bytes.

        Canceling this method is safe. There's no risk of losing data.

        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
        then you can call `.parse_event(data)`.
        """
        message = await self._connection.recv(decode=False)
        # Plain (non-f) format string so logging interpolates lazily, only when
        # DEBUG is enabled; the previous f-string prefix was unused and misleading.
        log.debug("Received websocket message: %s", message)
        return message

    async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
        """Serialize a client event (model or plain dict) to JSON and send it."""
        data = (
            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
            if isinstance(event, BaseModel)
            else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam))
        )
        await self._connection.send(data)

    async def close(self, *, code: int = 1000, reason: str = "") -> None:
        """Close the underlying WebSocket (1000 = normal closure)."""
        await self._connection.close(code=code, reason=reason)

    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
        """
        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.

        This is helpful if you're using `.recv_bytes()`.
        """
        return cast(
            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
        )
class AsyncRealtimeConnectionManager:
    """
    Context manager over a `AsyncRealtimeConnection` that is returned by `realtime.connect()`

    This context manager ensures that the connection will be closed when it exits.

    ---

    Note that if your application doesn't work well with the context manager approach then you
    can call the `.enter()` method directly to initiate a connection.

    **Warning**: You must remember to close the connection with `.close()`.

    ```py
    connection = await client.realtime.connect(...).enter()
    # ...
    await connection.close()
    ```
    """

    def __init__(
        self,
        *,
        client: AsyncOpenAI,
        call_id: str | Omit = omit,
        model: str | Omit = omit,
        extra_query: Query,
        extra_headers: Headers,
        websocket_connection_options: WebsocketConnectionOptions,
    ) -> None:
        self.__client = client
        self.__call_id = call_id
        self.__model = model
        self.__connection: AsyncRealtimeConnection | None = None
        self.__extra_query = extra_query
        self.__extra_headers = extra_headers
        self.__websocket_connection_options = websocket_connection_options

    async def __aenter__(self) -> AsyncRealtimeConnection:
        """
        👋 If your application doesn't work well with the context manager approach then you
        can call this method directly to initiate a connection.

        **Warning**: You must remember to close the connection with `.close()`.

        ```py
        connection = await client.realtime.connect(...).enter()
        # ...
        await connection.close()
        ```
        """
        try:
            from websockets.asyncio.client import connect
        except ImportError as exc:
            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc

        # Refresh credentials first so `auth_headers` reflects a current API key.
        await self.__client._refresh_api_key()
        auth_headers = self.__client.auth_headers

        # (The original assigned `extra_query` twice; once is sufficient.)
        extra_query = self.__extra_query
        if self.__call_id is not omit:
            extra_query = {**extra_query, "call_id": self.__call_id}

        if is_async_azure_client(self.__client):
            model = self.__model
            if not model:
                raise OpenAIError("`model` is required for Azure Realtime API")
            else:
                # Azure derives its own URL and auth headers (e.g. Entra ID tokens).
                url, auth_headers = await self.__client._configure_realtime(model, extra_query)
        else:
            url = self._prepare_url().copy_with(
                params={
                    **self.__client.base_url.params,
                    **({"model": self.__model} if self.__model is not omit else {}),
                    **extra_query,
                },
            )
        log.debug("Connecting to %s", url)
        if self.__websocket_connection_options:
            log.debug("Connection options: %s", self.__websocket_connection_options)

        self.__connection = AsyncRealtimeConnection(
            await connect(
                str(url),
                user_agent_header=self.__client.user_agent,
                additional_headers=_merge_mappings(
                    {
                        **auth_headers,
                    },
                    self.__extra_headers,
                ),
                **self.__websocket_connection_options,
            )
        )

        return self.__connection

    enter = __aenter__

    def _prepare_url(self) -> httpx.URL:
        # Prefer an explicitly configured websocket base URL; otherwise derive one
        # from the HTTP base URL by switching the scheme to wss.
        if self.__client.websocket_base_url is not None:
            base_url = httpx.URL(self.__client.websocket_base_url)
        else:
            base_url = self.__client._base_url.copy_with(scheme="wss")

        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
        return base_url.copy_with(raw_path=merge_raw_path)

    async def __aexit__(
        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        if self.__connection is not None:
            await self.__connection.close()
class RealtimeConnection:
    """Represents a live WebSocket connection to the Realtime API."""

    session: RealtimeSessionResource
    response: RealtimeResponseResource
    input_audio_buffer: RealtimeInputAudioBufferResource
    conversation: RealtimeConversationResource
    output_audio_buffer: RealtimeOutputAudioBufferResource

    _connection: WebsocketConnection

    def __init__(self, connection: WebsocketConnection) -> None:
        self._connection = connection

        self.session = RealtimeSessionResource(self)
        self.response = RealtimeResponseResource(self)
        self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
        self.conversation = RealtimeConversationResource(self)
        self.output_audio_buffer = RealtimeOutputAudioBufferResource(self)

    def __iter__(self) -> Iterator[RealtimeServerEvent]:
        """
        An infinite-iterator that will continue to yield events until
        the connection is closed.
        """
        from websockets.exceptions import ConnectionClosedOK

        try:
            while True:
                yield self.recv()
        except ConnectionClosedOK:
            # A clean close from the peer simply ends iteration.
            return

    def recv(self) -> RealtimeServerEvent:
        """
        Receive the next message from the connection and parses it into a `RealtimeServerEvent` object.

        Canceling this method is safe. There's no risk of losing data.
        """
        return self.parse_event(self.recv_bytes())

    def recv_bytes(self) -> bytes:
        """Receive the next message from the connection as raw bytes.

        Canceling this method is safe. There's no risk of losing data.

        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
        then you can call `.parse_event(data)`.
        """
        message = self._connection.recv(decode=False)
        # Plain (non-f) format string so logging interpolates lazily, only when
        # DEBUG is enabled; the previous f-string prefix was unused and misleading.
        log.debug("Received websocket message: %s", message)
        return message

    def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
        """Serialize a client event (model or plain dict) to JSON and send it."""
        data = (
            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
            if isinstance(event, BaseModel)
            else json.dumps(maybe_transform(event, RealtimeClientEventParam))
        )
        self._connection.send(data)

    def close(self, *, code: int = 1000, reason: str = "") -> None:
        """Close the underlying WebSocket (1000 = normal closure)."""
        self._connection.close(code=code, reason=reason)

    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
        """
        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.

        This is helpful if you're using `.recv_bytes()`.
        """
        return cast(
            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
        )
class RealtimeConnectionManager:
    """
    Context manager over a `RealtimeConnection` that is returned by `realtime.connect()`

    This context manager ensures that the connection will be closed when it exits.

    ---

    Note that if your application doesn't work well with the context manager approach then you
    can call the `.enter()` method directly to initiate a connection.

    **Warning**: You must remember to close the connection with `.close()`.

    ```py
    connection = client.realtime.connect(...).enter()
    # ...
    connection.close()
    ```
    """

    def __init__(
        self,
        *,
        client: OpenAI,
        call_id: str | Omit = omit,
        model: str | Omit = omit,
        extra_query: Query,
        extra_headers: Headers,
        websocket_connection_options: WebsocketConnectionOptions,
    ) -> None:
        self.__client = client
        self.__call_id = call_id
        self.__model = model
        self.__connection: RealtimeConnection | None = None
        self.__extra_query = extra_query
        self.__extra_headers = extra_headers
        self.__websocket_connection_options = websocket_connection_options

    def __enter__(self) -> RealtimeConnection:
        """
        👋 If your application doesn't work well with the context manager approach then you
        can call this method directly to initiate a connection.

        **Warning**: You must remember to close the connection with `.close()`.

        ```py
        connection = client.realtime.connect(...).enter()
        # ...
        connection.close()
        ```
        """
        try:
            from websockets.sync.client import connect
        except ImportError as exc:
            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc

        # Refresh credentials first so `auth_headers` reflects a current API key.
        self.__client._refresh_api_key()
        auth_headers = self.__client.auth_headers

        # (The original assigned `extra_query` twice; once is sufficient.)
        extra_query = self.__extra_query
        if self.__call_id is not omit:
            extra_query = {**extra_query, "call_id": self.__call_id}

        if is_azure_client(self.__client):
            model = self.__model
            if not model:
                raise OpenAIError("`model` is required for Azure Realtime API")
            else:
                # Azure derives its own URL and auth headers (e.g. Entra ID tokens).
                url, auth_headers = self.__client._configure_realtime(model, extra_query)
        else:
            url = self._prepare_url().copy_with(
                params={
                    **self.__client.base_url.params,
                    **({"model": self.__model} if self.__model is not omit else {}),
                    **extra_query,
                },
            )
        log.debug("Connecting to %s", url)
        if self.__websocket_connection_options:
            log.debug("Connection options: %s", self.__websocket_connection_options)

        self.__connection = RealtimeConnection(
            connect(
                str(url),
                user_agent_header=self.__client.user_agent,
                additional_headers=_merge_mappings(
                    {
                        **auth_headers,
                    },
                    self.__extra_headers,
                ),
                **self.__websocket_connection_options,
            )
        )

        return self.__connection

    enter = __enter__

    def _prepare_url(self) -> httpx.URL:
        # Prefer an explicitly configured websocket base URL; otherwise derive one
        # from the HTTP base URL by switching the scheme to wss.
        if self.__client.websocket_base_url is not None:
            base_url = httpx.URL(self.__client.websocket_base_url)
        else:
            base_url = self.__client._base_url.copy_with(scheme="wss")

        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
        return base_url.copy_with(raw_path=merge_raw_path)

    def __exit__(
        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        if self.__connection is not None:
            self.__connection.close()
class BaseRealtimeConnectionResource:
    """Shared base that binds an event-emitter sub-resource to its parent connection."""

    def __init__(self, connection: RealtimeConnection) -> None:
        self._connection = connection
class RealtimeSessionResource(BaseRealtimeConnectionResource):
    """Emitter for `session.*` client events on a realtime connection."""

    def update(self, *, session: session_update_event_param.Session, event_id: str | Omit = omit) -> None:
        """Send a `session.update` event to change the session's configuration.

        Any field except `voice` and `model` may be updated at any time (`voice`
        only while no audio has been output yet). Only the fields present in the
        event are changed; the server replies with a `session.updated` event
        carrying the full effective configuration. Clear a string field such as
        `instructions` with an empty string, a list field such as `tools` with an
        empty array, and `turn_detection` with null.
        """
        payload = strip_not_given({"type": "session.update", "session": session, "event_id": event_id})
        self._connection.send(cast(RealtimeClientEventParam, payload))
class RealtimeResponseResource(BaseRealtimeConnectionResource):
    """Emitter for `response.*` client events on a realtime connection."""

    def create(self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit) -> None:
        """Send a `response.create` event to trigger model inference.

        In Server VAD mode the server creates Responses automatically. A Response
        contains at least one Item (possibly two, the second being a function
        call), appended to the conversation history by default. The server emits
        `response.created`, item/content events, and finally `response.done`.

        Inference settings in the event (e.g. `instructions`, `tools`) override
        the Session's configuration for this Response only. Responses may also be
        created out-of-band: set `conversation` to `none` to avoid writing to the
        default Conversation, and supply arbitrary input via the `input` field
        (raw Items or references to existing ones). Only one Response can write
        to the default Conversation at a time, but several can run in parallel;
        use `metadata` to tell simultaneous Responses apart.
        """
        payload = strip_not_given({"type": "response.create", "event_id": event_id, "response": response})
        self._connection.send(cast(RealtimeClientEventParam, payload))

    def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None:
        """Send a `response.cancel` event to cancel an in-progress response.

        The server replies with a `response.done` event whose status is
        `cancelled`. Cancelling when no response is in progress yields an error
        event, but the session itself is unaffected.
        """
        payload = strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id})
        self._connection.send(cast(RealtimeClientEventParam, payload))
class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource):
    """Emitter for `input_audio_buffer.*` client events on a realtime connection."""

    def clear(self, *, event_id: str | Omit = omit) -> None:
        """Discard all audio bytes currently in the input buffer.

        The server acknowledges with an `input_audio_buffer.cleared` event.
        """
        payload = strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})
        self._connection.send(cast(RealtimeClientEventParam, payload))

    def commit(self, *, event_id: str | Omit = omit) -> None:
        """Commit the buffered user audio as a new user message item.

        Produces an error if the buffer is empty. Unnecessary in Server VAD mode,
        where the server commits automatically. Committing triggers input audio
        transcription (when enabled in the session configuration) but does not by
        itself create a model response. The server acknowledges with an
        `input_audio_buffer.committed` event.
        """
        payload = strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})
        self._connection.send(cast(RealtimeClientEventParam, payload))

    def append(self, *, audio: str, event_id: str | Omit = omit) -> None:
        """Append audio bytes to the input audio buffer.

        The buffer is temporary storage: a commit turns its contents into a new
        user message item in the conversation history and clears the buffer;
        transcription (if enabled) is generated on commit. With VAD enabled the
        server detects speech in the buffer and decides when to commit; with
        Server VAD disabled you must commit manually. Input audio noise
        reduction operates on writes to the buffer. Each event may carry up to
        15 MiB of audio — streaming smaller chunks can make VAD more responsive.
        Unlike most client events, the server sends no confirmation for this one.
        """
        payload = strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id})
        self._connection.send(cast(RealtimeClientEventParam, payload))
class RealtimeConversationResource(BaseRealtimeConnectionResource):
    """Namespace for conversation-related client events on a sync connection."""

    @cached_property
    def item(self) -> RealtimeConversationItemResource:
        """Resource for operating on individual conversation items."""
        return RealtimeConversationItemResource(self._connection)
class RealtimeConversationItemResource(BaseRealtimeConnectionResource):
    """Client-side operations on individual items in the default conversation."""

    def delete(self, *, item_id: str, event_id: str | Omit = omit) -> None:
        """Remove an item from the conversation history.

        The server replies with `conversation.item.deleted`, or with an error
        if no item with the given ID exists in the history.
        """
        event = strip_not_given(
            {"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}
        )
        self._connection.send(cast(RealtimeClientEventParam, event))

    def create(
        self, *, item: ConversationItemParam, event_id: str | Omit = omit, previous_item_id: str | Omit = omit
    ) -> None:
        """Add a new Item (message, function call, or function call response)
        to the Conversation's context.

        Useful both for populating a conversation "history" and for inserting
        items mid-stream; assistant audio messages cannot currently be
        populated this way. On success the server sends
        `conversation.item.created`, otherwise an `error` event.
        """
        event = strip_not_given(
            {
                "type": "conversation.item.create",
                "item": item,
                "event_id": event_id,
                "previous_item_id": previous_item_id,
            }
        )
        self._connection.send(cast(RealtimeClientEventParam, event))

    def truncate(self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | Omit = omit) -> None:
        """Truncate a previous assistant message's audio.

        The server produces audio faster than realtime, so this is useful when
        the user interrupts: it synchronizes the server's understanding of the
        audio with what the client actually played. Truncation also deletes
        the server-side text transcript so no unheard text remains in context.
        On success the server sends `conversation.item.truncated`.
        """
        event = strip_not_given(
            {
                "type": "conversation.item.truncate",
                "audio_end_ms": audio_end_ms,
                "content_index": content_index,
                "item_id": item_id,
                "event_id": event_id,
            }
        )
        self._connection.send(cast(RealtimeClientEventParam, event))

    def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None:
        """Fetch the server's representation of a specific conversation item,
        e.g. to inspect user audio after noise cancellation and VAD.

        The server replies with `conversation.item.retrieved`, or with an
        error if the item does not exist in the history.
        """
        event = strip_not_given(
            {"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}
        )
        self._connection.send(cast(RealtimeClientEventParam, event))
class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource):
    """Client-side output audio buffer operations (WebRTC/SIP transports)."""

    def clear(self, *, event_id: str | Omit = omit) -> None:
        """**WebRTC/SIP Only:** Cut off the current audio response.

        The server stops generating audio and emits an
        `output_audio_buffer.cleared` event. Precede this with a
        `response.cancel` client event to stop generation of the current
        response.
        [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
        """
        event = strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id})
        self._connection.send(cast(RealtimeClientEventParam, event))
class BaseAsyncRealtimeConnectionResource:
    """Shared base that stores the async realtime connection used to send events."""

    def __init__(self, connection: AsyncRealtimeConnection) -> None:
        # All derived resources send their client events through this connection.
        self._connection = connection
class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource):
    """Client-side session operations for the async realtime connection."""

    async def update(self, *, session: session_update_event_param.Session, event_id: str | Omit = omit) -> None:
        """Update the session's configuration.

        May be sent at any time to change any field except `voice` and
        `model`; `voice` may still be updated if no audio has been output
        yet. The server answers with a `session.updated` event showing the
        full effective configuration. Only fields present in the event are
        updated: clear a field like `instructions` with an empty string, a
        field like `tools` with an empty array, and a field like
        `turn_detection` with `null`.
        """
        event = strip_not_given({"type": "session.update", "session": session, "event_id": event_id})
        await self._connection.send(cast(RealtimeClientEventParam, event))
class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource):
    """Client-side response operations for the async realtime connection."""

    async def create(
        self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit
    ) -> None:
        """Instruct the server to create a Response, i.e. trigger model inference.

        In Server VAD mode the server creates Responses automatically. A
        Response includes at least one Item, and may have two, in which case
        the second is a function call; by default these are appended to the
        conversation history. The server emits `response.created`, then
        events for the generated Items and content, and finally
        `response.done`.

        Inference configuration in the event (such as `instructions` and
        `tools`) overrides the Session's configuration for that Response
        only. Responses can also run out-of-band of the default Conversation
        with arbitrary input, and writing output to the Conversation can be
        disabled. Only one Response can write to the default Conversation at
        a time, but multiple Responses can run in parallel; the `metadata`
        field is a good way to disambiguate them. Set `conversation` to
        `none` for a Response that does not write to the default
        Conversation; the `input` field accepts an array of raw Items and
        references to existing Items.
        """
        event = strip_not_given({"type": "response.create", "event_id": event_id, "response": response})
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None:
        """Cancel an in-progress response.

        The server replies with a `response.done` event carrying
        `response.status=cancelled`, or with an error if there is no response
        to cancel. Calling `response.cancel` with no response in progress is
        safe: an error is returned and the session remains unaffected.
        """
        event = strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id})
        await self._connection.send(cast(RealtimeClientEventParam, event))
class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
    """Client-side input audio buffer operations for the async connection."""

    async def clear(self, *, event_id: str | Omit = omit) -> None:
        """Clear the audio bytes in the buffer.

        The server responds with an `input_audio_buffer.cleared` event.
        """
        event = strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id})
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def commit(self, *, event_id: str | Omit = omit) -> None:
        """Commit the pending input audio buffer as a new user message item.

        Produces an error if the buffer is empty. In Server VAD mode the
        server commits automatically, so the client need not send this.
        Committing triggers input audio transcription (if configured) but
        does not create a model response; the server replies with
        `input_audio_buffer.committed`.
        """
        event = strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id})
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def append(self, *, audio: str, event_id: str | Omit = omit) -> None:
        """Append audio bytes to the input audio buffer.

        The buffer is temporary storage: a "commit" turns its contents into a
        new user message item in the conversation history and clears it.
        Input audio transcription (if enabled) runs when the buffer is
        committed. With VAD enabled the server decides when to commit;
        without it, commit manually. Noise reduction operates on writes to
        the buffer. Each event may carry up to 15 MiB of audio; smaller
        chunks can make VAD more responsive. Unlike most client events, the
        server sends no confirmation for this one.
        """
        event = strip_not_given(
            {"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}
        )
        await self._connection.send(cast(RealtimeClientEventParam, event))
class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource):
    """Namespace for conversation-related client events on an async connection."""

    @cached_property
    def item(self) -> AsyncRealtimeConversationItemResource:
        """Resource for operating on individual conversation items."""
        return AsyncRealtimeConversationItemResource(self._connection)
class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource):
    """Async client-side operations on items in the default conversation."""

    async def delete(self, *, item_id: str, event_id: str | Omit = omit) -> None:
        """Remove an item from the conversation history.

        The server replies with `conversation.item.deleted`, or with an error
        if no item with the given ID exists in the history.
        """
        event = strip_not_given(
            {"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}
        )
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def create(
        self, *, item: ConversationItemParam, event_id: str | Omit = omit, previous_item_id: str | Omit = omit
    ) -> None:
        """Add a new Item (message, function call, or function call response)
        to the Conversation's context.

        Useful both for populating a conversation "history" and for inserting
        items mid-stream; assistant audio messages cannot currently be
        populated this way. On success the server sends
        `conversation.item.created`, otherwise an `error` event.
        """
        event = strip_not_given(
            {
                "type": "conversation.item.create",
                "item": item,
                "event_id": event_id,
                "previous_item_id": previous_item_id,
            }
        )
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def truncate(
        self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | Omit = omit
    ) -> None:
        """Truncate a previous assistant message's audio.

        The server produces audio faster than realtime, so this is useful when
        the user interrupts: it synchronizes the server's understanding of the
        audio with what the client actually played. Truncation also deletes
        the server-side text transcript so no unheard text remains in context.
        On success the server sends `conversation.item.truncated`.
        """
        event = strip_not_given(
            {
                "type": "conversation.item.truncate",
                "audio_end_ms": audio_end_ms,
                "content_index": content_index,
                "item_id": item_id,
                "event_id": event_id,
            }
        )
        await self._connection.send(cast(RealtimeClientEventParam, event))

    async def retrieve(self, *, item_id: str, event_id: str | Omit = omit) -> None:
        """Fetch the server's representation of a specific conversation item,
        e.g. to inspect user audio after noise cancellation and VAD.

        The server replies with `conversation.item.retrieved`, or with an
        error if the item does not exist in the history.
        """
        event = strip_not_given(
            {"type": "conversation.item.retrieve", "item_id": item_id, "event_id": event_id}
        )
        await self._connection.send(cast(RealtimeClientEventParam, event))
class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
    """Async client-side output audio buffer operations (WebRTC/SIP transports)."""

    async def clear(self, *, event_id: str | Omit = omit) -> None:
        """**WebRTC/SIP Only:** Cut off the current audio response.

        The server stops generating audio and emits an
        `output_audio_buffer.cleared` event. Precede this with a
        `response.cancel` client event to stop generation of the current
        response.
        [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
        """
        event = strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id})
        await self._connection.send(cast(RealtimeClientEventParam, event))
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/realtime/realtime.py",
"license": "Apache License 2.0",
"lines": 885,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/realtime/client_secret_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias, TypedDict
from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam
from .realtime_transcription_session_create_request_param import RealtimeTranscriptionSessionCreateRequestParam
__all__ = ["ClientSecretCreateParams", "ExpiresAfter", "Session"]
class ClientSecretCreateParams(TypedDict, total=False):
    """Parameters for creating a Realtime API client secret."""

    expires_after: ExpiresAfter
    """Configuration for the client secret expiration.

    Expiration refers to the time after which the secret can no longer be
    used to create sessions; a session that has already started may continue
    past that time. One secret can create multiple sessions until it expires.
    """

    session: Session
    """Session configuration to use for the client secret: either a realtime
    session or a transcription session."""
class ExpiresAfter(TypedDict, total=False):
    """Expiration settings for the client secret.

    Expiration refers to the time after which the secret can no longer be
    used to create sessions; a session that has already started may continue
    past that time. One secret can create multiple sessions until it expires.
    """

    anchor: Literal["created_at"]
    """Reference point for expiration: `seconds` is added to the secret's
    `created_at` time to produce the expiration timestamp. Only `created_at`
    is currently supported."""

    seconds: int
    """Seconds from the anchor point until expiration.

    Select a value between `10` and `7200` (2 hours); defaults to 600 seconds
    (10 minutes) when not specified.
    """
# Session configuration accepted when minting a client secret: either a
# realtime session or a transcription session request.
Session: TypeAlias = Union[RealtimeSessionCreateRequestParam, RealtimeTranscriptionSessionCreateRequestParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/client_secret_create_params.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/client_secret_create_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .realtime_session_create_response import RealtimeSessionCreateResponse
from .realtime_transcription_session_create_response import RealtimeTranscriptionSessionCreateResponse
__all__ = ["ClientSecretCreateResponse", "Session"]
# Union of the two session response shapes, discriminated on the `type` field.
Session: TypeAlias = Annotated[
    Union[RealtimeSessionCreateResponse, RealtimeTranscriptionSessionCreateResponse], PropertyInfo(discriminator="type")
]
class ClientSecretCreateResponse(BaseModel):
    """Returned after creating a session and client secret for the Realtime API."""

    expires_at: int
    """Unix timestamp (seconds since epoch) at which the client secret expires."""

    session: Session
    """The session configuration, for either a realtime or a transcription session."""

    value: str
    """The generated client secret value."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/client_secret_create_response.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/conversation_created_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationCreatedEvent", "Conversation"]
class Conversation(BaseModel):
    """The conversation resource."""

    id: Optional[str] = None
    """Unique identifier of the conversation."""

    object: Optional[Literal["realtime.conversation"]] = None
    """Object type; must be `realtime.conversation`."""
class ConversationCreatedEvent(BaseModel):
    """Server event emitted right after session creation, once the conversation exists."""

    conversation: Conversation
    """The newly created conversation resource."""

    event_id: str
    """Unique ID of this server event."""

    type: Literal["conversation.created"]
    """Event type; must be `conversation.created`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_created_event.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/conversation_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .realtime_mcp_tool_call import RealtimeMcpToolCall
from .realtime_mcp_list_tools import RealtimeMcpListTools
from .realtime_mcp_approval_request import RealtimeMcpApprovalRequest
from .realtime_mcp_approval_response import RealtimeMcpApprovalResponse
from .realtime_conversation_item_user_message import RealtimeConversationItemUserMessage
from .realtime_conversation_item_function_call import RealtimeConversationItemFunctionCall
from .realtime_conversation_item_system_message import RealtimeConversationItemSystemMessage
from .realtime_conversation_item_assistant_message import RealtimeConversationItemAssistantMessage
from .realtime_conversation_item_function_call_output import RealtimeConversationItemFunctionCallOutput
__all__ = ["ConversationItem"]
# Union of every item shape that can appear in a Realtime conversation
# (messages, function calls/outputs, and MCP items), discriminated on the
# `type` field so the right model is selected when parsing.
ConversationItem: TypeAlias = Annotated[
    Union[
        RealtimeConversationItemSystemMessage,
        RealtimeConversationItemUserMessage,
        RealtimeConversationItemAssistantMessage,
        RealtimeConversationItemFunctionCall,
        RealtimeConversationItemFunctionCallOutput,
        RealtimeMcpApprovalResponse,
        RealtimeMcpListTools,
        RealtimeMcpToolCall,
        RealtimeMcpApprovalRequest,
    ],
    PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/conversation_item_added.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ConversationItemAdded"]
class ConversationItemAdded(BaseModel):
    """Sent by the server when an Item is added to the default Conversation.

    This happens in several cases:

    - The client sends a `conversation.item.create` event.
    - The input audio buffer is committed; the item is then a user message
      containing the buffered audio.
    - The model starts generating a specific Item during a Response; at that
      point the event carries no content yet and `status` is `in_progress`.

    The event includes the Item's full content (except while the model is
    generating a Response), minus audio data, which can be retrieved
    separately with a `conversation.item.retrieve` event if necessary.
    """

    event_id: str
    """Unique ID of this server event."""

    item: ConversationItem
    """A single item within a Realtime conversation."""

    type: Literal["conversation.item.added"]
    """Event type; must be `conversation.item.added`."""

    previous_item_id: Optional[str] = None
    """ID of the item that precedes this one, if any; used to maintain
    ordering when items are inserted."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_added.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_create_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ConversationItemCreateEvent"]
class ConversationItemCreateEvent(BaseModel):
    """Client event that adds a new Item to the Conversation's context.

    Items include messages, function calls, and function call responses. The
    event can populate a conversation "history" or insert items mid-stream,
    though assistant audio messages cannot currently be populated this way.
    On success the server sends `conversation.item.created`; otherwise an
    `error` event.
    """

    item: ConversationItem
    """A single item within a Realtime conversation."""

    type: Literal["conversation.item.create"]
    """Event type; must be `conversation.item.create`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""

    previous_item_id: Optional[str] = None
    """ID of the item after which the new one is inserted.

    When unset, the new item is appended to the end of the conversation; when
    `root`, it is added to the beginning. An existing ID inserts the item
    mid-conversation; if the ID cannot be found an error is returned and the
    item is not added.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_create_event.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_create_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
from .conversation_item_param import ConversationItemParam
__all__ = ["ConversationItemCreateEventParam"]
class ConversationItemCreateEventParam(TypedDict, total=False):
    """Client event that adds a new Item to the Conversation's context.

    Items include messages, function calls, and function call responses. The
    event can populate a conversation "history" or insert items mid-stream,
    though assistant audio messages cannot currently be populated this way.
    On success the server sends `conversation.item.created`; otherwise an
    `error` event.
    """

    item: Required[ConversationItemParam]
    """A single item within a Realtime conversation."""

    type: Required[Literal["conversation.item.create"]]
    """Event type; must be `conversation.item.create`."""

    event_id: str
    """Optional client-generated ID used to identify this event."""

    previous_item_id: str
    """ID of the item after which the new one is inserted.

    When unset, the new item is appended to the end of the conversation; when
    `root`, it is added to the beginning. An existing ID inserts the item
    mid-conversation; if the ID cannot be found an error is returned and the
    item is not added.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_create_event_param.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_created_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ConversationItemCreatedEvent"]
class ConversationItemCreatedEvent(BaseModel):
    """Returned when a conversation item is created.

    Scenarios that produce this event:

    - The server is generating a Response, which on success yields one or two
      Items of type `message` (role `assistant`) or type `function_call`.
    - The input audio buffer was committed, by the client or by the server
      (in `server_vad` mode); its content becomes a new user message Item.
    - The client sent a `conversation.item.create` event to add a new Item.
    """

    event_id: str
    """Unique ID of this server event."""

    item: ConversationItem
    """A single item within a Realtime conversation."""

    type: Literal["conversation.item.created"]
    """Event type; must be `conversation.item.created`."""

    previous_item_id: Optional[str] = None
    """ID of the preceding item in the Conversation context, letting the
    client understand conversation order. Can be `null` when the item has no
    predecessor."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_created_event.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_delete_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemDeleteEvent"]
class ConversationItemDeleteEvent(BaseModel):
    """Client event that removes an item from the conversation history.

    The server responds with `conversation.item.deleted`, or with an error if
    no item with the given ID exists in the history.
    """

    item_id: str
    """The ID of the item to delete."""

    type: Literal["conversation.item.delete"]
    """Event type; must be `conversation.item.delete`."""

    event_id: Optional[str] = None
    """Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_delete_event.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_delete_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ConversationItemDeleteEventParam"]
class ConversationItemDeleteEventParam(TypedDict, total=False):
    """Client event that removes an item from the conversation history.

    The server responds with `conversation.item.deleted`, or with an error if
    no item with the given ID exists in the history.
    """

    item_id: Required[str]
    """The ID of the item to delete."""

    type: Required[Literal["conversation.item.delete"]]
    """Event type; must be `conversation.item.delete`."""

    event_id: str
    """Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_delete_event_param.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_deleted_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemDeletedEvent"]
class ConversationItemDeletedEvent(BaseModel):
    """Returned when the client deletes a conversation item via a
    `conversation.item.delete` event, keeping the server's view of the
    conversation history in sync with the client's.
    """

    event_id: str
    """Unique ID of this server event."""

    item_id: str
    """The ID of the item that was deleted."""

    type: Literal["conversation.item.deleted"]
    """Event type; must be `conversation.item.deleted`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_deleted_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_done.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .conversation_item import ConversationItem
__all__ = ["ConversationItemDone"]
class ConversationItemDone(BaseModel):
    """Returned when a conversation item is finalized.

    Carries the Item's full content except audio data, which can be retrieved
    separately with a `conversation.item.retrieve` event if needed.
    """

    event_id: str
    """Unique ID of this server event."""

    item: ConversationItem
    """A single item within a Realtime conversation."""

    type: Literal["conversation.item.done"]
    """Event type; must be `conversation.item.done`."""

    previous_item_id: Optional[str] = None
    """ID of the item that precedes this one, if any; used to maintain
    ordering when items are inserted."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_done.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .log_prob_properties import LogProbProperties
__all__ = [
"ConversationItemInputAudioTranscriptionCompletedEvent",
"Usage",
"UsageTranscriptTextUsageTokens",
"UsageTranscriptTextUsageTokensInputTokenDetails",
"UsageTranscriptTextUsageDuration",
]
class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel):
"""Details about the input tokens billed for this request."""
audio_tokens: Optional[int] = None
"""Number of audio tokens billed for this request."""
text_tokens: Optional[int] = None
"""Number of text tokens billed for this request."""
class UsageTranscriptTextUsageTokens(BaseModel):
"""Usage statistics for models billed by token usage."""
input_tokens: int
"""Number of input tokens billed for this request."""
output_tokens: int
"""Number of output tokens generated."""
total_tokens: int
"""Total number of tokens used (input + output)."""
type: Literal["tokens"]
"""The type of the usage object. Always `tokens` for this variant."""
input_token_details: Optional[UsageTranscriptTextUsageTokensInputTokenDetails] = None
"""Details about the input tokens billed for this request."""
class UsageTranscriptTextUsageDuration(BaseModel):
"""Usage statistics for models billed by audio input duration."""
seconds: float
"""Duration of the input audio in seconds."""
type: Literal["duration"]
"""The type of the usage object. Always `duration` for this variant."""
Usage: TypeAlias = Union[UsageTranscriptTextUsageTokens, UsageTranscriptTextUsageDuration]
class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel):
"""
This event is the output of audio transcription for user audio written to the
user audio buffer. Transcription begins when the input audio buffer is
committed by the client or server (when VAD is enabled). Transcription runs
asynchronously with Response creation, so this event may come before or after
the Response events.
Realtime API models accept audio natively, and thus input transcription is a
separate process run on a separate ASR (Automatic Speech Recognition) model.
The transcript may diverge somewhat from the model's interpretation, and
should be treated as a rough guide.
"""
content_index: int
"""The index of the content part containing the audio."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item containing the audio that is being transcribed."""
transcript: str
"""The transcribed text."""
type: Literal["conversation.item.input_audio_transcription.completed"]
"""
The event type, must be `conversation.item.input_audio_transcription.completed`.
"""
usage: Usage
"""
Usage statistics for the transcription, this is billed according to the ASR
model's pricing rather than the realtime model's pricing.
"""
logprobs: Optional[List[LogProbProperties]] = None
"""The log probabilities of the transcription."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .log_prob_properties import LogProbProperties
__all__ = ["ConversationItemInputAudioTranscriptionDeltaEvent"]
class ConversationItemInputAudioTranscriptionDeltaEvent(BaseModel):
"""
Returned when the text value of an input audio transcription content part is updated with incremental transcription results.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item containing the audio that is being transcribed."""
type: Literal["conversation.item.input_audio_transcription.delta"]
"""The event type, must be `conversation.item.input_audio_transcription.delta`."""
content_index: Optional[int] = None
"""The index of the content part in the item's content array."""
delta: Optional[str] = None
"""The text delta."""
logprobs: Optional[List[LogProbProperties]] = None
"""The log probabilities of the transcription.
These can be enabled by configurating the session with
`"include": ["item.input_audio_transcription.logprobs"]`. Each entry in the
array corresponds a log probability of which token would be selected for this
chunk of transcription. This can help to identify if it was possible there were
multiple valid options for a given chunk of transcription.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemInputAudioTranscriptionFailedEvent", "Error"]
class Error(BaseModel):
"""Details of the transcription error."""
code: Optional[str] = None
"""Error code, if any."""
message: Optional[str] = None
"""A human-readable error message."""
param: Optional[str] = None
"""Parameter related to the error, if any."""
type: Optional[str] = None
"""The type of error."""
class ConversationItemInputAudioTranscriptionFailedEvent(BaseModel):
"""
Returned when input audio transcription is configured, and a transcription
request for a user message failed. These events are separate from other
`error` events so that the client can identify the related Item.
"""
content_index: int
"""The index of the content part containing the audio."""
error: Error
"""Details of the transcription error."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the user message item."""
type: Literal["conversation.item.input_audio_transcription.failed"]
"""The event type, must be `conversation.item.input_audio_transcription.failed`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemInputAudioTranscriptionSegment"]
class ConversationItemInputAudioTranscriptionSegment(BaseModel):
"""Returned when an input audio transcription segment is identified for an item."""
id: str
"""The segment identifier."""
content_index: int
"""The index of the input audio content part within the item."""
end: float
"""End time of the segment in seconds."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item containing the input audio content."""
speaker: str
"""The detected speaker label for this segment."""
start: float
"""Start time of the segment in seconds."""
text: str
"""The text for this segment."""
type: Literal["conversation.item.input_audio_transcription.segment"]
"""The event type, must be `conversation.item.input_audio_transcription.segment`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_input_audio_transcription_segment.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypeAlias
from .realtime_mcp_tool_call_param import RealtimeMcpToolCallParam
from .realtime_mcp_list_tools_param import RealtimeMcpListToolsParam
from .realtime_mcp_approval_request_param import RealtimeMcpApprovalRequestParam
from .realtime_mcp_approval_response_param import RealtimeMcpApprovalResponseParam
from .realtime_conversation_item_user_message_param import RealtimeConversationItemUserMessageParam
from .realtime_conversation_item_function_call_param import RealtimeConversationItemFunctionCallParam
from .realtime_conversation_item_system_message_param import RealtimeConversationItemSystemMessageParam
from .realtime_conversation_item_assistant_message_param import RealtimeConversationItemAssistantMessageParam
from .realtime_conversation_item_function_call_output_param import RealtimeConversationItemFunctionCallOutputParam
__all__ = ["ConversationItemParam"]
ConversationItemParam: TypeAlias = Union[
RealtimeConversationItemSystemMessageParam,
RealtimeConversationItemUserMessageParam,
RealtimeConversationItemAssistantMessageParam,
RealtimeConversationItemFunctionCallParam,
RealtimeConversationItemFunctionCallOutputParam,
RealtimeMcpApprovalResponseParam,
RealtimeMcpListToolsParam,
RealtimeMcpToolCallParam,
RealtimeMcpApprovalRequestParam,
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_param.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/conversation_item_retrieve_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemRetrieveEvent"]
class ConversationItemRetrieveEvent(BaseModel):
"""
Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
The server will respond with a `conversation.item.retrieved` event,
unless the item does not exist in the conversation history, in which case the
server will respond with an error.
"""
item_id: str
"""The ID of the item to retrieve."""
type: Literal["conversation.item.retrieve"]
"""The event type, must be `conversation.item.retrieve`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_retrieve_event.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_retrieve_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ConversationItemRetrieveEventParam"]
class ConversationItemRetrieveEventParam(TypedDict, total=False):
"""
Send this event when you want to retrieve the server's representation of a specific item in the conversation history. This is useful, for example, to inspect user audio after noise cancellation and VAD.
The server will respond with a `conversation.item.retrieved` event,
unless the item does not exist in the conversation history, in which case the
server will respond with an error.
"""
item_id: Required[str]
"""The ID of the item to retrieve."""
type: Required[Literal["conversation.item.retrieve"]]
"""The event type, must be `conversation.item.retrieve`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_retrieve_event_param.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_truncate_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemTruncateEvent"]
class ConversationItemTruncateEvent(BaseModel):
"""Send this event to truncate a previous assistant message’s audio.
The server
will produce audio faster than realtime, so this event is useful when the user
interrupts to truncate audio that has already been sent to the client but not
yet played. This will synchronize the server's understanding of the audio with
the client's playback.
Truncating audio will delete the server-side text transcript to ensure there
is not text in the context that hasn't been heard by the user.
If successful, the server will respond with a `conversation.item.truncated`
event.
"""
audio_end_ms: int
"""Inclusive duration up to which audio is truncated, in milliseconds.
If the audio_end_ms is greater than the actual audio duration, the server will
respond with an error.
"""
content_index: int
"""The index of the content part to truncate. Set this to `0`."""
item_id: str
"""The ID of the assistant message item to truncate.
Only assistant message items can be truncated.
"""
type: Literal["conversation.item.truncate"]
"""The event type, must be `conversation.item.truncate`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_truncate_event.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_truncate_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ConversationItemTruncateEventParam"]
class ConversationItemTruncateEventParam(TypedDict, total=False):
"""Send this event to truncate a previous assistant message’s audio.
The server
will produce audio faster than realtime, so this event is useful when the user
interrupts to truncate audio that has already been sent to the client but not
yet played. This will synchronize the server's understanding of the audio with
the client's playback.
Truncating audio will delete the server-side text transcript to ensure there
is not text in the context that hasn't been heard by the user.
If successful, the server will respond with a `conversation.item.truncated`
event.
"""
audio_end_ms: Required[int]
"""Inclusive duration up to which audio is truncated, in milliseconds.
If the audio_end_ms is greater than the actual audio duration, the server will
respond with an error.
"""
content_index: Required[int]
"""The index of the content part to truncate. Set this to `0`."""
item_id: Required[str]
"""The ID of the assistant message item to truncate.
Only assistant message items can be truncated.
"""
type: Required[Literal["conversation.item.truncate"]]
"""The event type, must be `conversation.item.truncate`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_truncate_event_param.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/conversation_item_truncated_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ConversationItemTruncatedEvent"]
class ConversationItemTruncatedEvent(BaseModel):
"""
Returned when an earlier assistant audio message item is truncated by the
client with a `conversation.item.truncate` event. This event is used to
synchronize the server's understanding of the audio with the client's playback.
This action will truncate the audio and remove the server-side text transcript
to ensure there is no text in the context that hasn't been heard by the user.
"""
audio_end_ms: int
"""The duration up to which the audio was truncated, in milliseconds."""
content_index: int
"""The index of the content part that was truncated."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the assistant message item that was truncated."""
type: Literal["conversation.item.truncated"]
"""The event type, must be `conversation.item.truncated`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/conversation_item_truncated_event.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_append_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferAppendEvent"]
class InputAudioBufferAppendEvent(BaseModel):
"""Send this event to append audio bytes to the input audio buffer.
The audio
buffer is temporary storage you can write to and later commit. A "commit" will create a new
user message item in the conversation history from the buffer content and clear the buffer.
Input audio transcription (if enabled) will be generated when the buffer is committed.
If VAD is enabled the audio buffer is used to detect speech and the server will decide
when to commit. When Server VAD is disabled, you must commit the audio buffer
manually. Input audio noise reduction operates on writes to the audio buffer.
The client may choose how much audio to place in each event up to a maximum
of 15 MiB, for example streaming smaller chunks from the client may allow the
VAD to be more responsive. Unlike most other client events, the server will
not send a confirmation response to this event.
"""
audio: str
"""Base64-encoded audio bytes.
This must be in the format specified by the `input_audio_format` field in the
session configuration.
"""
type: Literal["input_audio_buffer.append"]
"""The event type, must be `input_audio_buffer.append`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_append_event.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_append_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["InputAudioBufferAppendEventParam"]
class InputAudioBufferAppendEventParam(TypedDict, total=False):
"""Send this event to append audio bytes to the input audio buffer.
The audio
buffer is temporary storage you can write to and later commit. A "commit" will create a new
user message item in the conversation history from the buffer content and clear the buffer.
Input audio transcription (if enabled) will be generated when the buffer is committed.
If VAD is enabled the audio buffer is used to detect speech and the server will decide
when to commit. When Server VAD is disabled, you must commit the audio buffer
manually. Input audio noise reduction operates on writes to the audio buffer.
The client may choose how much audio to place in each event up to a maximum
of 15 MiB, for example streaming smaller chunks from the client may allow the
VAD to be more responsive. Unlike most other client events, the server will
not send a confirmation response to this event.
"""
audio: Required[str]
"""Base64-encoded audio bytes.
This must be in the format specified by the `input_audio_format` field in the
session configuration.
"""
type: Required[Literal["input_audio_buffer.append"]]
"""The event type, must be `input_audio_buffer.append`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_append_event_param.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_clear_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferClearEvent"]
class InputAudioBufferClearEvent(BaseModel):
"""Send this event to clear the audio bytes in the buffer.
The server will
respond with an `input_audio_buffer.cleared` event.
"""
type: Literal["input_audio_buffer.clear"]
"""The event type, must be `input_audio_buffer.clear`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_clear_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_clear_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["InputAudioBufferClearEventParam"]
class InputAudioBufferClearEventParam(TypedDict, total=False):
"""Send this event to clear the audio bytes in the buffer.
The server will
respond with an `input_audio_buffer.cleared` event.
"""
type: Required[Literal["input_audio_buffer.clear"]]
"""The event type, must be `input_audio_buffer.clear`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_clear_event_param.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_cleared_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferClearedEvent"]
class InputAudioBufferClearedEvent(BaseModel):
"""
Returned when the input audio buffer is cleared by the client with a
`input_audio_buffer.clear` event.
"""
event_id: str
"""The unique ID of the server event."""
type: Literal["input_audio_buffer.cleared"]
"""The event type, must be `input_audio_buffer.cleared`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_cleared_event.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_commit_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferCommitEvent"]
class InputAudioBufferCommitEvent(BaseModel):
"""
Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.
Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
"""
type: Literal["input_audio_buffer.commit"]
"""The event type, must be `input_audio_buffer.commit`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_commit_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_commit_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["InputAudioBufferCommitEventParam"]
class InputAudioBufferCommitEventParam(TypedDict, total=False):
"""
Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.
Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
"""
type: Required[Literal["input_audio_buffer.commit"]]
"""The event type, must be `input_audio_buffer.commit`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_commit_event_param.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_committed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferCommittedEvent"]
class InputAudioBufferCommittedEvent(BaseModel):
"""
Returned when an input audio buffer is committed, either by the client or
automatically in server VAD mode. The `item_id` property is the ID of the user
message item that will be created, thus a `conversation.item.created` event
will also be sent to the client.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the user message item that will be created."""
type: Literal["input_audio_buffer.committed"]
"""The event type, must be `input_audio_buffer.committed`."""
previous_item_id: Optional[str] = None
"""
The ID of the preceding item after which the new item will be inserted. Can be
`null` if the item has no predecessor.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_committed_event.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_speech_started_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferSpeechStartedEvent"]
class InputAudioBufferSpeechStartedEvent(BaseModel):
"""
Sent by the server when in `server_vad` mode to indicate that speech has been
detected in the audio buffer. This can happen any time audio is added to the
buffer (unless speech is already detected). The client may want to use this
event to interrupt audio playback or provide visual feedback to the user.
The client should expect to receive a `input_audio_buffer.speech_stopped` event
when speech stops. The `item_id` property is the ID of the user message item
that will be created when speech stops and will also be included in the
`input_audio_buffer.speech_stopped` event (unless the client manually commits
the audio buffer during VAD activation).
"""
audio_start_ms: int
"""
Milliseconds from the start of all audio written to the buffer during the
session when speech was first detected. This will correspond to the beginning of
audio sent to the model, and thus includes the `prefix_padding_ms` configured in
the Session.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the user message item that will be created when speech stops."""
type: Literal["input_audio_buffer.speech_started"]
"""The event type, must be `input_audio_buffer.speech_started`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_speech_started_event.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferSpeechStoppedEvent"]
class InputAudioBufferSpeechStoppedEvent(BaseModel):
"""
Returned in `server_vad` mode when the server detects the end of speech in
the audio buffer. The server will also send an `conversation.item.created`
event with the user message item that is created from the audio buffer.
"""
audio_end_ms: int
"""Milliseconds since the session started when speech stopped.
This will correspond to the end of audio sent to the model, and thus includes
the `min_silence_duration_ms` configured in the Session.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the user message item that will be created."""
type: Literal["input_audio_buffer.speech_stopped"]
"""The event type, must be `input_audio_buffer.speech_stopped`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_speech_stopped_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_timeout_triggered.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferTimeoutTriggered"]
class InputAudioBufferTimeoutTriggered(BaseModel):
"""Returned when the Server VAD timeout is triggered for the input audio buffer.
This is configured
with `idle_timeout_ms` in the `turn_detection` settings of the session, and it indicates that
there hasn't been any speech detected for the configured duration.
The `audio_start_ms` and `audio_end_ms` fields indicate the segment of audio after the last
model response up to the triggering time, as an offset from the beginning of audio written
to the input audio buffer. This means it demarcates the segment of audio that was silent and
the difference between the start and end values will roughly match the configured timeout.
The empty audio will be committed to the conversation as an `input_audio` item (there will be a
`input_audio_buffer.committed` event) and a model response will be generated. There may be speech
that didn't trigger VAD but is still detected by the model, so the model may respond with
something relevant to the conversation or a prompt to continue speaking.
"""
audio_end_ms: int
"""
Millisecond offset of audio written to the input audio buffer at the time the
timeout was triggered.
"""
audio_start_ms: int
"""
Millisecond offset of audio written to the input audio buffer that was after the
playback time of the last model response.
"""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item associated with this segment."""
type: Literal["input_audio_buffer.timeout_triggered"]
"""The event type, must be `input_audio_buffer.timeout_triggered`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_timeout_triggered.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/log_prob_properties.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from ..._models import BaseModel
__all__ = ["LogProbProperties"]
class LogProbProperties(BaseModel):
"""A log probability object."""
token: str
"""The token that was used to generate the log probability."""
bytes: List[int]
"""The bytes that were used to generate the log probability."""
logprob: float
"""The log probability of the token."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/log_prob_properties.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/mcp_list_tools_completed.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["McpListToolsCompleted"]
class McpListToolsCompleted(BaseModel):
"""Returned when listing MCP tools has completed for an item."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP list tools item."""
type: Literal["mcp_list_tools.completed"]
"""The event type, must be `mcp_list_tools.completed`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/mcp_list_tools_completed.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/mcp_list_tools_failed.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["McpListToolsFailed"]
class McpListToolsFailed(BaseModel):
"""Returned when listing MCP tools has failed for an item."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP list tools item."""
type: Literal["mcp_list_tools.failed"]
"""The event type, must be `mcp_list_tools.failed`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/mcp_list_tools_failed.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/mcp_list_tools_in_progress.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["McpListToolsInProgress"]
class McpListToolsInProgress(BaseModel):
"""Returned when listing MCP tools is in progress for an item."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the MCP list tools item."""
type: Literal["mcp_list_tools.in_progress"]
"""The event type, must be `mcp_list_tools.in_progress`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/mcp_list_tools_in_progress.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/output_audio_buffer_clear_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["OutputAudioBufferClearEvent"]
class OutputAudioBufferClearEvent(BaseModel):
"""**WebRTC/SIP Only:** Emit to cut off the current audio response.
This will trigger the server to
stop generating audio and emit a `output_audio_buffer.cleared` event. This
event should be preceded by a `response.cancel` client event to stop the
generation of the current response.
[Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
"""
type: Literal["output_audio_buffer.clear"]
"""The event type, must be `output_audio_buffer.clear`."""
event_id: Optional[str] = None
"""The unique ID of the client event used for error handling."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/output_audio_buffer_clear_event.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/output_audio_buffer_clear_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["OutputAudioBufferClearEventParam"]
class OutputAudioBufferClearEventParam(TypedDict, total=False):
"""**WebRTC/SIP Only:** Emit to cut off the current audio response.
This will trigger the server to
stop generating audio and emit a `output_audio_buffer.cleared` event. This
event should be preceded by a `response.cancel` client event to stop the
generation of the current response.
[Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
"""
type: Required[Literal["output_audio_buffer.clear"]]
"""The event type, must be `output_audio_buffer.clear`."""
event_id: str
"""The unique ID of the client event used for error handling."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/output_audio_buffer_clear_event_param.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/rate_limits_updated_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RateLimitsUpdatedEvent", "RateLimit"]
class RateLimit(BaseModel):
limit: Optional[int] = None
"""The maximum allowed value for the rate limit."""
name: Optional[Literal["requests", "tokens"]] = None
"""The name of the rate limit (`requests`, `tokens`)."""
remaining: Optional[int] = None
"""The remaining value before the limit is reached."""
reset_seconds: Optional[float] = None
"""Seconds until the rate limit resets."""
class RateLimitsUpdatedEvent(BaseModel):
"""Emitted at the beginning of a Response to indicate the updated rate limits.
When a Response is created some tokens will be "reserved" for the output
tokens, the rate limits shown here reflect that reservation, which is then
adjusted accordingly once the Response is completed.
"""
event_id: str
"""The unique ID of the server event."""
rate_limits: List[RateLimit]
"""List of rate limit information."""
type: Literal["rate_limits.updated"]
"""The event type, must be `rate_limits.updated`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/rate_limits_updated_event.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_audio_config.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .realtime_audio_config_input import RealtimeAudioConfigInput
from .realtime_audio_config_output import RealtimeAudioConfigOutput
__all__ = ["RealtimeAudioConfig"]
class RealtimeAudioConfig(BaseModel):
"""Configuration for input and output audio."""
input: Optional[RealtimeAudioConfigInput] = None
output: Optional[RealtimeAudioConfigOutput] = None
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_audio_config_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
from .realtime_audio_config_input_param import RealtimeAudioConfigInputParam
from .realtime_audio_config_output_param import RealtimeAudioConfigOutputParam
__all__ = ["RealtimeAudioConfigParam"]
class RealtimeAudioConfigParam(TypedDict, total=False):
"""Configuration for input and output audio."""
input: RealtimeAudioConfigInputParam
output: RealtimeAudioConfigOutputParam
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_audio_config_param.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_client_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .session_update_event import SessionUpdateEvent
from .response_cancel_event import ResponseCancelEvent
from .response_create_event import ResponseCreateEvent
from .conversation_item_create_event import ConversationItemCreateEvent
from .conversation_item_delete_event import ConversationItemDeleteEvent
from .input_audio_buffer_clear_event import InputAudioBufferClearEvent
from .input_audio_buffer_append_event import InputAudioBufferAppendEvent
from .input_audio_buffer_commit_event import InputAudioBufferCommitEvent
from .output_audio_buffer_clear_event import OutputAudioBufferClearEvent
from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
from .conversation_item_truncate_event import ConversationItemTruncateEvent
__all__ = ["RealtimeClientEvent"]
RealtimeClientEvent: TypeAlias = Annotated[
Union[
ConversationItemCreateEvent,
ConversationItemDeleteEvent,
ConversationItemRetrieveEvent,
ConversationItemTruncateEvent,
InputAudioBufferAppendEvent,
InputAudioBufferClearEvent,
OutputAudioBufferClearEvent,
InputAudioBufferCommitEvent,
ResponseCancelEvent,
ResponseCreateEvent,
SessionUpdateEvent,
],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_client_event.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_client_event_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypeAlias
from .session_update_event_param import SessionUpdateEventParam
from .response_cancel_event_param import ResponseCancelEventParam
from .response_create_event_param import ResponseCreateEventParam
from .conversation_item_create_event_param import ConversationItemCreateEventParam
from .conversation_item_delete_event_param import ConversationItemDeleteEventParam
from .input_audio_buffer_clear_event_param import InputAudioBufferClearEventParam
from .input_audio_buffer_append_event_param import InputAudioBufferAppendEventParam
from .input_audio_buffer_commit_event_param import InputAudioBufferCommitEventParam
from .output_audio_buffer_clear_event_param import OutputAudioBufferClearEventParam
from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam
from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam
__all__ = ["RealtimeClientEventParam"]
RealtimeClientEventParam: TypeAlias = Union[
ConversationItemCreateEventParam,
ConversationItemDeleteEventParam,
ConversationItemRetrieveEventParam,
ConversationItemTruncateEventParam,
InputAudioBufferAppendEventParam,
InputAudioBufferClearEventParam,
OutputAudioBufferClearEventParam,
InputAudioBufferCommitEventParam,
ResponseCancelEventParam,
ResponseCreateEventParam,
SessionUpdateEventParam,
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_client_event_param.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_connect_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
__all__ = ["RealtimeConnectParams"]
class RealtimeConnectParams(TypedDict, total=False):
call_id: str
model: str
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_connect_params.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_assistant_message.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeConversationItemAssistantMessage", "Content"]
class Content(BaseModel):
audio: Optional[str] = None
"""
Base64-encoded audio bytes, these will be parsed as the format specified in the
session output audio type configuration. This defaults to PCM 16-bit 24kHz mono
if not specified.
"""
text: Optional[str] = None
"""The text content."""
transcript: Optional[str] = None
"""
The transcript of the audio content, this will always be present if the output
type is `audio`.
"""
type: Optional[Literal["output_text", "output_audio"]] = None
"""
The content type, `output_text` or `output_audio` depending on the session
`output_modalities` configuration.
"""
class RealtimeConversationItemAssistantMessage(BaseModel):
"""An assistant message item in a Realtime conversation."""
content: List[Content]
"""The content of the message."""
role: Literal["assistant"]
"""The role of the message sender. Always `assistant`."""
type: Literal["message"]
"""The type of the item. Always `message`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_assistant_message.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeConversationItemAssistantMessageParam", "Content"]
class Content(TypedDict, total=False):
audio: str
"""
Base64-encoded audio bytes, these will be parsed as the format specified in the
session output audio type configuration. This defaults to PCM 16-bit 24kHz mono
if not specified.
"""
text: str
"""The text content."""
transcript: str
"""
The transcript of the audio content, this will always be present if the output
type is `audio`.
"""
type: Literal["output_text", "output_audio"]
"""
The content type, `output_text` or `output_audio` depending on the session
`output_modalities` configuration.
"""
class RealtimeConversationItemAssistantMessageParam(TypedDict, total=False):
"""An assistant message item in a Realtime conversation."""
content: Required[Iterable[Content]]
"""The content of the message."""
role: Required[Literal["assistant"]]
"""The role of the message sender. Always `assistant`."""
type: Required[Literal["message"]]
"""The type of the item. Always `message`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_assistant_message_param.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_function_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeConversationItemFunctionCall"]
class RealtimeConversationItemFunctionCall(BaseModel):
"""A function call item in a Realtime conversation."""
arguments: str
"""The arguments of the function call.
This is a JSON-encoded string representing the arguments passed to the function,
for example `{"arg1": "value1", "arg2": 42}`.
"""
name: str
"""The name of the function being called."""
type: Literal["function_call"]
"""The type of the item. Always `function_call`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
call_id: Optional[str] = None
"""The ID of the function call."""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_function_call.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_function_call_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeConversationItemFunctionCallOutput"]
class RealtimeConversationItemFunctionCallOutput(BaseModel):
"""A function call output item in a Realtime conversation."""
call_id: str
"""The ID of the function call this output is for."""
output: str
"""
The output of the function call, this is free text and can contain any
information or simply be empty.
"""
type: Literal["function_call_output"]
"""The type of the item. Always `function_call_output`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_function_call_output.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeConversationItemFunctionCallOutputParam"]
class RealtimeConversationItemFunctionCallOutputParam(TypedDict, total=False):
"""A function call output item in a Realtime conversation."""
call_id: Required[str]
"""The ID of the function call this output is for."""
output: Required[str]
"""
The output of the function call, this is free text and can contain any
information or simply be empty.
"""
type: Required[Literal["function_call_output"]]
"""The type of the item. Always `function_call_output`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_function_call_output_param.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_function_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeConversationItemFunctionCallParam"]
class RealtimeConversationItemFunctionCallParam(TypedDict, total=False):
"""A function call item in a Realtime conversation."""
arguments: Required[str]
"""The arguments of the function call.
This is a JSON-encoded string representing the arguments passed to the function,
for example `{"arg1": "value1", "arg2": 42}`.
"""
name: Required[str]
"""The name of the function being called."""
type: Required[Literal["function_call"]]
"""The type of the item. Always `function_call`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
call_id: str
"""The ID of the function call."""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_function_call_param.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_system_message.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeConversationItemSystemMessage", "Content"]
class Content(BaseModel):
text: Optional[str] = None
"""The text content."""
type: Optional[Literal["input_text"]] = None
"""The content type. Always `input_text` for system messages."""
class RealtimeConversationItemSystemMessage(BaseModel):
"""
A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. "the user is now asking about a different topic"), use system messages.
"""
content: List[Content]
"""The content of the message."""
role: Literal["system"]
"""The role of the message sender. Always `system`."""
type: Literal["message"]
"""The type of the item. Always `message`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_system_message.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_system_message_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeConversationItemSystemMessageParam", "Content"]
class Content(TypedDict, total=False):
text: str
"""The text content."""
type: Literal["input_text"]
"""The content type. Always `input_text` for system messages."""
class RealtimeConversationItemSystemMessageParam(TypedDict, total=False):
"""
A system message in a Realtime conversation can be used to provide additional context or instructions to the model. This is similar but distinct from the instruction prompt provided at the start of a conversation, as system messages can be added at any point in the conversation. For major changes to the conversation's behavior, use instructions, but for smaller updates (e.g. "the user is now asking about a different topic"), use system messages.
"""
content: Required[Iterable[Content]]
"""The content of the message."""
role: Required[Literal["system"]]
"""The role of the message sender. Always `system`."""
type: Required[Literal["message"]]
"""The type of the item. Always `message`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_system_message_param.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_user_message.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeConversationItemUserMessage", "Content"]
class Content(BaseModel):
audio: Optional[str] = None
"""
Base64-encoded audio bytes (for `input_audio`), these will be parsed as the
format specified in the session input audio type configuration. This defaults to
PCM 16-bit 24kHz mono if not specified.
"""
detail: Optional[Literal["auto", "low", "high"]] = None
"""The detail level of the image (for `input_image`).
`auto` will default to `high`.
"""
image_url: Optional[str] = None
"""Base64-encoded image bytes (for `input_image`) as a data URI.
For example `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. Supported
formats are PNG and JPEG.
"""
text: Optional[str] = None
"""The text content (for `input_text`)."""
transcript: Optional[str] = None
"""Transcript of the audio (for `input_audio`).
This is not sent to the model, but will be attached to the message item for
reference.
"""
type: Optional[Literal["input_text", "input_audio", "input_image"]] = None
"""The content type (`input_text`, `input_audio`, or `input_image`)."""
class RealtimeConversationItemUserMessage(BaseModel):
"""A user message item in a Realtime conversation."""
content: List[Content]
"""The content of the message."""
role: Literal["user"]
"""The role of the message sender. Always `user`."""
type: Literal["message"]
"""The type of the item. Always `message`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_user_message.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_conversation_item_user_message_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeConversationItemUserMessageParam", "Content"]
class Content(TypedDict, total=False):
    audio: str
    """Base64-encoded audio payload (for `input_audio`).

    Parsed using the format configured for session audio input, falling back
    to 16-bit PCM at 24kHz mono when no format is specified.
    """

    detail: Literal["auto", "low", "high"]
    """Image detail level (for `input_image`); `auto` resolves to `high`."""

    image_url: str
    """Image bytes as a base64 data URI (for `input_image`).

    For example `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. PNG and
    JPEG are the supported formats.
    """

    text: str
    """Text body (for `input_text`)."""

    transcript: str
    """Audio transcript (for `input_audio`).

    Never forwarded to the model; attached to the message item purely for
    reference.
    """

    type: Literal["input_text", "input_audio", "input_image"]
    """Which kind of content this is: `input_text`, `input_audio`, or `input_image`."""
class RealtimeConversationItemUserMessageParam(TypedDict, total=False):
    """A user-authored message item within a Realtime conversation."""

    content: Required[Iterable[Content]]
    """The pieces of content that make up the message."""

    role: Required[Literal["user"]]
    """Sender role for this message; always `user`."""

    type: Required[Literal["message"]]
    """Item type discriminator; always `message`."""

    id: str
    """Unique identifier of the item.

    May be supplied by the client or assigned by the server.
    """

    object: Literal["realtime.item"]
    """API object identifier; always `realtime.item` when present.

    Optional when creating a new item.
    """

    status: Literal["completed", "incomplete", "in_progress"]
    """Completion status of the item; has no effect on the conversation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_conversation_item_user_message_param.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_error.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
__all__ = ["RealtimeError"]
class RealtimeError(BaseModel):
    """Describes an error raised by the Realtime API."""

    message: str
    """Human-readable description of what went wrong."""

    type: str
    """Error category (e.g., "invalid_request_error", "server_error")."""

    code: Optional[str] = None
    """Machine-readable error code, when one exists."""

    event_id: Optional[str] = None
    """ID of the client event that triggered the error, if applicable."""

    param: Optional[str] = None
    """Name of the parameter tied to the error, if any."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_error.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_error_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
from .realtime_error import RealtimeError
__all__ = ["RealtimeErrorEvent"]
class RealtimeErrorEvent(BaseModel):
    """Server event emitted whenever an error occurs.

    The cause may be on either the client or server side. Most errors are
    recoverable and the session stays open; implementors are encouraged to
    monitor and log these messages by default.
    """

    error: RealtimeError
    """Details of the error."""

    event_id: str
    """The unique ID of the server event."""

    type: Literal["error"]
    """The event type, must be `error`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_error_event.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/realtime_mcp_approval_request.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["RealtimeMcpApprovalRequest"]
class RealtimeMcpApprovalRequest(BaseModel):
    """Realtime item asking a human to approve a pending tool invocation."""

    id: str
    """Unique identifier of this approval request."""

    arguments: str
    """Tool arguments, serialized as a JSON string."""

    name: str
    """Name of the tool awaiting approval."""

    server_label: str
    """Label of the MCP server that issued the request."""

    type: Literal["mcp_approval_request"]
    """Item type discriminator; always `mcp_approval_request`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_approval_request.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/realtime_mcp_approval_request_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["RealtimeMcpApprovalRequestParam"]
class RealtimeMcpApprovalRequestParam(TypedDict, total=False):
    """Realtime item asking a human to approve a pending tool invocation."""

    id: Required[str]
    """Unique identifier of this approval request."""

    arguments: Required[str]
    """Tool arguments, serialized as a JSON string."""

    name: Required[str]
    """Name of the tool awaiting approval."""

    server_label: Required[str]
    """Label of the MCP server that issued the request."""

    type: Required[Literal["mcp_approval_request"]]
    """Item type discriminator; always `mcp_approval_request`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/realtime_mcp_approval_request_param.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.