sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
openai/openai-python:src/openai/types/responses/response_local_environment.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseLocalEnvironment"]
class ResponseLocalEnvironment(BaseModel):
"""Represents the use of a local environment to perform shell actions."""
type: Literal["local"]
"""The environment type. Always `local`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_local_environment.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/skill_reference.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["SkillReference"]
class SkillReference(BaseModel):
skill_id: str
"""The ID of the referenced skill."""
type: Literal["skill_reference"]
"""References a skill created with the /v1/skills endpoint."""
version: Optional[str] = None
"""Optional skill version. Use a positive integer or 'latest'. Omit for default."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/skill_reference.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/skill_reference_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["SkillReferenceParam"]
class SkillReferenceParam(TypedDict, total=False):
skill_id: Required[str]
"""The ID of the referenced skill."""
type: Required[Literal["skill_reference"]]
"""References a skill created with the /v1/skills endpoint."""
version: str
"""Optional skill version. Use a positive integer or 'latest'. Omit for default."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/skill_reference_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skill.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["Skill"]
class Skill(BaseModel):
id: str
"""Unique identifier for the skill."""
created_at: int
"""Unix timestamp (seconds) for when the skill was created."""
default_version: str
"""Default version for the skill."""
description: str
"""Description of the skill."""
latest_version: str
"""Latest version for the skill."""
name: str
"""Name of the skill."""
object: Literal["skill"]
"""The object type, which is `skill`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skill.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skill_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypedDict
from .._types import FileTypes, SequenceNotStr
__all__ = ["SkillCreateParams"]
class SkillCreateParams(TypedDict, total=False):
files: Union[SequenceNotStr[FileTypes], FileTypes]
"""Skill files to upload (directory upload) or a single zip file."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skill_create_params.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skill_list.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from .skill import Skill
from .._models import BaseModel
__all__ = ["SkillList"]
class SkillList(BaseModel):
data: List[Skill]
"""A list of items"""
first_id: Optional[str] = None
"""The ID of the first item in the list."""
has_more: bool
"""Whether there are more items available."""
last_id: Optional[str] = None
"""The ID of the last item in the list."""
object: Literal["list"]
"""The type of object returned, must be `list`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skill_list.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skill_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["SkillListParams"]
class SkillListParams(TypedDict, total=False):
after: str
"""Identifier for the last item from the previous pagination request"""
limit: int
"""Number of items to retrieve"""
order: Literal["asc", "desc"]
"""Sort order of results by timestamp.
Use `asc` for ascending order or `desc` for descending order.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skill_list_params.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skill_update_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["SkillUpdateParams"]
class SkillUpdateParams(TypedDict, total=False):
default_version: Required[str]
"""The skill version number to set as default."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skill_update_params.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skills/deleted_skill_version.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["DeletedSkillVersion"]
class DeletedSkillVersion(BaseModel):
id: str
deleted: bool
object: Literal["skill.version.deleted"]
version: str
"""The deleted skill version."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skills/deleted_skill_version.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skills/skill_version.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["SkillVersion"]
class SkillVersion(BaseModel):
id: str
"""Unique identifier for the skill version."""
created_at: int
"""Unix timestamp (seconds) for when the version was created."""
description: str
"""Description of the skill version."""
name: str
"""Name of the skill version."""
object: Literal["skill.version"]
"""The object type, which is `skill.version`."""
skill_id: str
"""Identifier of the skill for this version."""
version: str
"""Version number for this skill."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skills/skill_version.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skills/skill_version_list.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
from .skill_version import SkillVersion
__all__ = ["SkillVersionList"]
class SkillVersionList(BaseModel):
data: List[SkillVersion]
"""A list of items"""
first_id: Optional[str] = None
"""The ID of the first item in the list."""
has_more: bool
"""Whether there are more items available."""
last_id: Optional[str] = None
"""The ID of the last item in the list."""
object: Literal["list"]
"""The type of object returned, must be `list`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skills/skill_version_list.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skills/version_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypedDict
from ..._types import FileTypes, SequenceNotStr
__all__ = ["VersionCreateParams"]
class VersionCreateParams(TypedDict, total=False):
default: bool
"""Whether to set this version as the default."""
files: Union[SequenceNotStr[FileTypes], FileTypes]
"""Skill files to upload (directory upload) or a single zip file."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skills/version_create_params.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/skills/version_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["VersionListParams"]
class VersionListParams(TypedDict, total=False):
after: str
"""The skill version ID to start after."""
limit: int
"""Number of versions to retrieve."""
order: Literal["asc", "desc"]
"""Sort order of results by version number."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/skills/version_list_params.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/skills/test_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import httpx
import pytest
from respx import MockRouter
import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestContent:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
content = client.skills.content.retrieve(
"skill_123",
)
assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
assert content.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = client.skills.content.with_raw_response.retrieve(
"skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
with client.skills.content.with_streaming_response.retrieve(
"skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
assert_matches_type(bytes, content, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
client.skills.content.with_raw_response.retrieve(
"",
)
class TestAsyncContent:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
content = await async_client.skills.content.retrieve(
"skill_123",
)
assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
assert content.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.skills.content.with_raw_response.retrieve(
"skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/skills/skill_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
async with async_client.skills.content.with_streaming_response.retrieve(
"skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
content = await response.parse()
assert_matches_type(bytes, content, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
await async_client.skills.content.with_raw_response.retrieve(
"",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/skills/test_content.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/skills/test_versions.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.pagination import SyncCursorPage, AsyncCursorPage
from openai.types.skills import SkillVersion, DeletedSkillVersion
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestVersions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
version = client.skills.versions.create(
skill_id="skill_123",
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
version = client.skills.versions.create(
skill_id="skill_123",
default=True,
files=[b"raw file contents"],
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.skills.versions.with_raw_response.create(
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.skills.versions.with_streaming_response.create(
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_create(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
client.skills.versions.with_raw_response.create(
skill_id="",
)
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
version = client.skills.versions.retrieve(
version="version",
skill_id="skill_123",
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.skills.versions.with_raw_response.retrieve(
version="version",
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.skills.versions.with_streaming_response.retrieve(
version="version",
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
client.skills.versions.with_raw_response.retrieve(
version="version",
skill_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
client.skills.versions.with_raw_response.retrieve(
version="",
skill_id="skill_123",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
version = client.skills.versions.list(
skill_id="skill_123",
)
assert_matches_type(SyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
version = client.skills.versions.list(
skill_id="skill_123",
after="skillver_123",
limit=0,
order="asc",
)
assert_matches_type(SyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.skills.versions.with_raw_response.list(
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.skills.versions.with_streaming_response.list(
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SyncCursorPage[SkillVersion], version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
client.skills.versions.with_raw_response.list(
skill_id="",
)
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
version = client.skills.versions.delete(
version="version",
skill_id="skill_123",
)
assert_matches_type(DeletedSkillVersion, version, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.skills.versions.with_raw_response.delete(
version="version",
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(DeletedSkillVersion, version, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.skills.versions.with_streaming_response.delete(
version="version",
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(DeletedSkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
client.skills.versions.with_raw_response.delete(
version="version",
skill_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
client.skills.versions.with_raw_response.delete(
version="",
skill_id="skill_123",
)
class TestAsyncVersions:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.create(
skill_id="skill_123",
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.create(
skill_id="skill_123",
default=True,
files=[b"raw file contents"],
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.skills.versions.with_raw_response.create(
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.skills.versions.with_streaming_response.create(
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = await response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
await async_client.skills.versions.with_raw_response.create(
skill_id="",
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.retrieve(
version="version",
skill_id="skill_123",
)
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.skills.versions.with_raw_response.retrieve(
version="version",
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.skills.versions.with_streaming_response.retrieve(
version="version",
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = await response.parse()
assert_matches_type(SkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
await async_client.skills.versions.with_raw_response.retrieve(
version="version",
skill_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
await async_client.skills.versions.with_raw_response.retrieve(
version="",
skill_id="skill_123",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.list(
skill_id="skill_123",
)
assert_matches_type(AsyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.list(
skill_id="skill_123",
after="skillver_123",
limit=0,
order="asc",
)
assert_matches_type(AsyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.skills.versions.with_raw_response.list(
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(AsyncCursorPage[SkillVersion], version, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.skills.versions.with_streaming_response.list(
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = await response.parse()
assert_matches_type(AsyncCursorPage[SkillVersion], version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
await async_client.skills.versions.with_raw_response.list(
skill_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
version = await async_client.skills.versions.delete(
version="version",
skill_id="skill_123",
)
assert_matches_type(DeletedSkillVersion, version, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.skills.versions.with_raw_response.delete(
version="version",
skill_id="skill_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = response.parse()
assert_matches_type(DeletedSkillVersion, version, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.skills.versions.with_streaming_response.delete(
version="version",
skill_id="skill_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
version = await response.parse()
assert_matches_type(DeletedSkillVersion, version, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
await async_client.skills.versions.with_raw_response.delete(
version="version",
skill_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
await async_client.skills.versions.with_raw_response.delete(
version="",
skill_id="skill_123",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/skills/test_versions.py",
"license": "Apache License 2.0",
"lines": 334,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/skills/versions/test_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import httpx
import pytest
from respx import MockRouter
import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestContent:
    """Contract tests for the synchronous `skills.versions.content` resource."""

    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        # Stub the binary-content endpoint, then fetch through the plain client surface.
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        content = client.skills.versions.content.retrieve(version="version", skill_id="skill_123")
        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
        assert content.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        response = client.skills.versions.content.with_raw_response.retrieve(version="version", skill_id="skill_123")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        content = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        with client.skills.versions.content.with_streaming_response.retrieve(
            version="version", skill_id="skill_123"
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            content = response.parse()
            assert_matches_type(bytes, content, path=["response"])

        # Leaving the context manager must close the response.
        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # Empty path parameters must be rejected client-side, before any request is sent.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            client.skills.versions.content.with_raw_response.retrieve(version="version", skill_id="")

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
            client.skills.versions.content.with_raw_response.retrieve(version="", skill_id="skill_123")
class TestAsyncContent:
    """Contract tests for the asynchronous `skills.versions.content` resource."""

    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_method_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        # Stub the binary-content endpoint, then fetch through the plain client surface.
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        content = await async_client.skills.versions.content.retrieve(version="version", skill_id="skill_123")
        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
        assert content.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        response = await async_client.skills.versions.content.with_raw_response.retrieve(
            version="version", skill_id="skill_123"
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        content = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/skills/skill_123/versions/version/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        async with async_client.skills.versions.content.with_streaming_response.retrieve(
            version="version", skill_id="skill_123"
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            content = await response.parse()
            assert_matches_type(bytes, content, path=["response"])

        # Leaving the context manager must close the response.
        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # Empty path parameters must be rejected client-side, before any request is sent.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            await async_client.skills.versions.content.with_raw_response.retrieve(version="version", skill_id="")

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `version` but received ''"):
            await async_client.skills.versions.content.with_raw_response.retrieve(version="", skill_id="skill_123")
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/skills/versions/test_content.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/test_skills.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import Skill, DeletedSkill
from openai.pagination import SyncCursorPage, AsyncCursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestSkills:
    """Contract tests for the synchronous `client.skills` resource."""

    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        skill = client.skills.create()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        skill = client.skills.create(files=[b"raw file contents"])
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        response = client.skills.with_raw_response.create()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        with client.skills.with_streaming_response.create() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        skill = client.skills.retrieve("skill_123")
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.skills.with_raw_response.retrieve("skill_123")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.skills.with_streaming_response.retrieve("skill_123") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # An empty skill_id must be rejected client-side, before any request is sent.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            client.skills.with_raw_response.retrieve("")

    @parametrize
    def test_method_update(self, client: OpenAI) -> None:
        skill = client.skills.update(skill_id="skill_123", default_version="default_version")
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_raw_response_update(self, client: OpenAI) -> None:
        response = client.skills.with_raw_response.update(skill_id="skill_123", default_version="default_version")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    def test_streaming_response_update(self, client: OpenAI) -> None:
        with client.skills.with_streaming_response.update(
            skill_id="skill_123", default_version="default_version"
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_update(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            client.skills.with_raw_response.update(skill_id="", default_version="default_version")

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        skill = client.skills.list()
        assert_matches_type(SyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        skill = client.skills.list(after="after", limit=0, order="asc")
        assert_matches_type(SyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.skills.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(SyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.skills.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = response.parse()
            assert_matches_type(SyncCursorPage[Skill], skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        skill = client.skills.delete("skill_123")
        assert_matches_type(DeletedSkill, skill, path=["response"])

    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.skills.with_raw_response.delete("skill_123")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(DeletedSkill, skill, path=["response"])

    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.skills.with_streaming_response.delete("skill_123") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = response.parse()
            assert_matches_type(DeletedSkill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            client.skills.with_raw_response.delete("")
class TestAsyncSkills:
    """Contract tests for the asynchronous `client.skills` resource."""

    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.create()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.create(files=[b"raw file contents"])
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.skills.with_raw_response.create()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.skills.with_streaming_response.create() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = await response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.retrieve("skill_123")
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.skills.with_raw_response.retrieve("skill_123")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.skills.with_streaming_response.retrieve("skill_123") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = await response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # An empty skill_id must be rejected client-side, before any request is sent.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            await async_client.skills.with_raw_response.retrieve("")

    @parametrize
    async def test_method_update(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.update(skill_id="skill_123", default_version="default_version")
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.skills.with_raw_response.update(
            skill_id="skill_123", default_version="default_version"
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(Skill, skill, path=["response"])

    @parametrize
    async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
        async with async_client.skills.with_streaming_response.update(
            skill_id="skill_123", default_version="default_version"
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = await response.parse()
            assert_matches_type(Skill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            await async_client.skills.with_raw_response.update(skill_id="", default_version="default_version")

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.list()
        assert_matches_type(AsyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.list(after="after", limit=0, order="asc")
        assert_matches_type(AsyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.skills.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(AsyncCursorPage[Skill], skill, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.skills.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = await response.parse()
            assert_matches_type(AsyncCursorPage[Skill], skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        skill = await async_client.skills.delete("skill_123")
        assert_matches_type(DeletedSkill, skill, path=["response"])

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.skills.with_raw_response.delete("skill_123")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        skill = response.parse()
        assert_matches_type(DeletedSkill, skill, path=["response"])

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.skills.with_streaming_response.delete("skill_123") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            skill = await response.parse()
            assert_matches_type(DeletedSkill, skill, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `skill_id` but received ''"):
            await async_client.skills.with_raw_response.delete("")
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_skills.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/_utils/_json.py | import json
from typing import Any
from datetime import datetime
from typing_extensions import override
import pydantic
from .._compat import model_dump
def openapi_dumps(obj: Any) -> bytes:
    """
    Serialize *obj* to compact, UTF-8 encoded JSON bytes.

    Builds on the standard ``json.dumps`` but also understands types commonly
    used in the SDK, such as ``datetime`` and ``pydantic.BaseModel``.
    """
    # Mirror httpx's JSON serialization defaults: compact separators,
    # unescaped unicode, and strict rejection of NaN/Infinity.
    encoded = json.dumps(
        obj,
        cls=_CustomEncoder,
        ensure_ascii=False,
        separators=(",", ":"),
        allow_nan=False,
    )
    return encoded.encode()


class _CustomEncoder(json.JSONEncoder):
    """JSON encoder with fallbacks for datetimes and pydantic models."""

    def default(self, o: Any) -> Any:
        # Datetimes serialize to their ISO-8601 representation.
        if isinstance(o, datetime):
            return o.isoformat()
        # Pydantic models serialize through the SDK compat helper so that
        # aliases are honored and unset fields are omitted.
        if isinstance(o, pydantic.BaseModel):
            return model_dump(o, exclude_unset=True, mode="json", by_alias=True)
        return super().default(o)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/_utils/_json.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/test_utils/test_json.py | from __future__ import annotations
import datetime
from typing import Union
import pydantic
from openai import _compat
from openai._utils._json import openapi_dumps
class TestOpenapiDumps:
    """Behavioural tests for `openapi_dumps` JSON serialization."""

    def test_basic(self) -> None:
        assert openapi_dumps({"key": "value", "number": 42}) == b'{"key":"value","number":42}'

    def test_datetime_serialization(self) -> None:
        when = datetime.datetime(2023, 1, 1, 12, 0, 0)
        assert openapi_dumps({"datetime": when}) == b'{"datetime":"2023-01-01T12:00:00"}'

    def test_pydantic_model_serialization(self) -> None:
        class User(pydantic.BaseModel):
            first_name: str
            last_name: str
            age: int

        user = User(first_name="John", last_name="Kramer", age=83)
        assert openapi_dumps({"model": user}) == b'{"model":{"first_name":"John","last_name":"Kramer","age":83}}'

    def test_pydantic_model_with_default_values(self) -> None:
        class User(pydantic.BaseModel):
            name: str
            role: str = "user"
            active: bool = True
            score: int = 0

        # Fields left at their defaults are omitted from the payload.
        assert openapi_dumps({"model": User(name="Alice")}) == b'{"model":{"name":"Alice"}}'

    def test_pydantic_model_with_default_values_overridden(self) -> None:
        class User(pydantic.BaseModel):
            name: str
            role: str = "user"
            active: bool = True

        user = User(name="Bob", role="admin", active=False)
        assert openapi_dumps({"model": user}) == b'{"model":{"name":"Bob","role":"admin","active":false}}'

    def test_pydantic_model_with_alias(self) -> None:
        class User(pydantic.BaseModel):
            first_name: str = pydantic.Field(alias="firstName")
            last_name: str = pydantic.Field(alias="lastName")

        user = User(firstName="John", lastName="Doe")
        assert openapi_dumps({"model": user}) == b'{"model":{"firstName":"John","lastName":"Doe"}}'

    def test_pydantic_model_with_alias_and_default(self) -> None:
        class User(pydantic.BaseModel):
            user_name: str = pydantic.Field(alias="userName")
            user_role: str = pydantic.Field(default="member", alias="userRole")
            is_active: bool = pydantic.Field(default=True, alias="isActive")

        assert openapi_dumps({"model": User(userName="charlie")}) == b'{"model":{"userName":"charlie"}}'

        overridden = User(userName="diana", userRole="admin", isActive=False)
        assert (
            openapi_dumps({"model": overridden})
            == b'{"model":{"userName":"diana","userRole":"admin","isActive":false}}'
        )

    def test_pydantic_model_with_nested_models_and_defaults(self) -> None:
        class Address(pydantic.BaseModel):
            street: str
            city: str = "Unknown"

        class User(pydantic.BaseModel):
            name: str
            address: Address
            verified: bool = False

        if _compat.PYDANTIC_V1:
            # to handle forward references in Pydantic v1
            User.update_forward_refs(**locals())  # type: ignore[reportDeprecated]

        user = User(name="Diana", address=Address(street="123 Main St"))
        assert openapi_dumps({"user": user}) == b'{"user":{"name":"Diana","address":{"street":"123 Main St"}}}'

        verified_user = User(name="Eve", address=Address(street="456 Oak Ave", city="Boston"), verified=True)
        assert (
            openapi_dumps({"user": verified_user})
            == b'{"user":{"name":"Eve","address":{"street":"456 Oak Ave","city":"Boston"},"verified":true}}'
        )

    def test_pydantic_model_with_optional_fields(self) -> None:
        class User(pydantic.BaseModel):
            name: str
            email: Union[str, None]
            phone: Union[str, None]

        all_none = User(name="Eve", email=None, phone=None)
        assert openapi_dumps({"model": all_none}) == b'{"model":{"name":"Eve","email":null,"phone":null}}'

        partial = User(name="Frank", email="frank@example.com", phone=None)
        assert openapi_dumps({"model": partial}) == b'{"model":{"name":"Frank","email":"frank@example.com","phone":null}}'
| {
"repo_id": "openai/openai-python",
"file_path": "tests/test_utils/test_json.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/video_model_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypeAlias
__all__ = ["VideoModelParam"]
# Video model selector: any model ID string is accepted, with the known Sora
# model names (including dated snapshots) enumerated for static type checking.
VideoModelParam: TypeAlias = Union[
str, Literal["sora-2", "sora-2-pro", "sora-2-2025-10-06", "sora-2-pro-2025-10-06", "sora-2-2025-12-08"]
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_model_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/graders/grader_inputs.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from ..responses.response_input_text import ResponseInputText
from ..responses.response_input_audio import ResponseInputAudio
__all__ = ["GraderInputs", "GraderInputItem", "GraderInputItemOutputText", "GraderInputItemInputImage"]
class GraderInputItemOutputText(BaseModel):
    """A text output from the model."""

    text: str
    """The text output from the model."""

    type: Literal["output_text"]
    """The type of the output text. Always `output_text`."""
class GraderInputItemInputImage(BaseModel):
    """An image input block used within EvalItem content arrays."""

    image_url: str
    """The URL of the image input."""

    type: Literal["input_image"]
    """The type of the image input. Always `input_image`."""

    detail: Optional[str] = None
    """The detail level of the image to be sent to the model.

    One of `high`, `low`, or `auto`. Defaults to `auto`.
    """
# A single grader input item: plain text, a typed input/output text block,
# an image, or audio input.
GraderInputItem: TypeAlias = Union[
str, ResponseInputText, GraderInputItemOutputText, GraderInputItemInputImage, ResponseInputAudio
]
# The full grader input payload is an ordered list of items.
GraderInputs: TypeAlias = List[GraderInputItem]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/graders/grader_inputs.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/graders/grader_inputs_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..responses.response_input_text_param import ResponseInputTextParam
from ..responses.response_input_audio_param import ResponseInputAudioParam
__all__ = [
"GraderInputsParam",
"GraderInputsParamItem",
"GraderInputsParamItemOutputText",
"GraderInputsParamItemInputImage",
]
class GraderInputsParamItemOutputText(TypedDict, total=False):
"""A text output from the model."""
text: Required[str]
"""The text output from the model."""
type: Required[Literal["output_text"]]
"""The type of the output text. Always `output_text`."""
class GraderInputsParamItemInputImage(TypedDict, total=False):
"""An image input block used within EvalItem content arrays."""
image_url: Required[str]
"""The URL of the image input."""
type: Required[Literal["input_image"]]
"""The type of the image input. Always `input_image`."""
detail: str
"""The detail level of the image to be sent to the model.
One of `high`, `low`, or `auto`. Defaults to `auto`.
"""
# A single grader input item parameter: plain text, a typed text block,
# an image, or audio input.
GraderInputsParamItem: TypeAlias = Union[
str,
ResponseInputTextParam,
GraderInputsParamItemOutputText,
GraderInputsParamItemInputImage,
ResponseInputAudioParam,
]
# The full grader input payload is an ordered list of items.
GraderInputsParam: TypeAlias = List[GraderInputsParamItem]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/graders/grader_inputs_param.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputAudioBufferDtmfEventReceivedEvent"]
class InputAudioBufferDtmfEventReceivedEvent(BaseModel):
    """**SIP Only:** Returned when a DTMF event is received.

    A DTMF event is a message that represents a telephone keypad press
    (0–9, *, #, A–D). The `event` property is the key the user pressed, and
    `received_at` is the UTC Unix timestamp at which the server received the
    event.
    """

    event: str
    """The telephone keypad that was pressed by the user."""

    received_at: int
    """UTC Unix Timestamp when DTMF Event was received by server."""

    type: Literal["input_audio_buffer.dtmf_event_received"]
    """The event type, must be `input_audio_buffer.dtmf_event_received`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/input_audio_buffer_dtmf_event_received_event.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/compacted_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import Literal
from ..._models import BaseModel
from .response_usage import ResponseUsage
from .response_output_item import ResponseOutputItem
__all__ = ["CompactedResponse"]
class CompactedResponse(BaseModel):
    """The result of a conversation compaction pass."""

    id: str
    """The unique identifier for the compacted response."""

    created_at: int
    """Unix timestamp (in seconds) when the compacted conversation was created."""

    object: Literal["response.compaction"]
    """The object type. Always `response.compaction`."""

    output: List[ResponseOutputItem]
    """The compacted list of output items.

    This is a list of all user messages, followed by a single compaction item.
    """

    usage: ResponseUsage
    """
    Token accounting for the compaction pass, including cached, reasoning, and total
    tokens.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/compacted_response.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_compact_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict
from .response_input_item_param import ResponseInputItemParam
__all__ = ["ResponseCompactParams"]
class ResponseCompactParams(TypedDict, total=False):
model: Required[
Union[
Literal[
"gpt-5.2",
"gpt-5.2-2025-12-11",
"gpt-5.2-chat-latest",
"gpt-5.2-pro",
"gpt-5.2-pro-2025-12-11",
"gpt-5.1",
"gpt-5.1-2025-11-13",
"gpt-5.1-codex",
"gpt-5.1-mini",
"gpt-5.1-chat-latest",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5-2025-08-07",
"gpt-5-mini-2025-08-07",
"gpt-5-nano-2025-08-07",
"gpt-5-chat-latest",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"o4-mini",
"o4-mini-2025-04-16",
"o3",
"o3-2025-04-16",
"o3-mini",
"o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"o1-preview",
"o1-preview-2024-09-12",
"o1-mini",
"o1-mini-2024-09-12",
"gpt-4o",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-search-preview",
"gpt-4o-mini-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-mini-search-preview-2025-03-11",
"chatgpt-4o-latest",
"codex-mini-latest",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
"o1-pro",
"o1-pro-2025-03-19",
"o3-pro",
"o3-pro-2025-06-10",
"o3-deep-research",
"o3-deep-research-2025-06-26",
"o4-mini-deep-research",
"o4-mini-deep-research-2025-06-26",
"computer-use-preview",
"computer-use-preview-2025-03-11",
"gpt-5-codex",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5.1-codex-max",
],
str,
None,
]
]
"""Model ID used to generate the response, like `gpt-5` or `o3`.
OpenAI offers a wide range of models with different capabilities, performance
characteristics, and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
"""
input: Union[str, Iterable[ResponseInputItemParam], None]
"""Text, image, or file inputs to the model, used to generate a response"""
instructions: Optional[str]
"""
A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.
"""
previous_response_id: Optional[str]
"""The unique ID of the previous response to the model.
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
"""
prompt_cache_key: Optional[str]
"""A key to use when reading from or writing to the prompt cache."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_compact_params.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_compaction_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCompactionItem"]
class ResponseCompactionItem(BaseModel):
"""
A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
"""
id: str
"""The unique ID of the compaction item."""
encrypted_content: str
"""The encrypted content that was produced by compaction."""
type: Literal["compaction"]
"""The type of the item. Always `compaction`."""
created_by: Optional[str] = None
"""The identifier of the actor that created the item."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_compaction_item.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_compaction_item_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCompactionItemParam"]
class ResponseCompactionItemParam(BaseModel):
"""
A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
"""
encrypted_content: str
"""The encrypted content of the compaction summary."""
type: Literal["compaction"]
"""The type of the item. Always `compaction`."""
id: Optional[str] = None
"""The ID of the compaction item."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_compaction_item_param.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_compaction_item_param_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseCompactionItemParamParam"]
class ResponseCompactionItemParamParam(TypedDict, total=False):
"""
A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).
"""
encrypted_content: Required[str]
"""The encrypted content of the compaction summary."""
type: Required[Literal["compaction"]]
"""The type of the item. Always `compaction`."""
id: Optional[str]
"""The ID of the compaction item."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_compaction_item_param_param.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/apply_patch_tool.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ApplyPatchTool"]
class ApplyPatchTool(BaseModel):
"""Allows the assistant to create, delete, or update files using unified diffs."""
type: Literal["apply_patch"]
"""The type of the tool. Always `apply_patch`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/apply_patch_tool.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/apply_patch_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ApplyPatchToolParam"]
class ApplyPatchToolParam(TypedDict, total=False):
"""Allows the assistant to create, delete, or update files using unified diffs."""
type: Required[Literal["apply_patch"]]
"""The type of the tool. Always `apply_patch`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/apply_patch_tool_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/function_shell_tool.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .container_auto import ContainerAuto
from .local_environment import LocalEnvironment
from .container_reference import ContainerReference
__all__ = ["FunctionShellTool", "Environment"]
Environment: TypeAlias = Annotated[
Union[ContainerAuto, LocalEnvironment, ContainerReference, None], PropertyInfo(discriminator="type")
]
class FunctionShellTool(BaseModel):
"""A tool that allows the model to execute shell commands."""
type: Literal["shell"]
"""The type of the shell tool. Always `shell`."""
environment: Optional[Environment] = None
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/function_shell_tool.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/function_shell_tool_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .container_auto_param import ContainerAutoParam
from .local_environment_param import LocalEnvironmentParam
from .container_reference_param import ContainerReferenceParam
__all__ = ["FunctionShellToolParam", "Environment"]
Environment: TypeAlias = Union[ContainerAutoParam, LocalEnvironmentParam, ContainerReferenceParam]
class FunctionShellToolParam(TypedDict, total=False):
"""A tool that allows the model to execute shell commands."""
type: Required[Literal["shell"]]
"""The type of the shell tool. Always `shell`."""
environment: Optional[Environment]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/function_shell_tool_param.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_apply_patch_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = [
"ResponseApplyPatchToolCall",
"Operation",
"OperationCreateFile",
"OperationDeleteFile",
"OperationUpdateFile",
]
class OperationCreateFile(BaseModel):
"""Instruction describing how to create a file via the apply_patch tool."""
diff: str
"""Diff to apply."""
path: str
"""Path of the file to create."""
type: Literal["create_file"]
"""Create a new file with the provided diff."""
class OperationDeleteFile(BaseModel):
"""Instruction describing how to delete a file via the apply_patch tool."""
path: str
"""Path of the file to delete."""
type: Literal["delete_file"]
"""Delete the specified file."""
class OperationUpdateFile(BaseModel):
"""Instruction describing how to update a file via the apply_patch tool."""
diff: str
"""Diff to apply."""
path: str
"""Path of the file to update."""
type: Literal["update_file"]
"""Update an existing file with the provided diff."""
Operation: TypeAlias = Annotated[
Union[OperationCreateFile, OperationDeleteFile, OperationUpdateFile], PropertyInfo(discriminator="type")
]
class ResponseApplyPatchToolCall(BaseModel):
"""A tool call that applies file diffs by creating, deleting, or updating files."""
id: str
"""The unique ID of the apply patch tool call.
Populated when this item is returned via API.
"""
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
operation: Operation
"""
One of the create_file, delete_file, or update_file operations applied via
apply_patch.
"""
status: Literal["in_progress", "completed"]
"""The status of the apply patch tool call. One of `in_progress` or `completed`."""
type: Literal["apply_patch_call"]
"""The type of the item. Always `apply_patch_call`."""
created_by: Optional[str] = None
"""The ID of the entity that created this tool call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_apply_patch_tool_call.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_apply_patch_tool_call_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseApplyPatchToolCallOutput"]
class ResponseApplyPatchToolCallOutput(BaseModel):
"""The output emitted by an apply patch tool call."""
id: str
"""The unique ID of the apply patch tool call output.
Populated when this item is returned via API.
"""
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
status: Literal["completed", "failed"]
"""The status of the apply patch tool call output. One of `completed` or `failed`."""
type: Literal["apply_patch_call_output"]
"""The type of the item. Always `apply_patch_call_output`."""
created_by: Optional[str] = None
"""The ID of the entity that created this tool call output."""
output: Optional[str] = None
"""Optional textual output returned by the apply patch tool."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_apply_patch_tool_call_output.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_function_shell_call_output_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = ["ResponseFunctionShellCallOutputContent", "Outcome", "OutcomeTimeout", "OutcomeExit"]
class OutcomeTimeout(BaseModel):
"""Indicates that the shell call exceeded its configured time limit."""
type: Literal["timeout"]
"""The outcome type. Always `timeout`."""
class OutcomeExit(BaseModel):
"""Indicates that the shell commands finished and returned an exit code."""
exit_code: int
"""The exit code returned by the shell process."""
type: Literal["exit"]
"""The outcome type. Always `exit`."""
Outcome: TypeAlias = Annotated[Union[OutcomeTimeout, OutcomeExit], PropertyInfo(discriminator="type")]
class ResponseFunctionShellCallOutputContent(BaseModel):
"""Captured stdout and stderr for a portion of a shell tool call output."""
outcome: Outcome
"""The exit or timeout outcome associated with this shell call."""
stderr: str
"""Captured stderr output for the shell call."""
stdout: str
"""Captured stdout output for the shell call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_shell_call_output_content.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_shell_call_output_content_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = ["ResponseFunctionShellCallOutputContentParam", "Outcome", "OutcomeTimeout", "OutcomeExit"]
class OutcomeTimeout(TypedDict, total=False):
"""Indicates that the shell call exceeded its configured time limit."""
type: Required[Literal["timeout"]]
"""The outcome type. Always `timeout`."""
class OutcomeExit(TypedDict, total=False):
"""Indicates that the shell commands finished and returned an exit code."""
exit_code: Required[int]
"""The exit code returned by the shell process."""
type: Required[Literal["exit"]]
"""The outcome type. Always `exit`."""
Outcome: TypeAlias = Union[OutcomeTimeout, OutcomeExit]
class ResponseFunctionShellCallOutputContentParam(TypedDict, total=False):
"""Captured stdout and stderr for a portion of a shell tool call output."""
outcome: Required[Outcome]
"""The exit or timeout outcome associated with this shell call."""
stderr: Required[str]
"""Captured stderr output for the shell call."""
stdout: Required[str]
"""Captured stdout output for the shell call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_shell_call_output_content_param.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_shell_tool_call.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .response_local_environment import ResponseLocalEnvironment
from .response_container_reference import ResponseContainerReference
__all__ = ["ResponseFunctionShellToolCall", "Action", "Environment"]
class Action(BaseModel):
"""The shell commands and limits that describe how to run the tool call."""
commands: List[str]
max_output_length: Optional[int] = None
"""Optional maximum number of characters to return from each command."""
timeout_ms: Optional[int] = None
"""Optional timeout in milliseconds for the commands."""
Environment: TypeAlias = Annotated[
Union[ResponseLocalEnvironment, ResponseContainerReference, None], PropertyInfo(discriminator="type")
]
class ResponseFunctionShellToolCall(BaseModel):
"""A tool call that executes one or more shell commands in a managed environment."""
id: str
"""The unique ID of the shell tool call.
Populated when this item is returned via API.
"""
action: Action
"""The shell commands and limits that describe how to run the tool call."""
call_id: str
"""The unique ID of the shell tool call generated by the model."""
environment: Optional[Environment] = None
"""Represents the use of a local environment to perform shell actions."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the shell call.
One of `in_progress`, `completed`, or `incomplete`.
"""
type: Literal["shell_call"]
"""The type of the item. Always `shell_call`."""
created_by: Optional[str] = None
"""The ID of the entity that created this tool call."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_shell_tool_call.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_shell_tool_call_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = [
"ResponseFunctionShellToolCallOutput",
"Output",
"OutputOutcome",
"OutputOutcomeTimeout",
"OutputOutcomeExit",
]
class OutputOutcomeTimeout(BaseModel):
"""Indicates that the shell call exceeded its configured time limit."""
type: Literal["timeout"]
"""The outcome type. Always `timeout`."""
class OutputOutcomeExit(BaseModel):
"""Indicates that the shell commands finished and returned an exit code."""
exit_code: int
"""Exit code from the shell process."""
type: Literal["exit"]
"""The outcome type. Always `exit`."""
OutputOutcome: TypeAlias = Annotated[Union[OutputOutcomeTimeout, OutputOutcomeExit], PropertyInfo(discriminator="type")]
class Output(BaseModel):
"""The content of a shell tool call output that was emitted."""
outcome: OutputOutcome
"""
Represents either an exit outcome (with an exit code) or a timeout outcome for a
shell call output chunk.
"""
stderr: str
"""The standard error output that was captured."""
stdout: str
"""The standard output that was captured."""
created_by: Optional[str] = None
"""The identifier of the actor that created the item."""
class ResponseFunctionShellToolCallOutput(BaseModel):
"""The output of a shell tool call that was emitted."""
id: str
"""The unique ID of the shell call output.
Populated when this item is returned via API.
"""
call_id: str
"""The unique ID of the shell tool call generated by the model."""
max_output_length: Optional[int] = None
"""The maximum length of the shell command output.
This is generated by the model and should be passed back with the raw output.
"""
output: List[Output]
"""An array of shell call output contents"""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the shell call output.
One of `in_progress`, `completed`, or `incomplete`.
"""
type: Literal["shell_call_output"]
"""The type of the shell call output. Always `shell_call_output`."""
created_by: Optional[str] = None
"""The identifier of the actor that created the item."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_shell_tool_call_output.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/tool_choice_apply_patch.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ToolChoiceApplyPatch"]
class ToolChoiceApplyPatch(BaseModel):
"""Forces the model to call the apply_patch tool when executing a tool call."""
type: Literal["apply_patch"]
"""The tool to call. Always `apply_patch`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_apply_patch.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_apply_patch_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ToolChoiceApplyPatchParam"]
class ToolChoiceApplyPatchParam(TypedDict, total=False):
"""Forces the model to call the apply_patch tool when executing a tool call."""
type: Required[Literal["apply_patch"]]
"""The tool to call. Always `apply_patch`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_apply_patch_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_shell.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ToolChoiceShell"]
class ToolChoiceShell(BaseModel):
"""Forces the model to call the shell tool when a tool call is required."""
type: Literal["shell"]
"""The tool to call. Always `shell`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_shell.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_shell_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ToolChoiceShellParam"]
class ToolChoiceShellParam(TypedDict, total=False):
"""Forces the model to call the shell tool when a tool call is required."""
type: Required[Literal["shell"]]
"""The tool to call. Always `shell`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_shell_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:examples/responses_input_tokens.py | from typing import List
from openai import OpenAI
from openai.types.responses.tool_param import ToolParam
from openai.types.responses.response_input_item_param import ResponseInputItemParam
def main() -> None:
client = OpenAI()
tools: List[ToolParam] = [
{
"type": "function",
"name": "get_current_weather",
"description": "Get current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g. San Francisco, CA",
},
"unit": {
"type": "string",
"enum": ["c", "f"],
"description": "Temperature unit to use",
},
},
"required": ["location", "unit"],
"additionalProperties": False,
},
"strict": True,
}
]
input_items: List[ResponseInputItemParam] = [
{
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": "What's the weather in San Francisco today?"}],
}
]
response = client.responses.input_tokens.count(
model="gpt-5",
instructions="You are a concise assistant.",
input=input_items,
tools=tools,
tool_choice={"type": "function", "name": "get_current_weather"},
)
print(f"input tokens: {response.input_tokens}")
if __name__ == "__main__":
main()
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses_input_tokens.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/responses/input_tokens.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.responses import input_token_count_params
from ...types.responses.tool_param import ToolParam
from ...types.shared_params.reasoning import Reasoning
from ...types.responses.response_input_item_param import ResponseInputItemParam
from ...types.responses.input_token_count_response import InputTokenCountResponse
__all__ = ["InputTokens", "AsyncInputTokens"]
class InputTokens(SyncAPIResource):
@cached_property
def with_raw_response(self) -> InputTokensWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return InputTokensWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> InputTokensWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return InputTokensWithStreamingResponse(self)
def count(
self,
*,
conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
model: Optional[str] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
text: Optional[input_token_count_params.Text] | Omit = omit,
tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
tools: Optional[Iterable[ToolParam]] | Omit = omit,
truncation: Literal["auto", "disabled"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> InputTokenCountResponse:
"""
Returns input token counts of the request.
Returns an object with `object` set to `response.input_tokens` and an
`input_tokens` count.
Args:
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
response completes.
input: Text, image, or file inputs to the model, used to generate a response
instructions: A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
reasoning: **gpt-5 and o-series models only** Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
text: Configuration options for a text response from the model. Can be plain text or
structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
tool_choice: Controls which tool the model should use, if any.
tools: An array of tools the model may call while generating a response. You can
specify which tool to use by setting the `tool_choice` parameter.
truncation: The truncation strategy to use for the model response. - `auto`: If the input to
this Response exceeds the model's context window size, the model will truncate
the response to fit the context window by dropping items from the beginning of
the conversation. - `disabled` (default): If the input size will exceed the
context window size for a model, the request will fail with a 400 error.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/responses/input_tokens",
body=maybe_transform(
{
"conversation": conversation,
"input": input,
"instructions": instructions,
"model": model,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"reasoning": reasoning,
"text": text,
"tool_choice": tool_choice,
"tools": tools,
"truncation": truncation,
},
input_token_count_params.InputTokenCountParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=InputTokenCountResponse,
)
class AsyncInputTokens(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncInputTokensWithRawResponse:
        """
        Use this property as a prefix on any HTTP method call to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncInputTokensWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncInputTokensWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncInputTokensWithStreamingResponse(self)

    async def count(
        self,
        *,
        conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
        input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        model: Optional[str] | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        text: Optional[input_token_count_params.Text] | Omit = omit,
        tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
        tools: Optional[Iterable[ToolParam]] | Omit = omit,
        truncation: Literal["auto", "disabled"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> InputTokenCountResponse:
        """
        Returns input token counts of the request.

        The result object has `object` set to `response.input_tokens` and an
        `input_tokens` count.

        Args:
          conversation: The conversation that this response belongs to. Items from this conversation
              are prepended to `input_items` for this response request; input and output
              items from this response are automatically added to the conversation after the
              response completes.

          input: Text, image, or file inputs to the model, used to generate a response

          instructions: A system (or developer) message inserted into the model's context. When used
              along with `previous_response_id`, the instructions from a previous response
              will not be carried over to the next response, making it simple to swap out
              system (or developer) messages in new responses.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.

          reasoning: **gpt-5 and o-series models only** Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: Controls which tool the model should use, if any.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

          truncation: The truncation strategy to use for the model response. - `auto`: If the input to
              this Response exceeds the model's context window size, the model will truncate
              the response to fit the context window by dropping items from the beginning of
              the conversation. - `disabled` (default): If the input size will exceed the
              context window size for a model, the request will fail with a 400 error.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Collect the request parameters first; `async_maybe_transform` drops
        # omitted values and applies the TypedDict's wire-format transforms.
        params = {
            "conversation": conversation,
            "input": input,
            "instructions": instructions,
            "model": model,
            "parallel_tool_calls": parallel_tool_calls,
            "previous_response_id": previous_response_id,
            "reasoning": reasoning,
            "text": text,
            "tool_choice": tool_choice,
            "tools": tools,
            "truncation": truncation,
        }
        return await self._post(
            "/responses/input_tokens",
            body=await async_maybe_transform(params, input_token_count_params.InputTokenCountParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=InputTokenCountResponse,
        )
class InputTokensWithRawResponse:
    """Variant of :class:`InputTokens` whose methods return the raw HTTP response."""

    def __init__(self, input_tokens: InputTokens) -> None:
        self._input_tokens = input_tokens

        self.count = _legacy_response.to_raw_response_wrapper(input_tokens.count)
class AsyncInputTokensWithRawResponse:
    """Variant of :class:`AsyncInputTokens` whose methods return the raw HTTP response."""

    def __init__(self, input_tokens: AsyncInputTokens) -> None:
        self._input_tokens = input_tokens

        self.count = _legacy_response.async_to_raw_response_wrapper(input_tokens.count)
class InputTokensWithStreamingResponse:
    """Variant of :class:`InputTokens` whose methods stream the response body."""

    def __init__(self, input_tokens: InputTokens) -> None:
        self._input_tokens = input_tokens

        self.count = to_streamed_response_wrapper(input_tokens.count)
class AsyncInputTokensWithStreamingResponse:
    """Variant of :class:`AsyncInputTokens` whose methods stream the response body."""

    def __init__(self, input_tokens: AsyncInputTokens) -> None:
        self._input_tokens = input_tokens

        self.count = async_to_streamed_response_wrapper(input_tokens.count)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/responses/input_tokens.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/input_token_count_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal, TypeAlias, TypedDict
from .tool_param import ToolParam
from .tool_choice_options import ToolChoiceOptions
from .tool_choice_mcp_param import ToolChoiceMcpParam
from .tool_choice_shell_param import ToolChoiceShellParam
from .tool_choice_types_param import ToolChoiceTypesParam
from ..shared_params.reasoning import Reasoning
from .tool_choice_custom_param import ToolChoiceCustomParam
from .response_input_item_param import ResponseInputItemParam
from .tool_choice_allowed_param import ToolChoiceAllowedParam
from .tool_choice_function_param import ToolChoiceFunctionParam
from .tool_choice_apply_patch_param import ToolChoiceApplyPatchParam
from .response_conversation_param_param import ResponseConversationParamParam
from .response_format_text_config_param import ResponseFormatTextConfigParam
__all__ = ["InputTokenCountParams", "Conversation", "Text", "ToolChoice"]
class InputTokenCountParams(TypedDict, total=False):
    """Request parameters for the `/responses/input_tokens` token-counting endpoint.

    Every key is optional (``total=False``); the fields mirror the corresponding
    parameters of a Responses API create request.
    """

    conversation: Optional[Conversation]
    """The conversation that this response belongs to.

    Items from this conversation are prepended to `input_items` for this response
    request. Input items and output items from this response are automatically added
    to this conversation after this response completes.
    """

    input: Union[str, Iterable[ResponseInputItemParam], None]
    """Text, image, or file inputs to the model, used to generate a response"""

    instructions: Optional[str]
    """
    A system (or developer) message inserted into the model's context. When used
    along with `previous_response_id`, the instructions from a previous response
    will not be carried over to the next response. This makes it simple to swap out
    system (or developer) messages in new responses.
    """

    model: Optional[str]
    """Model ID used to generate the response, like `gpt-4o` or `o3`.

    OpenAI offers a wide range of models with different capabilities, performance
    characteristics, and price points. Refer to the
    [model guide](https://platform.openai.com/docs/models) to browse and compare
    available models.
    """

    parallel_tool_calls: Optional[bool]
    """Whether to allow the model to run tool calls in parallel."""

    previous_response_id: Optional[str]
    """The unique ID of the previous response to the model.

    Use this to create multi-turn conversations. Learn more about
    [conversation state](https://platform.openai.com/docs/guides/conversation-state).
    Cannot be used in conjunction with `conversation`.
    """

    reasoning: Optional[Reasoning]
    """
    **gpt-5 and o-series models only** Configuration options for
    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
    """

    text: Optional[Text]
    """Configuration options for a text response from the model.

    Can be plain text or structured JSON data. Learn more:

    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    """

    tool_choice: Optional[ToolChoice]
    """Controls which tool the model should use, if any."""

    tools: Optional[Iterable[ToolParam]]
    """An array of tools the model may call while generating a response.

    You can specify which tool to use by setting the `tool_choice` parameter.
    """

    truncation: Literal["auto", "disabled"]
    """The truncation strategy to use for the model response.

    - `auto`: If the input to this Response exceeds the model's context window size,
      the model will truncate the response to fit the context window by dropping
      items from the beginning of the conversation.
    - `disabled` (default): If the input size will exceed the context window size
      for a model, the request will fail with a 400 error.
    """
Conversation: TypeAlias = Union[str, ResponseConversationParamParam]
class Text(TypedDict, total=False):
    """Configuration options for a text response from the model.

    Can be plain text or structured JSON data. Learn more:

    - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
    - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
    """

    format: ResponseFormatTextConfigParam
    """An object specifying the format that the model must output.

    Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
    ensures the model will match your supplied JSON schema. Learn more in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    The default format is `{ "type": "text" }` with no additional options.

    **Not recommended for gpt-4o and newer models:**

    Setting to `{ "type": "json_object" }` enables the older JSON mode, which
    ensures the message the model generates is valid JSON. Using `json_schema` is
    preferred for models that support it.
    """

    verbosity: Optional[Literal["low", "medium", "high"]]
    """Constrains the verbosity of the model's response.

    Lower values will result in more concise responses, while higher values will
    result in more verbose responses. Currently supported values are `low`,
    `medium`, and `high`.
    """
# Union of every accepted `tool_choice` value: the plain string options plus
# each typed tool-choice parameter object.
ToolChoice: TypeAlias = Union[
    ToolChoiceOptions,
    ToolChoiceAllowedParam,
    ToolChoiceTypesParam,
    ToolChoiceFunctionParam,
    ToolChoiceMcpParam,
    ToolChoiceCustomParam,
    ToolChoiceApplyPatchParam,
    ToolChoiceShellParam,
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/input_token_count_params.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/input_token_count_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["InputTokenCountResponse"]
class InputTokenCountResponse(BaseModel):
    """Response returned by the `/responses/input_tokens` endpoint."""

    # Number of input tokens counted for the supplied request parameters.
    input_tokens: int

    # Object discriminator; always `"response.input_tokens"`.
    object: Literal["response.input_tokens"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/input_token_count_response.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/responses/test_input_tokens.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types.responses import InputTokenCountResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestInputTokens:
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_count(self, client: OpenAI) -> None:
        # Bare call: every parameter of `count` is optional.
        counted = client.responses.input_tokens.count()
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    def test_method_count_with_all_params(self, client: OpenAI) -> None:
        # Exercise every optional parameter in a single request.
        reasoning = {
            "effort": "none",
            "generate_summary": "auto",
            "summary": "auto",
        }
        text = {
            "format": {"type": "text"},
            "verbosity": "low",
        }
        tools = [
            {
                "name": "name",
                "parameters": {"foo": "bar"},
                "strict": True,
                "type": "function",
                "description": "description",
            }
        ]
        counted = client.responses.input_tokens.count(
            conversation="string",
            input="string",
            instructions="instructions",
            model="model",
            parallel_tool_calls=True,
            previous_response_id="resp_123",
            reasoning=reasoning,
            text=text,
            tool_choice="none",
            tools=tools,
            truncation="auto",
        )
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    def test_raw_response_count(self, client: OpenAI) -> None:
        http_response = client.responses.input_tokens.with_raw_response.count()

        assert http_response.is_closed is True
        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
        counted = http_response.parse()
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    def test_streaming_response_count(self, client: OpenAI) -> None:
        with client.responses.input_tokens.with_streaming_response.count() as http_response:
            assert not http_response.is_closed
            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"

            counted = http_response.parse()
            assert_matches_type(InputTokenCountResponse, counted, path=["response"])

        assert cast(Any, http_response.is_closed) is True
class TestAsyncInputTokens:
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_count(self, async_client: AsyncOpenAI) -> None:
        # Bare call: every parameter of `count` is optional.
        counted = await async_client.responses.input_tokens.count()
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    async def test_method_count_with_all_params(self, async_client: AsyncOpenAI) -> None:
        # Exercise every optional parameter in a single request.
        reasoning = {
            "effort": "none",
            "generate_summary": "auto",
            "summary": "auto",
        }
        text = {
            "format": {"type": "text"},
            "verbosity": "low",
        }
        tools = [
            {
                "name": "name",
                "parameters": {"foo": "bar"},
                "strict": True,
                "type": "function",
                "description": "description",
            }
        ]
        counted = await async_client.responses.input_tokens.count(
            conversation="string",
            input="string",
            instructions="instructions",
            model="model",
            parallel_tool_calls=True,
            previous_response_id="resp_123",
            reasoning=reasoning,
            text=text,
            tool_choice="none",
            tools=tools,
            truncation="auto",
        )
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    async def test_raw_response_count(self, async_client: AsyncOpenAI) -> None:
        http_response = await async_client.responses.input_tokens.with_raw_response.count()

        assert http_response.is_closed is True
        assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
        counted = http_response.parse()
        assert_matches_type(InputTokenCountResponse, counted, path=["response"])

    @parametrize
    async def test_streaming_response_count(self, async_client: AsyncOpenAI) -> None:
        async with async_client.responses.input_tokens.with_streaming_response.count() as http_response:
            assert not http_response.is_closed
            assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"

            counted = await http_response.parse()
            assert_matches_type(InputTokenCountResponse, counted, path=["response"])

        assert cast(Any, http_response.is_closed) is True
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/responses/test_input_tokens.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/audio/transcription_diarized.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .transcription_diarized_segment import TranscriptionDiarizedSegment
__all__ = ["TranscriptionDiarized", "Usage", "UsageTokens", "UsageTokensInputTokenDetails", "UsageDuration"]
class UsageTokensInputTokenDetails(BaseModel):
    """Details about the input tokens billed for this request.

    Both counts default to ``None`` when not reported by the API.
    """

    audio_tokens: Optional[int] = None
    """Number of audio tokens billed for this request."""

    text_tokens: Optional[int] = None
    """Number of text tokens billed for this request."""
class UsageTokens(BaseModel):
    """Usage statistics for models billed by token usage.

    One variant of the module-level ``Usage`` union, discriminated on ``type``.
    """

    input_tokens: int
    """Number of input tokens billed for this request."""

    output_tokens: int
    """Number of output tokens generated."""

    total_tokens: int
    """Total number of tokens used (input + output)."""

    type: Literal["tokens"]
    """The type of the usage object. Always `tokens` for this variant."""

    input_token_details: Optional[UsageTokensInputTokenDetails] = None
    """Details about the input tokens billed for this request."""
class UsageDuration(BaseModel):
    """Usage statistics for models billed by audio input duration.

    One variant of the module-level ``Usage`` union, discriminated on ``type``.
    """

    seconds: float
    """Duration of the input audio in seconds."""

    type: Literal["duration"]
    """The type of the usage object. Always `duration` for this variant."""
Usage: TypeAlias = Annotated[Union[UsageTokens, UsageDuration], PropertyInfo(discriminator="type")]
class TranscriptionDiarized(BaseModel):
    """A diarized transcription response returned by the model.

    Includes the combined transcript and speaker-segment annotations.
    """

    duration: float
    """Duration of the input audio in seconds."""

    segments: List[TranscriptionDiarizedSegment]
    """Segments of the transcript annotated with timestamps and speaker labels."""

    task: Literal["transcribe"]
    """The type of task that was run. Always `transcribe`."""

    text: str
    """The concatenated transcript text for the entire audio input."""

    usage: Optional[Usage] = None
    """Token or duration usage statistics for the request."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/audio/transcription_diarized.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/audio/transcription_diarized_segment.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["TranscriptionDiarizedSegment"]
class TranscriptionDiarizedSegment(BaseModel):
    """A segment of diarized transcript text with speaker metadata.

    Timestamps are expressed in seconds of the input audio.
    """

    id: str
    """Unique identifier for the segment."""

    end: float
    """End timestamp of the segment in seconds."""

    speaker: str
    """Speaker label for this segment.

    When known speakers are provided, the label matches `known_speaker_names[]`.
    Otherwise speakers are labeled sequentially using capital letters (`A`, `B`,
    ...).
    """

    start: float
    """Start timestamp of the segment in seconds."""

    text: str
    """Transcript text for this segment."""

    type: Literal["transcript.text.segment"]
    """The type of the segment. Always `transcript.text.segment`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/audio/transcription_diarized_segment.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/audio/transcription_text_segment_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["TranscriptionTextSegmentEvent"]
class TranscriptionTextSegmentEvent(BaseModel):
    """Emitted when a diarized transcription returns a completed segment with
    speaker information.

    Only emitted when you
    [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
    with `stream` set to `true` and `response_format` set to `diarized_json`.
    """

    id: str
    """Unique identifier for the segment."""

    end: float
    """End timestamp of the segment in seconds."""

    speaker: str
    """Speaker label for this segment."""

    start: float
    """Start timestamp of the segment in seconds."""

    text: str
    """Transcript text for this segment."""

    type: Literal["transcript.text.segment"]
    """The type of the event. Always `transcript.text.segment`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/audio/transcription_text_segment_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:examples/video.py | #!/usr/bin/env -S poetry run python
import asyncio
from openai import AsyncOpenAI
client = AsyncOpenAI()
async def main() -> None:
    """Generate a short video with sora-2 and report the final status."""
    video = await client.videos.create_and_poll(
        model="sora-2",
        prompt="A video of the words 'Thank you' in sparkling letters",
    )

    if video.status != "completed":
        print("Video creation failed. Status: ", video.status)
    else:
        print("Video successfully completed: ", video)


asyncio.run(main())
| {
"repo_id": "openai/openai-python",
"file_path": "examples/video.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/beta/chatkit/chatkit.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .threads import (
Threads,
AsyncThreads,
ThreadsWithRawResponse,
AsyncThreadsWithRawResponse,
ThreadsWithStreamingResponse,
AsyncThreadsWithStreamingResponse,
)
from .sessions import (
Sessions,
AsyncSessions,
SessionsWithRawResponse,
AsyncSessionsWithRawResponse,
SessionsWithStreamingResponse,
AsyncSessionsWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["ChatKit", "AsyncChatKit"]
class ChatKit(SyncAPIResource):
    @cached_property
    def sessions(self) -> Sessions:
        """The ChatKit sessions sub-resource."""
        return Sessions(self._client)

    @cached_property
    def threads(self) -> Threads:
        """The ChatKit threads sub-resource."""
        return Threads(self._client)

    @cached_property
    def with_raw_response(self) -> ChatKitWithRawResponse:
        """
        Use this property as a prefix on any HTTP method call to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ChatKitWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ChatKitWithStreamingResponse(self)
class AsyncChatKit(AsyncAPIResource):
    @cached_property
    def sessions(self) -> AsyncSessions:
        """The ChatKit sessions sub-resource."""
        return AsyncSessions(self._client)

    @cached_property
    def threads(self) -> AsyncThreads:
        """The ChatKit threads sub-resource."""
        return AsyncThreads(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncChatKitWithRawResponse:
        """
        Use this property as a prefix on any HTTP method call to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncChatKitWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncChatKitWithStreamingResponse(self)
class ChatKitWithRawResponse:
    """Raw-response variant of :class:`ChatKit`."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithRawResponse:
        return SessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithRawResponse:
        return ThreadsWithRawResponse(self._chatkit.threads)
class AsyncChatKitWithRawResponse:
    """Raw-response variant of :class:`AsyncChatKit`."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithRawResponse:
        return AsyncSessionsWithRawResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithRawResponse:
        return AsyncThreadsWithRawResponse(self._chatkit.threads)
class ChatKitWithStreamingResponse:
    """Streaming-response variant of :class:`ChatKit`."""

    def __init__(self, chatkit: ChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> SessionsWithStreamingResponse:
        return SessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> ThreadsWithStreamingResponse:
        return ThreadsWithStreamingResponse(self._chatkit.threads)
class AsyncChatKitWithStreamingResponse:
    """Streaming-response variant of :class:`AsyncChatKit`."""

    def __init__(self, chatkit: AsyncChatKit) -> None:
        self._chatkit = chatkit

    @cached_property
    def sessions(self) -> AsyncSessionsWithStreamingResponse:
        return AsyncSessionsWithStreamingResponse(self._chatkit.sessions)

    @cached_property
    def threads(self) -> AsyncThreadsWithStreamingResponse:
        return AsyncThreadsWithStreamingResponse(self._chatkit.threads)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/beta/chatkit/chatkit.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/beta/chatkit/sessions.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.beta.chatkit import (
ChatSessionWorkflowParam,
ChatSessionRateLimitsParam,
ChatSessionExpiresAfterParam,
ChatSessionChatKitConfigurationParam,
session_create_params,
)
from ....types.beta.chatkit.chat_session import ChatSession
from ....types.beta.chatkit.chat_session_workflow_param import ChatSessionWorkflowParam
from ....types.beta.chatkit.chat_session_rate_limits_param import ChatSessionRateLimitsParam
from ....types.beta.chatkit.chat_session_expires_after_param import ChatSessionExpiresAfterParam
from ....types.beta.chatkit.chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam
__all__ = ["Sessions", "AsyncSessions"]
class Sessions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        Use this property as a prefix on any HTTP method call to get back the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        Like `.with_raw_response`, but the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Create a ChatKit session.

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # This endpoint is gated behind the ChatKit beta header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        payload = {
            "user": user,
            "workflow": workflow,
            "chatkit_configuration": chatkit_configuration,
            "expires_after": expires_after,
            "rate_limits": rate_limits,
        }
        return self._post(
            "/chatkit/sessions",
            body=maybe_transform(payload, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    def cancel(
        self,
        session_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """
        Cancel an active ChatKit session and return its most recent metadata.

        Cancelling prevents new requests from using the issued client secret.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        # This endpoint is gated behind the ChatKit beta header.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )
class AsyncSessions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """Prefix any HTTP method call with this property to receive the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """Like `.with_raw_response`, but the response body is not eagerly read.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        user: str,
        workflow: ChatSessionWorkflowParam,
        chatkit_configuration: ChatSessionChatKitConfigurationParam | Omit = omit,
        expires_after: ChatSessionExpiresAfterParam | Omit = omit,
        rate_limits: ChatSessionRateLimitsParam | Omit = omit,
        # The arguments below allow passing extra parameters to the API that are
        # not modelled above; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """Create a ChatKit session.

        Args:
          user: A free-form string that identifies your end user; ensures this Session can
              access other objects that have the same `user` scope.

          workflow: Workflow that powers the session.

          chatkit_configuration: Optional overrides for ChatKit runtime configuration features

          expires_after: Optional override for session expiration timing in seconds from creation.
              Defaults to 10 minutes.

          rate_limits: Optional override for per-minute request limits. When omitted, defaults to 10.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Beta opt-in header; caller-supplied headers take precedence on conflict.
        merged_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        payload = {
            "user": user,
            "workflow": workflow,
            "chatkit_configuration": chatkit_configuration,
            "expires_after": expires_after,
            "rate_limits": rate_limits,
        }
        return await self._post(
            "/chatkit/sessions",
            body=await async_maybe_transform(payload, session_create_params.SessionCreateParams),
            options=make_request_options(
                extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatSession,
        )

    async def cancel(
        self,
        session_id: str,
        *,
        # The arguments below allow passing extra parameters to the API that are
        # not modelled above; values given here override client-level defaults.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatSession:
        """Cancel an active ChatKit session.

        After cancellation the issued client secret can no longer be used for
        new requests; the session's most recent metadata is returned.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not session_id:
            raise ValueError(f"Expected a non-empty value for `session_id` but received {session_id!r}")
        merged_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=merged_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._post(
            f"/chatkit/sessions/{session_id}/cancel",
            options=request_options,
            cast_to=ChatSession,
        )
class SessionsWithRawResponse:
    """Raw-response view over a `Sessions` resource: every method returns the
    unparsed response object."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        _wrap = _legacy_response.to_raw_response_wrapper
        self.create = _wrap(sessions.create)
        self.cancel = _wrap(sessions.cancel)
class AsyncSessionsWithRawResponse:
    """Raw-response view over an `AsyncSessions` resource: every method returns
    the unparsed response object."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = _wrap(sessions.create)
        self.cancel = _wrap(sessions.cancel)
class SessionsWithStreamingResponse:
    """Streaming-response view over a `Sessions` resource: response bodies are
    not eagerly read."""

    def __init__(self, sessions: Sessions) -> None:
        self._sessions = sessions
        _wrap = to_streamed_response_wrapper
        self.create = _wrap(sessions.create)
        self.cancel = _wrap(sessions.cancel)
class AsyncSessionsWithStreamingResponse:
    """Streaming-response view over an `AsyncSessions` resource: response
    bodies are not eagerly read."""

    def __init__(self, sessions: AsyncSessions) -> None:
        self._sessions = sessions
        _wrap = async_to_streamed_response_wrapper
        self.create = _wrap(sessions.create)
        self.cancel = _wrap(sessions.cancel)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/beta/chatkit/sessions.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/beta/chatkit/threads.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Any, cast
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.beta.chatkit import thread_list_params, thread_list_items_params
from ....types.beta.chatkit.chatkit_thread import ChatKitThread
from ....types.beta.chatkit.thread_delete_response import ThreadDeleteResponse
from ....types.beta.chatkit.chatkit_thread_item_list import Data
__all__ = ["Threads", "AsyncThreads"]
class Threads(SyncAPIResource):
    """Synchronous resource for ChatKit threads.

    Supports retrieving, listing, and deleting threads, and listing a thread's
    items. All endpoints here are beta and send the
    ``OpenAI-Beta: chatkit_beta=v1`` header.
    """

    @cached_property
    def with_raw_response(self) -> ThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ThreadsWithStreamingResponse(self)

    def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread by its identifier.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[ChatKitThread]:
        """
        List ChatKit threads with optional pagination and user filters.

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            "/chatkit/threads",
            page=SyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted values are dropped from the query string by the transform.
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread along with its items and stored attachments.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Data]:
        """
        List items that belong to a ChatKit thread.

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=SyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )
class AsyncThreads(AsyncAPIResource):
    """Asynchronous resource for ChatKit threads.

    Mirrors `Threads` with `async`/`await` semantics; list endpoints return
    async paginators. All endpoints here are beta and send the
    ``OpenAI-Beta: chatkit_beta=v1`` header.
    """

    @cached_property
    def with_raw_response(self) -> AsyncThreadsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncThreadsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncThreadsWithStreamingResponse(self)

    async def retrieve(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatKitThread:
        """
        Retrieve a ChatKit thread by its identifier.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._get(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatKitThread,
        )

    # NOTE: not `async` — returns an AsyncPaginator that is awaited/iterated lazily.
    def list(
        self,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ChatKitThread, AsyncConversationCursorPage[ChatKitThread]]:
        """
        List ChatKit threads with optional pagination and user filters.

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          user: Filter threads that belong to this user identifier. Defaults to null to return
              all users.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            "/chatkit/threads",
            page=AsyncConversationCursorPage[ChatKitThread],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                        "user": user,
                    },
                    thread_list_params.ThreadListParams,
                ),
            ),
            model=ChatKitThread,
        )

    async def delete(
        self,
        thread_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ThreadDeleteResponse:
        """
        Delete a ChatKit thread along with its items and stored attachments.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return await self._delete(
            f"/chatkit/threads/{thread_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ThreadDeleteResponse,
        )

    # NOTE: not `async` — returns an AsyncPaginator that is awaited/iterated lazily.
    def list_items(
        self,
        thread_id: str,
        *,
        after: str | Omit = omit,
        before: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[Data, AsyncConversationCursorPage[Data]]:
        """
        List items that belong to a ChatKit thread.

        Args:
          after: List items created after this thread item ID. Defaults to null for the first
              page.

          before: List items created before this thread item ID. Defaults to null for the newest
              results.

          limit: Maximum number of thread items to return. Defaults to 20.

          order: Sort order for results by creation time. Defaults to `desc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not thread_id:
            raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
        # Beta opt-in header; caller-supplied headers win on conflicts.
        extra_headers = {"OpenAI-Beta": "chatkit_beta=v1", **(extra_headers or {})}
        return self._get_api_list(
            f"/chatkit/threads/{thread_id}/items",
            page=AsyncConversationCursorPage[Data],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "before": before,
                        "limit": limit,
                        "order": order,
                    },
                    thread_list_items_params.ThreadListItemsParams,
                ),
            ),
            model=cast(Any, Data),  # Union types cannot be passed in as arguments in the type system
        )
class ThreadsWithRawResponse:
    """Raw-response view over a `Threads` resource: every method returns the
    unparsed response object."""

    def __init__(self, threads: Threads) -> None:
        self._threads = threads
        _wrap = _legacy_response.to_raw_response_wrapper
        self.retrieve = _wrap(threads.retrieve)
        self.list = _wrap(threads.list)
        self.delete = _wrap(threads.delete)
        self.list_items = _wrap(threads.list_items)
class AsyncThreadsWithRawResponse:
    """Raw-response view over an `AsyncThreads` resource: every method returns
    the unparsed response object."""

    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads
        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.retrieve = _wrap(threads.retrieve)
        self.list = _wrap(threads.list)
        self.delete = _wrap(threads.delete)
        self.list_items = _wrap(threads.list_items)
class ThreadsWithStreamingResponse:
    """Streaming-response view over a `Threads` resource: response bodies are
    not eagerly read."""

    def __init__(self, threads: Threads) -> None:
        self._threads = threads
        _wrap = to_streamed_response_wrapper
        self.retrieve = _wrap(threads.retrieve)
        self.list = _wrap(threads.list)
        self.delete = _wrap(threads.delete)
        self.list_items = _wrap(threads.list_items)
class AsyncThreadsWithStreamingResponse:
    """Streaming-response view over an `AsyncThreads` resource: response bodies
    are not eagerly read."""

    def __init__(self, threads: AsyncThreads) -> None:
        self._threads = threads
        _wrap = async_to_streamed_response_wrapper
        self.retrieve = _wrap(threads.retrieve)
        self.list = _wrap(threads.list)
        self.delete = _wrap(threads.delete)
        self.list_items = _wrap(threads.list_items)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/beta/chatkit/threads.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/videos.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import TYPE_CHECKING, Mapping, cast
from typing_extensions import Literal, assert_never
import httpx
from .. import _legacy_response
from ..types import (
VideoSize,
VideoSeconds,
video_list_params,
video_remix_params,
video_create_params,
video_download_content_params,
)
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, omit, not_given
from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from ..types.video import Video
from .._base_client import AsyncPaginator, make_request_options
from .._utils._utils import is_given
from ..types.video_size import VideoSize
from ..types.video_seconds import VideoSeconds
from ..types.video_model_param import VideoModelParam
from ..types.video_delete_response import VideoDeleteResponse
__all__ = ["Videos", "AsyncVideos"]
class Videos(SyncAPIResource):
    """Synchronous resource for the `/videos` endpoints.

    Supports creating video generation jobs (with optional polling helpers),
    retrieving, listing, deleting, downloading generated assets, and remixing
    completed videos.
    """

    @cached_property
    def with_raw_response(self) -> VideosWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return VideosWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> VideosWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return VideosWithStreamingResponse(self)

    def create(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a new video generation job from a prompt and optional reference assets.

        Args:
          prompt: Text prompt that describes the video to generate.

          input_reference: Optional image reference that guides generation.

          model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
              to `sora-2`.

          seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.

          size: Output resolution formatted as width x height (allowed values: 720x1280,
              1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        body = deepcopy_minimal(
            {
                "prompt": prompt,
                "input_reference": input_reference,
                "model": model,
                "seconds": seconds,
                "size": size,
            }
        )
        # Pull any file-like values out of the body so they are sent as
        # multipart file parts rather than JSON.
        files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
        if files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            "/videos",
            body=maybe_transform(body, video_create_params.VideoCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )

    def create_and_poll(
        self,
        *,
        prompt: str,
        input_reference: FileTypes | Omit = omit,
        model: VideoModelParam | Omit = omit,
        seconds: VideoSeconds | Omit = omit,
        size: VideoSize | Omit = omit,
        poll_interval_ms: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """Create a video and wait for it to be processed.

        Convenience wrapper around :meth:`create` followed by :meth:`poll`.
        """
        video = self.create(
            model=model,
            prompt=prompt,
            input_reference=input_reference,
            seconds=seconds,
            size=size,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self.poll(
            video.id,
            poll_interval_ms=poll_interval_ms,
        )

    def poll(
        self,
        video_id: str,
        *,
        poll_interval_ms: int | Omit = omit,
    ) -> Video:
        """Wait for a video generation job to finish processing.

        Repeatedly retrieves the video until its status leaves
        `in_progress`/`queued`. Note: this returns even if generation failed —
        check ``video.status`` (and any error details on the returned object)
        to handle that case.
        """
        # Marker headers let the server know a poll helper is driving requests
        # and (optionally) the caller's custom interval.
        headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
        if is_given(poll_interval_ms):
            headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
        while True:
            response = self.with_raw_response.retrieve(
                video_id,
                extra_headers=headers,
            )
            video = response.parse()
            if video.status == "in_progress" or video.status == "queued":
                if not is_given(poll_interval_ms):
                    # Prefer the server-suggested interval; fall back to 1s.
                    from_header = response.headers.get("openai-poll-after-ms")
                    if from_header is not None:
                        poll_interval_ms = int(from_header)
                    else:
                        poll_interval_ms = 1000
                self._sleep(poll_interval_ms / 1000)
            elif video.status == "completed" or video.status == "failed":
                return video
            else:
                # Exhaustiveness check at type-check time only; at runtime an
                # unrecognized status is returned to the caller as-is.
                if TYPE_CHECKING:  # type: ignore[unreachable]
                    assert_never(video.status)
                else:
                    return video

    def retrieve(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Fetch the latest metadata for a generated video.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._get(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncConversationCursorPage[Video]:
        """
        List recently generated videos for the current project.

        Args:
          after: Identifier for the last item from the previous pagination request

          limit: Number of items to retrieve

          order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
              descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/videos",
            page=SyncConversationCursorPage[Video],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    video_list_params.VideoListParams,
                ),
            ),
            model=Video,
        )

    def delete(
        self,
        video_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> VideoDeleteResponse:
        """
        Permanently delete a completed or failed video and its stored assets.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._delete(
            f"/videos/{video_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=VideoDeleteResponse,
        )

    def download_content(
        self,
        video_id: str,
        *,
        variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Download the generated video bytes or a derived preview asset.

        Streams the rendered video content for the specified video job.

        Args:
          variant: Which downloadable asset to return. Defaults to the MP4 video.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        # Request binary content; caller-supplied Accept headers still win.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        return self._get(
            f"/videos/{video_id}/content",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform({"variant": variant}, video_download_content_params.VideoDownloadContentParams),
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )

    def remix(
        self,
        video_id: str,
        *,
        prompt: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Video:
        """
        Create a remix of a completed video using a refreshed prompt.

        Args:
          prompt: Updated text prompt that directs the remix generation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not video_id:
            raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
        return self._post(
            f"/videos/{video_id}/remix",
            body=maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Video,
        )
class AsyncVideos(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncVideosWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncVideosWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncVideosWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncVideosWithStreamingResponse(self)
async def create(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a new video generation job from a prompt and optional reference assets.
Args:
prompt: Text prompt that describes the video to generate.
input_reference: Optional image reference that guides generation.
model: The video generation model to use (allowed values: sora-2, sora-2-pro). Defaults
to `sora-2`.
seconds: Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds.
size: Output resolution formatted as width x height (allowed values: 720x1280,
1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"prompt": prompt,
"input_reference": input_reference,
"model": model,
"seconds": seconds,
"size": size,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["input_reference"]])
if files:
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/videos",
body=await async_maybe_transform(body, video_create_params.VideoCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
async def create_and_poll(
self,
*,
prompt: str,
input_reference: FileTypes | Omit = omit,
model: VideoModelParam | Omit = omit,
seconds: VideoSeconds | Omit = omit,
size: VideoSize | Omit = omit,
poll_interval_ms: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""Create a video and wait for it to be processed."""
video = await self.create(
model=model,
prompt=prompt,
input_reference=input_reference,
seconds=seconds,
size=size,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return await self.poll(
video.id,
poll_interval_ms=poll_interval_ms,
)
async def poll(
self,
video_id: str,
*,
poll_interval_ms: int | Omit = omit,
) -> Video:
"""Wait for the vector store file to finish processing.
Note: this will return even if the file failed to process, you need to check
file.last_error and file.status to handle these cases
"""
headers: dict[str, str] = {"X-Stainless-Poll-Helper": "true"}
if is_given(poll_interval_ms):
headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
while True:
response = await self.with_raw_response.retrieve(
video_id,
extra_headers=headers,
)
video = response.parse()
if video.status == "in_progress" or video.status == "queued":
if not is_given(poll_interval_ms):
from_header = response.headers.get("openai-poll-after-ms")
if from_header is not None:
poll_interval_ms = int(from_header)
else:
poll_interval_ms = 1000
await self._sleep(poll_interval_ms / 1000)
elif video.status == "completed" or video.status == "failed":
return video
else:
if TYPE_CHECKING: # type: ignore[unreachable]
assert_never(video.status)
else:
return video
async def retrieve(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Fetch the latest metadata for a generated video.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._get(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[Video, AsyncConversationCursorPage[Video]]:
"""
List recently generated videos for the current project.
Args:
after: Identifier for the last item from the previous pagination request
limit: Number of items to retrieve
order: Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/videos",
page=AsyncConversationCursorPage[Video],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
video_list_params.VideoListParams,
),
),
model=Video,
)
async def delete(
self,
video_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VideoDeleteResponse:
"""
Permanently delete a completed or failed video and its stored assets.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._delete(
f"/videos/{video_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=VideoDeleteResponse,
)
async def download_content(
self,
video_id: str,
*,
variant: Literal["video", "thumbnail", "spritesheet"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> _legacy_response.HttpxBinaryResponseContent:
"""
Download the generated video bytes or a derived preview asset.
Streams the rendered video content for the specified video job.
Args:
variant: Which downloadable asset to return. Defaults to the MP4 video.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
return await self._get(
f"/videos/{video_id}/content",
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=await async_maybe_transform(
{"variant": variant}, video_download_content_params.VideoDownloadContentParams
),
),
cast_to=_legacy_response.HttpxBinaryResponseContent,
)
async def remix(
self,
video_id: str,
*,
prompt: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Video:
"""
Create a remix of a completed video using a refreshed prompt.
Args:
prompt: Updated text prompt that directs the remix generation.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not video_id:
raise ValueError(f"Expected a non-empty value for `video_id` but received {video_id!r}")
return await self._post(
f"/videos/{video_id}/remix",
body=await async_maybe_transform({"prompt": prompt}, video_remix_params.VideoRemixParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Video,
)
class VideosWithRawResponse:
def __init__(self, videos: Videos) -> None:
self._videos = videos
self.create = _legacy_response.to_raw_response_wrapper(
videos.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
videos.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
videos.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
videos.delete,
)
self.download_content = _legacy_response.to_raw_response_wrapper(
videos.download_content,
)
self.remix = _legacy_response.to_raw_response_wrapper(
videos.remix,
)
class AsyncVideosWithRawResponse:
def __init__(self, videos: AsyncVideos) -> None:
self._videos = videos
self.create = _legacy_response.async_to_raw_response_wrapper(
videos.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
videos.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
videos.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
videos.delete,
)
self.download_content = _legacy_response.async_to_raw_response_wrapper(
videos.download_content,
)
self.remix = _legacy_response.async_to_raw_response_wrapper(
videos.remix,
)
class VideosWithStreamingResponse:
def __init__(self, videos: Videos) -> None:
self._videos = videos
self.create = to_streamed_response_wrapper(
videos.create,
)
self.retrieve = to_streamed_response_wrapper(
videos.retrieve,
)
self.list = to_streamed_response_wrapper(
videos.list,
)
self.delete = to_streamed_response_wrapper(
videos.delete,
)
self.download_content = to_custom_streamed_response_wrapper(
videos.download_content,
StreamedBinaryAPIResponse,
)
self.remix = to_streamed_response_wrapper(
videos.remix,
)
class AsyncVideosWithStreamingResponse:
def __init__(self, videos: AsyncVideos) -> None:
self._videos = videos
self.create = async_to_streamed_response_wrapper(
videos.create,
)
self.retrieve = async_to_streamed_response_wrapper(
videos.retrieve,
)
self.list = async_to_streamed_response_wrapper(
videos.list,
)
self.delete = async_to_streamed_response_wrapper(
videos.delete,
)
self.download_content = async_to_custom_streamed_response_wrapper(
videos.download_content,
AsyncStreamedBinaryAPIResponse,
)
self.remix = async_to_streamed_response_wrapper(
videos.remix,
)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/videos.py",
"license": "Apache License 2.0",
"lines": 729,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/beta/chatkit/chat_session.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ...._models import BaseModel
from ..chatkit_workflow import ChatKitWorkflow
from .chat_session_status import ChatSessionStatus
from .chat_session_rate_limits import ChatSessionRateLimits
from .chat_session_chatkit_configuration import ChatSessionChatKitConfiguration
__all__ = ["ChatSession"]
class ChatSession(BaseModel):
"""Represents a ChatKit session and its resolved configuration."""
id: str
"""Identifier for the ChatKit session."""
chatkit_configuration: ChatSessionChatKitConfiguration
"""Resolved ChatKit feature configuration for the session."""
client_secret: str
"""Ephemeral client secret that authenticates session requests."""
expires_at: int
"""Unix timestamp (in seconds) for when the session expires."""
max_requests_per_1_minute: int
"""Convenience copy of the per-minute request limit."""
object: Literal["chatkit.session"]
"""Type discriminator that is always `chatkit.session`."""
rate_limits: ChatSessionRateLimits
"""Resolved rate limit values."""
status: ChatSessionStatus
"""Current lifecycle state of the session."""
user: str
"""User identifier associated with the session."""
workflow: ChatKitWorkflow
"""Workflow metadata for the session."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ...._models import BaseModel
__all__ = ["ChatSessionAutomaticThreadTitling"]
class ChatSessionAutomaticThreadTitling(BaseModel):
"""Automatic thread title preferences for the session."""
enabled: bool
"""Whether automatic thread titling is enabled."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_automatic_thread_titling.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ...._models import BaseModel
from .chat_session_history import ChatSessionHistory
from .chat_session_file_upload import ChatSessionFileUpload
from .chat_session_automatic_thread_titling import ChatSessionAutomaticThreadTitling
__all__ = ["ChatSessionChatKitConfiguration"]
class ChatSessionChatKitConfiguration(BaseModel):
"""ChatKit configuration for the session."""
automatic_thread_titling: ChatSessionAutomaticThreadTitling
"""Automatic thread titling preferences."""
file_upload: ChatSessionFileUpload
"""Upload settings for the session."""
history: ChatSessionHistory
"""History retention configuration."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_chatkit_configuration.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
__all__ = ["ChatSessionChatKitConfigurationParam", "AutomaticThreadTitling", "FileUpload", "History"]
class AutomaticThreadTitling(TypedDict, total=False):
"""Configuration for automatic thread titling.
When omitted, automatic thread titling is enabled by default.
"""
enabled: bool
"""Enable automatic thread title generation. Defaults to true."""
class FileUpload(TypedDict, total=False):
"""Configuration for upload enablement and limits.
When omitted, uploads are disabled by default (max_files 10, max_file_size 512 MB).
"""
enabled: bool
"""Enable uploads for this session. Defaults to false."""
max_file_size: int
"""Maximum size in megabytes for each uploaded file.
Defaults to 512 MB, which is the maximum allowable size.
"""
max_files: int
"""Maximum number of files that can be uploaded to the session. Defaults to 10."""
class History(TypedDict, total=False):
"""Configuration for chat history retention.
When omitted, history is enabled by default with no limit on recent_threads (null).
"""
enabled: bool
"""Enables chat users to access previous ChatKit threads. Defaults to true."""
recent_threads: int
"""Number of recent ChatKit threads users have access to.
Defaults to unlimited when unset.
"""
class ChatSessionChatKitConfigurationParam(TypedDict, total=False):
"""Optional per-session configuration settings for ChatKit behavior."""
automatic_thread_titling: AutomaticThreadTitling
"""Configuration for automatic thread titling.
When omitted, automatic thread titling is enabled by default.
"""
file_upload: FileUpload
"""Configuration for upload enablement and limits.
When omitted, uploads are disabled by default (max_files 10, max_file_size 512
MB).
"""
history: History
"""Configuration for chat history retention.
When omitted, history is enabled by default with no limit on recent_threads
(null).
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_expires_after_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ChatSessionExpiresAfterParam"]
class ChatSessionExpiresAfterParam(TypedDict, total=False):
"""Controls when the session expires relative to an anchor timestamp."""
anchor: Required[Literal["created_at"]]
"""Base timestamp used to calculate expiration. Currently fixed to `created_at`."""
seconds: Required[int]
"""Number of seconds after the anchor when the session expires."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_expires_after_param.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_file_upload.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ...._models import BaseModel
__all__ = ["ChatSessionFileUpload"]
class ChatSessionFileUpload(BaseModel):
"""Upload permissions and limits applied to the session."""
enabled: bool
"""Indicates if uploads are enabled for the session."""
max_file_size: Optional[int] = None
"""Maximum upload size in megabytes."""
max_files: Optional[int] = None
"""Maximum number of uploads allowed during the session."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_file_upload.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_history.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ...._models import BaseModel
__all__ = ["ChatSessionHistory"]
class ChatSessionHistory(BaseModel):
"""History retention preferences returned for the session."""
enabled: bool
"""Indicates if chat history is persisted for the session."""
recent_threads: Optional[int] = None
"""Number of prior threads surfaced in history views.
Defaults to null when all history is retained.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_history.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_rate_limits.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from ...._models import BaseModel
__all__ = ["ChatSessionRateLimits"]
class ChatSessionRateLimits(BaseModel):
"""Active per-minute request limit for the session."""
max_requests_per_1_minute: int
"""Maximum allowed requests per one-minute window."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_rate_limits.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_rate_limits_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
__all__ = ["ChatSessionRateLimitsParam"]
class ChatSessionRateLimitsParam(TypedDict, total=False):
"""Controls request rate limits for the session."""
max_requests_per_1_minute: int
"""Maximum number of requests allowed per minute for the session. Defaults to 10."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_rate_limits_param.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chat_session_workflow_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union
from typing_extensions import Required, TypedDict
__all__ = ["ChatSessionWorkflowParam", "Tracing"]
class Tracing(TypedDict, total=False):
"""Optional tracing overrides for the workflow invocation.
When omitted, tracing is enabled by default.
"""
enabled: bool
"""Whether tracing is enabled during the session. Defaults to true."""
class ChatSessionWorkflowParam(TypedDict, total=False):
"""Workflow reference and overrides applied to the chat session."""
id: Required[str]
"""Identifier for the workflow invoked by the session."""
state_variables: Dict[str, Union[str, bool, float]]
"""State variables forwarded to the workflow.
Keys may be up to 64 characters, values must be primitive types, and the map
defaults to an empty object.
"""
tracing: Tracing
"""Optional tracing overrides for the workflow invocation.
When omitted, tracing is enabled by default.
"""
version: str
"""Specific workflow version to run. Defaults to the latest deployed version."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chat_session_workflow_param.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_attachment.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ChatKitAttachment"]
class ChatKitAttachment(BaseModel):
"""Attachment metadata included on thread items."""
id: str
"""Identifier for the attachment."""
mime_type: str
"""MIME type of the attachment."""
name: str
"""Original display name for the attachment."""
preview_url: Optional[str] = None
"""Preview URL for rendering the attachment inline."""
type: Literal["image", "file"]
"""Attachment discriminator."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_attachment.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_response_output_text.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union
from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
from ...._models import BaseModel
__all__ = [
"ChatKitResponseOutputText",
"Annotation",
"AnnotationFile",
"AnnotationFileSource",
"AnnotationURL",
"AnnotationURLSource",
]
class AnnotationFileSource(BaseModel):
"""File attachment referenced by the annotation."""
filename: str
"""Filename referenced by the annotation."""
type: Literal["file"]
"""Type discriminator that is always `file`."""
class AnnotationFile(BaseModel):
"""Annotation that references an uploaded file."""
source: AnnotationFileSource
"""File attachment referenced by the annotation."""
type: Literal["file"]
"""Type discriminator that is always `file` for this annotation."""
class AnnotationURLSource(BaseModel):
"""URL referenced by the annotation."""
type: Literal["url"]
"""Type discriminator that is always `url`."""
url: str
"""URL referenced by the annotation."""
class AnnotationURL(BaseModel):
"""Annotation that references a URL."""
source: AnnotationURLSource
"""URL referenced by the annotation."""
type: Literal["url"]
"""Type discriminator that is always `url` for this annotation."""
Annotation: TypeAlias = Annotated[Union[AnnotationFile, AnnotationURL], PropertyInfo(discriminator="type")]
class ChatKitResponseOutputText(BaseModel):
"""Assistant response text accompanied by optional annotations."""
annotations: List[Annotation]
"""Ordered list of annotations attached to the response text."""
text: str
"""Assistant generated text."""
type: Literal["output_text"]
"""Type discriminator that is always `output_text`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_response_output_text.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_thread.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
from ...._models import BaseModel
__all__ = ["ChatKitThread", "Status", "StatusActive", "StatusLocked", "StatusClosed"]
class StatusActive(BaseModel):
"""Indicates that a thread is active."""
type: Literal["active"]
"""Status discriminator that is always `active`."""
class StatusLocked(BaseModel):
"""Indicates that a thread is locked and cannot accept new input."""
reason: Optional[str] = None
"""Reason that the thread was locked. Defaults to null when no reason is recorded."""
type: Literal["locked"]
"""Status discriminator that is always `locked`."""
class StatusClosed(BaseModel):
"""Indicates that a thread has been closed."""
reason: Optional[str] = None
"""Reason that the thread was closed. Defaults to null when no reason is recorded."""
type: Literal["closed"]
"""Status discriminator that is always `closed`."""
Status: TypeAlias = Annotated[Union[StatusActive, StatusLocked, StatusClosed], PropertyInfo(discriminator="type")]
class ChatKitThread(BaseModel):
"""Represents a ChatKit thread and its current status."""
id: str
"""Identifier of the thread."""
created_at: int
"""Unix timestamp (in seconds) for when the thread was created."""
object: Literal["chatkit.thread"]
"""Type discriminator that is always `chatkit.thread`."""
status: Status
"""Current status for the thread. Defaults to `active` for newly created threads."""
title: Optional[str] = None
"""Optional human-readable title for the thread.
Defaults to null when no title has been generated.
"""
user: str
"""Free-form string that identifies your end user who owns the thread."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_thread.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import Literal
from ...._models import BaseModel
from .chatkit_response_output_text import ChatKitResponseOutputText
__all__ = ["ChatKitThreadAssistantMessageItem"]
class ChatKitThreadAssistantMessageItem(BaseModel):
    """Assistant-authored message within a thread."""

    id: str
    """Identifier of the thread item."""

    content: List[ChatKitResponseOutputText]
    """Ordered assistant response segments."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    thread_id: str
    """Identifier of the parent thread."""

    type: Literal["chatkit.assistant_message"]
    """Type discriminator that is always `chatkit.assistant_message`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_thread_item_list.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
from ...._models import BaseModel
from .chatkit_widget_item import ChatKitWidgetItem
from .chatkit_thread_user_message_item import ChatKitThreadUserMessageItem
from .chatkit_thread_assistant_message_item import ChatKitThreadAssistantMessageItem
__all__ = [
"ChatKitThreadItemList",
"Data",
"DataChatKitClientToolCall",
"DataChatKitTask",
"DataChatKitTaskGroup",
"DataChatKitTaskGroupTask",
]
class DataChatKitClientToolCall(BaseModel):
    """Record of a client side tool invocation initiated by the assistant."""

    id: str
    """Identifier of the thread item."""

    arguments: str
    """JSON-encoded arguments that were sent to the tool."""

    call_id: str
    """Identifier for the client tool call."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    name: str
    """Tool name that was invoked."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    output: Optional[str] = None
    """JSON-encoded output captured from the tool.

    Defaults to null while execution is in progress.
    """

    status: Literal["in_progress", "completed"]
    """Execution status for the tool call."""

    thread_id: str
    """Identifier of the parent thread."""

    type: Literal["chatkit.client_tool_call"]
    """Type discriminator that is always `chatkit.client_tool_call`."""


class DataChatKitTask(BaseModel):
    """Task emitted by the workflow to show progress and status updates."""

    id: str
    """Identifier of the thread item."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    heading: Optional[str] = None
    """Optional heading for the task. Defaults to null when not provided."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    summary: Optional[str] = None
    """Optional summary that describes the task. Defaults to null when omitted."""

    task_type: Literal["custom", "thought"]
    """Subtype for the task."""

    thread_id: str
    """Identifier of the parent thread."""

    type: Literal["chatkit.task"]
    """Type discriminator that is always `chatkit.task`."""


class DataChatKitTaskGroupTask(BaseModel):
    """Task entry that appears within a TaskGroup."""

    heading: Optional[str] = None
    """Optional heading for the grouped task. Defaults to null when not provided."""

    summary: Optional[str] = None
    """Optional summary that describes the grouped task.

    Defaults to null when omitted.
    """

    type: Literal["custom", "thought"]
    """Subtype for the grouped task."""


class DataChatKitTaskGroup(BaseModel):
    """Collection of workflow tasks grouped together in the thread."""

    id: str
    """Identifier of the thread item."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    tasks: List[DataChatKitTaskGroupTask]
    """Tasks included in the group."""

    thread_id: str
    """Identifier of the parent thread."""

    type: Literal["chatkit.task_group"]
    """Type discriminator that is always `chatkit.task_group`."""


# Every thread-item variant shares the `type` discriminator, so a single
# annotated union lets the model layer deserialize list entries polymorphically.
Data: TypeAlias = Annotated[
    Union[
        ChatKitThreadUserMessageItem,
        ChatKitThreadAssistantMessageItem,
        ChatKitWidgetItem,
        DataChatKitClientToolCall,
        DataChatKitTask,
        DataChatKitTaskGroup,
    ],
    PropertyInfo(discriminator="type"),
]


class ChatKitThreadItemList(BaseModel):
    """A paginated list of thread items rendered for the ChatKit API."""

    data: List[Data]
    """A list of items"""

    first_id: Optional[str] = None
    """The ID of the first item in the list."""

    has_more: bool
    """Whether there are more items available."""

    last_id: Optional[str] = None
    """The ID of the last item in the list."""

    object: Literal["list"]
    """The type of object returned, must be `list`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_thread_item_list.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ...._utils import PropertyInfo
from ...._models import BaseModel
from .chatkit_attachment import ChatKitAttachment
__all__ = [
"ChatKitThreadUserMessageItem",
"Content",
"ContentInputText",
"ContentQuotedText",
"InferenceOptions",
"InferenceOptionsToolChoice",
]
class ContentInputText(BaseModel):
    """Text block that a user contributed to the thread."""

    text: str
    """Plain-text content supplied by the user."""

    type: Literal["input_text"]
    """Type discriminator that is always `input_text`."""


class ContentQuotedText(BaseModel):
    """Quoted snippet that the user referenced in their message."""

    text: str
    """Quoted text content."""

    type: Literal["quoted_text"]
    """Type discriminator that is always `quoted_text`."""


# Union of user content blocks, discriminated by the `type` field.
Content: TypeAlias = Annotated[Union[ContentInputText, ContentQuotedText], PropertyInfo(discriminator="type")]


class InferenceOptionsToolChoice(BaseModel):
    """Preferred tool to invoke. Defaults to null when ChatKit should auto-select."""

    id: str
    """Identifier of the requested tool."""


class InferenceOptions(BaseModel):
    """Inference overrides applied to the message. Defaults to null when unset."""

    model: Optional[str] = None
    """Model name that generated the response.

    Defaults to null when using the session default.
    """

    tool_choice: Optional[InferenceOptionsToolChoice] = None
    """Preferred tool to invoke. Defaults to null when ChatKit should auto-select."""


class ChatKitThreadUserMessageItem(BaseModel):
    """User-authored messages within a thread."""

    id: str
    """Identifier of the thread item."""

    attachments: List[ChatKitAttachment]
    """Attachments associated with the user message. Defaults to an empty list."""

    content: List[Content]
    """Ordered content elements supplied by the user."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    inference_options: Optional[InferenceOptions] = None
    """Inference overrides applied to the message. Defaults to null when unset."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    thread_id: str
    """Identifier of the parent thread."""

    # Type discriminator that is always `chatkit.user_message` (doc missing in
    # the generated source; sibling models document the same field).
    type: Literal["chatkit.user_message"]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_thread_user_message_item.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/chatkit_widget_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ChatKitWidgetItem"]
class ChatKitWidgetItem(BaseModel):
    """Thread item that renders a widget payload."""

    id: str
    """Identifier of the thread item."""

    created_at: int
    """Unix timestamp (in seconds) for when the item was created."""

    object: Literal["chatkit.thread_item"]
    """Type discriminator that is always `chatkit.thread_item`."""

    thread_id: str
    """Identifier of the parent thread."""

    type: Literal["chatkit.widget"]
    """Type discriminator that is always `chatkit.widget`."""

    widget: str
    """Serialized widget payload rendered in the UI."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/chatkit_widget_item.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/session_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
from .chat_session_workflow_param import ChatSessionWorkflowParam
from .chat_session_rate_limits_param import ChatSessionRateLimitsParam
from .chat_session_expires_after_param import ChatSessionExpiresAfterParam
from .chat_session_chatkit_configuration_param import ChatSessionChatKitConfigurationParam
__all__ = ["SessionCreateParams"]
# total=False makes every key optional by default; Required[...] opts
# individual keys back in.
class SessionCreateParams(TypedDict, total=False):
    user: Required[str]
    """
    A free-form string that identifies your end user; ensures this Session can
    access other objects that have the same `user` scope.
    """

    workflow: Required[ChatSessionWorkflowParam]
    """Workflow that powers the session."""

    chatkit_configuration: ChatSessionChatKitConfigurationParam
    """Optional overrides for ChatKit runtime configuration features"""

    expires_after: ChatSessionExpiresAfterParam
    """Optional override for session expiration timing in seconds from creation.

    Defaults to 10 minutes.
    """

    rate_limits: ChatSessionRateLimitsParam
    """Optional override for per-minute request limits. When omitted, defaults to 10."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/session_create_params.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/thread_delete_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ...._models import BaseModel
__all__ = ["ThreadDeleteResponse"]
class ThreadDeleteResponse(BaseModel):
    """Confirmation payload returned after deleting a thread."""

    id: str
    """Identifier of the deleted thread."""

    deleted: bool
    """Indicates that the thread has been deleted."""

    object: Literal["chatkit.thread.deleted"]
    """Type discriminator that is always `chatkit.thread.deleted`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/thread_delete_response.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/beta/chatkit/thread_list_items_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["ThreadListItemsParams"]
# Cursor-pagination query parameters; all keys are optional (total=False).
class ThreadListItemsParams(TypedDict, total=False):
    after: str
    """List items created after this thread item ID.

    Defaults to null for the first page.
    """

    before: str
    """List items created before this thread item ID.

    Defaults to null for the newest results.
    """

    limit: int
    """Maximum number of thread items to return. Defaults to 20."""

    order: Literal["asc", "desc"]
    """Sort order for results by creation time. Defaults to `desc`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/thread_list_items_params.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit/thread_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["ThreadListParams"]
# Same cursor-pagination shape as ThreadListItemsParams, plus a `user` filter.
class ThreadListParams(TypedDict, total=False):
    after: str
    """List items created after this thread item ID.

    Defaults to null for the first page.
    """

    before: str
    """List items created before this thread item ID.

    Defaults to null for the newest results.
    """

    limit: int
    """Maximum number of thread items to return. Defaults to 20."""

    order: Literal["asc", "desc"]
    """Sort order for results by creation time. Defaults to `desc`."""

    user: str
    """Filter threads that belong to this user identifier.

    Defaults to null to return all users.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit/thread_list_params.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/beta/chatkit_workflow.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, Union, Optional
from ..._models import BaseModel
__all__ = ["ChatKitWorkflow", "Tracing"]
class Tracing(BaseModel):
    """Tracing settings applied to the workflow."""

    enabled: bool
    """Indicates whether tracing is enabled."""


class ChatKitWorkflow(BaseModel):
    """Workflow metadata and state returned for the session."""

    id: str
    """Identifier of the workflow backing the session."""

    state_variables: Optional[Dict[str, Union[str, bool, float]]] = None
    """State variable key-value pairs applied when invoking the workflow.

    Defaults to null when no overrides were provided.
    """

    tracing: Tracing
    """Tracing settings applied to the workflow."""

    version: Optional[str] = None
    """Specific workflow version used for the session.

    Defaults to null when using the latest deployment.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/beta/chatkit_workflow.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/video.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from .._models import BaseModel
from .video_size import VideoSize
from .video_model import VideoModel
from .video_seconds import VideoSeconds
from .video_create_error import VideoCreateError
__all__ = ["Video"]
class Video(BaseModel):
    """Structured information describing a generated video job."""

    id: str
    """Unique identifier for the video job."""

    completed_at: Optional[int] = None
    """Unix timestamp (seconds) for when the job completed, if finished."""

    created_at: int
    """Unix timestamp (seconds) for when the job was created."""

    error: Optional[VideoCreateError] = None
    """Error payload that explains why generation failed, if applicable."""

    expires_at: Optional[int] = None
    """Unix timestamp (seconds) for when the downloadable assets expire, if set."""

    model: VideoModel
    """The video generation model that produced the job."""

    object: Literal["video"]
    """The object type, which is always `video`."""

    progress: int
    """Approximate completion percentage for the generation task."""

    prompt: Optional[str] = None
    """The prompt that was used to generate the video."""

    remixed_from_video_id: Optional[str] = None
    """Identifier of the source video if this video is a remix."""

    seconds: VideoSeconds
    """Duration of the generated clip in seconds."""

    size: VideoSize
    """The resolution of the generated video."""

    status: Literal["queued", "in_progress", "completed", "failed"]
    """Current lifecycle status of the video job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_create_error.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .._models import BaseModel
__all__ = ["VideoCreateError"]
class VideoCreateError(BaseModel):
    """An error that occurred while generating the response."""

    code: str
    """A machine-readable error code that was returned."""

    message: str
    """A human-readable description of the error that was returned."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_create_error.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
from .._types import FileTypes
from .video_size import VideoSize
from .video_seconds import VideoSeconds
from .video_model_param import VideoModelParam
__all__ = ["VideoCreateParams"]
# Request body for POST /videos; only `prompt` is required (total=False).
class VideoCreateParams(TypedDict, total=False):
    prompt: Required[str]
    """Text prompt that describes the video to generate."""

    input_reference: FileTypes
    """Optional image reference that guides generation."""

    model: VideoModelParam
    """The video generation model to use (allowed values: sora-2, sora-2-pro).

    Defaults to `sora-2`.
    """

    seconds: VideoSeconds
    """Clip duration in seconds (allowed values: 4, 8, 12). Defaults to 4 seconds."""

    size: VideoSize
    """
    Output resolution formatted as width x height (allowed values: 720x1280,
    1280x720, 1024x1792, 1792x1024). Defaults to 720x1280.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_create_params.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/video_delete_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["VideoDeleteResponse"]
class VideoDeleteResponse(BaseModel):
    """Confirmation payload returned after deleting a video."""

    id: str
    """Identifier of the deleted video."""

    deleted: bool
    """Indicates that the video resource was deleted."""

    object: Literal["video.deleted"]
    """The object type that signals the deletion response."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_delete_response.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_download_content_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["VideoDownloadContentParams"]
class VideoDownloadContentParams(TypedDict, total=False):
    variant: Literal["video", "thumbnail", "spritesheet"]
    """Which downloadable asset to return. Defaults to the MP4 video."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_download_content_params.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["VideoListParams"]
# Cursor-pagination query parameters for listing video jobs.
class VideoListParams(TypedDict, total=False):
    after: str
    """Identifier for the last item from the previous pagination request"""

    limit: int
    """Number of items to retrieve"""

    order: Literal["asc", "desc"]
    """Sort order of results by timestamp.

    Use `asc` for ascending order or `desc` for descending order.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_list_params.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_model.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias
__all__ = ["VideoModel"]
# `str` is included alongside the known literals so callers may pass model
# names released after this SDK version without a type error.
VideoModel: TypeAlias = Union[
    str, Literal["sora-2", "sora-2-pro", "sora-2-2025-10-06", "sora-2-pro-2025-10-06", "sora-2-2025-12-08"]
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_model.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/video_remix_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["VideoRemixParams"]
class VideoRemixParams(TypedDict, total=False):
    prompt: Required[str]
    """Updated text prompt that directs the remix generation."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/video_remix_params.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/beta/chatkit/test_sessions.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types.beta.chatkit import (
ChatSession,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestSessions:
    """Contract tests for `client.beta.chatkit.sessions` against a mock server (sync client)."""

    # Run each test twice: once with the loose-validation client, once strict.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        # Minimal create call: only the required `user` and `workflow` params.
        session = client.beta.chatkit.sessions.create(
            user="x",
            workflow={"id": "id"},
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        # Exercises every optional parameter accepted by sessions.create.
        session = client.beta.chatkit.sessions.create(
            user="x",
            workflow={
                "id": "id",
                "state_variables": {"foo": "string"},
                "tracing": {"enabled": True},
                "version": "version",
            },
            chatkit_configuration={
                "automatic_thread_titling": {"enabled": True},
                "file_upload": {
                    "enabled": True,
                    "max_file_size": 1,
                    "max_files": 1,
                },
                "history": {
                    "enabled": True,
                    "recent_threads": 1,
                },
            },
            expires_after={
                "anchor": "created_at",
                "seconds": 1,
            },
            rate_limits={"max_requests_per_1_minute": 1},
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        # Raw-response accessor should expose headers and still parse the body.
        response = client.beta.chatkit.sessions.with_raw_response.create(
            user="x",
            workflow={"id": "id"},
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        session = response.parse()
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        # Streaming accessor closes the response when the context manager exits.
        with client.beta.chatkit.sessions.with_streaming_response.create(
            user="x",
            workflow={"id": "id"},
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            session = response.parse()
            assert_matches_type(ChatSession, session, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_cancel(self, client: OpenAI) -> None:
        session = client.beta.chatkit.sessions.cancel(
            "cksess_123",
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    def test_raw_response_cancel(self, client: OpenAI) -> None:
        response = client.beta.chatkit.sessions.with_raw_response.cancel(
            "cksess_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        session = response.parse()
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    def test_streaming_response_cancel(self, client: OpenAI) -> None:
        with client.beta.chatkit.sessions.with_streaming_response.cancel(
            "cksess_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            session = response.parse()
            assert_matches_type(ChatSession, session, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_cancel(self, client: OpenAI) -> None:
        # Empty path params must be rejected client-side, before any request.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"):
            client.beta.chatkit.sessions.with_raw_response.cancel(
                "",
            )
class TestAsyncSessions:
    """Async mirror of TestSessions, additionally exercising the aiohttp transport."""

    # Run each test with loose, strict, and aiohttp-backed async clients.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        # Minimal create call: only the required `user` and `workflow` params.
        session = await async_client.beta.chatkit.sessions.create(
            user="x",
            workflow={"id": "id"},
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        # Exercises every optional parameter accepted by sessions.create.
        session = await async_client.beta.chatkit.sessions.create(
            user="x",
            workflow={
                "id": "id",
                "state_variables": {"foo": "string"},
                "tracing": {"enabled": True},
                "version": "version",
            },
            chatkit_configuration={
                "automatic_thread_titling": {"enabled": True},
                "file_upload": {
                    "enabled": True,
                    "max_file_size": 1,
                    "max_files": 1,
                },
                "history": {
                    "enabled": True,
                    "recent_threads": 1,
                },
            },
            expires_after={
                "anchor": "created_at",
                "seconds": 1,
            },
            rate_limits={"max_requests_per_1_minute": 1},
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.sessions.with_raw_response.create(
            user="x",
            workflow={"id": "id"},
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        session = response.parse()
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.sessions.with_streaming_response.create(
            user="x",
            workflow={"id": "id"},
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            # Note: async streaming responses require awaiting parse().
            session = await response.parse()
            assert_matches_type(ChatSession, session, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
        session = await async_client.beta.chatkit.sessions.cancel(
            "cksess_123",
        )
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.sessions.with_raw_response.cancel(
            "cksess_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        session = response.parse()
        assert_matches_type(ChatSession, session, path=["response"])

    @parametrize
    async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.sessions.with_streaming_response.cancel(
            "cksess_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            session = await response.parse()
            assert_matches_type(ChatSession, session, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
        # Empty path params must be rejected client-side, before any request.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `session_id` but received ''"):
            await async_client.beta.chatkit.sessions.with_raw_response.cancel(
                "",
            )
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/beta/chatkit/test_sessions.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/beta/chatkit/test_threads.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
from openai.types.beta.chatkit import ChatKitThread, ThreadDeleteResponse
from openai.types.beta.chatkit.chatkit_thread_item_list import Data
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestThreads:
    # Run each test twice: once with the loose-validation client, once strict.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        thread = client.beta.chatkit.threads.retrieve(
            "cthr_123",
        )
        assert_matches_type(ChatKitThread, thread, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        # Raw-response accessor should expose headers and still parse the body.
        response = client.beta.chatkit.threads.with_raw_response.retrieve(
            "cthr_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(ChatKitThread, thread, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        # Streaming accessor closes the response when the context manager exits.
        with client.beta.chatkit.threads.with_streaming_response.retrieve(
            "cthr_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            thread = response.parse()
            assert_matches_type(ChatKitThread, thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # Empty path params must be rejected client-side, before any request.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            client.beta.chatkit.threads.with_raw_response.retrieve(
                "",
            )

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        thread = client.beta.chatkit.threads.list()
        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        # Exercises every optional query parameter accepted by threads.list.
        thread = client.beta.chatkit.threads.list(
            after="after",
            before="before",
            limit=0,
            order="asc",
            user="x",
        )
        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.beta.chatkit.threads.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.beta.chatkit.threads.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            thread = response.parse()
            assert_matches_type(SyncConversationCursorPage[ChatKitThread], thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        thread = client.beta.chatkit.threads.delete(
            "cthr_123",
        )
        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.beta.chatkit.threads.with_raw_response.delete(
            "cthr_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.beta.chatkit.threads.with_streaming_response.delete(
            "cthr_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            thread = response.parse()
            assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            client.beta.chatkit.threads.with_raw_response.delete(
                "",
            )
@parametrize
def test_method_list_items(self, client: OpenAI) -> None:
thread = client.beta.chatkit.threads.list_items(
thread_id="cthr_123",
)
assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
@parametrize
def test_method_list_items_with_all_params(self, client: OpenAI) -> None:
thread = client.beta.chatkit.threads.list_items(
thread_id="cthr_123",
after="after",
before="before",
limit=0,
order="asc",
)
assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
@parametrize
def test_raw_response_list_items(self, client: OpenAI) -> None:
response = client.beta.chatkit.threads.with_raw_response.list_items(
thread_id="cthr_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
@parametrize
def test_streaming_response_list_items(self, client: OpenAI) -> None:
with client.beta.chatkit.threads.with_streaming_response.list_items(
thread_id="cthr_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
thread = response.parse()
assert_matches_type(SyncConversationCursorPage[Data], thread, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list_items(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
client.beta.chatkit.threads.with_raw_response.list_items(
thread_id="",
)
class TestAsyncThreads:
    """Async mirror of `TestThreads` using `AsyncOpenAI`.

    Identical coverage to the sync class; the only behavioral differences are
    `await`-ing the resource calls and `await response.parse()` inside the
    streaming context manager.
    """

    # Adds a third fixture variant that swaps the transport for aiohttp, on
    # top of the "loose"/"strict" client configurations.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.retrieve(
            "cthr_123",
        )
        assert_matches_type(ChatKitThread, thread, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.threads.with_raw_response.retrieve(
            "cthr_123",
        )

        # Raw responses are read eagerly, so the connection is already closed.
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(ChatKitThread, thread, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.threads.with_streaming_response.retrieve(
            "cthr_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            thread = await response.parse()
            assert_matches_type(ChatKitThread, thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # An empty path parameter must be rejected before any HTTP call.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            await async_client.beta.chatkit.threads.with_raw_response.retrieve(
                "",
            )

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.list()
        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.list(
            after="after",
            before="before",
            limit=0,
            order="asc",
            user="x",
        )
        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.threads.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.threads.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            thread = await response.parse()
            assert_matches_type(AsyncConversationCursorPage[ChatKitThread], thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.delete(
            "cthr_123",
        )
        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.threads.with_raw_response.delete(
            "cthr_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.threads.with_streaming_response.delete(
            "cthr_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            thread = await response.parse()
            assert_matches_type(ThreadDeleteResponse, thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            await async_client.beta.chatkit.threads.with_raw_response.delete(
                "",
            )

    @parametrize
    async def test_method_list_items(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.list_items(
            thread_id="cthr_123",
        )
        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])

    @parametrize
    async def test_method_list_items_with_all_params(self, async_client: AsyncOpenAI) -> None:
        thread = await async_client.beta.chatkit.threads.list_items(
            thread_id="cthr_123",
            after="after",
            before="before",
            limit=0,
            order="asc",
        )
        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])

    @parametrize
    async def test_raw_response_list_items(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.beta.chatkit.threads.with_raw_response.list_items(
            thread_id="cthr_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        thread = response.parse()
        assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])

    @parametrize
    async def test_streaming_response_list_items(self, async_client: AsyncOpenAI) -> None:
        async with async_client.beta.chatkit.threads.with_streaming_response.list_items(
            thread_id="cthr_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            thread = await response.parse()
            assert_matches_type(AsyncConversationCursorPage[Data], thread, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_list_items(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
            await async_client.beta.chatkit.threads.with_raw_response.list_items(
                thread_id="",
            )
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/beta/chatkit/test_threads.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/test_videos.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import httpx
import pytest
from respx import MockRouter
import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import (
Video,
VideoDeleteResponse,
)
from openai._utils import assert_signatures_in_sync
from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestVideos:
    """Generated API-surface tests for `client.videos`.

    Each endpoint is exercised via the plain method call, the
    `.with_raw_response` wrapper, and the `.with_streaming_response` context
    manager; path-parameter endpoints also verify client-side rejection of an
    empty `video_id`. The binary `download_content` endpoint is mocked with
    respx instead of the shared mock server.
    """

    # Run each test against both the "loose" and "strict" client fixtures;
    # the booleans are resolved by indirect parametrization in conftest.py.
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        video = client.videos.create(
            prompt="x",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        video = client.videos.create(
            prompt="x",
            input_reference=b"raw file contents",
            model="string",
            seconds="4",
            size="720x1280",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        response = client.videos.with_raw_response.create(
            prompt="x",
        )

        # Raw responses are read eagerly, so the connection is already closed.
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        with client.videos.with_streaming_response.create(
            prompt="x",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        video = client.videos.retrieve(
            "video_123",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.videos.with_raw_response.retrieve(
            "video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.videos.with_streaming_response.retrieve(
            "video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # An empty path parameter must be rejected before any HTTP call.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            client.videos.with_raw_response.retrieve(
                "",
            )

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        video = client.videos.list()
        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        video = client.videos.list(
            after="after",
            limit=0,
            order="asc",
        )
        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.videos.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.videos.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(SyncConversationCursorPage[Video], video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        video = client.videos.delete(
            "video_123",
        )
        assert_matches_type(VideoDeleteResponse, video, path=["response"])

    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.videos.with_raw_response.delete(
            "video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(VideoDeleteResponse, video, path=["response"])

    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.videos.with_streaming_response.delete(
            "video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(VideoDeleteResponse, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            client.videos.with_raw_response.delete(
                "",
            )

    # download_content returns raw binary, so the endpoint is stubbed with
    # respx rather than the shared mock server.
    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_method_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        video = client.videos.download_content(
            video_id="video_123",
        )
        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
        assert video.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_method_download_content_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        video = client.videos.download_content(
            video_id="video_123",
            variant="video",
        )
        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
        assert video.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_raw_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        response = client.videos.with_raw_response.download_content(
            video_id="video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_streaming_response_download_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        with client.videos.with_streaming_response.download_content(
            video_id="video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(bytes, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_path_params_download_content(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            client.videos.with_raw_response.download_content(
                video_id="",
            )

    @parametrize
    def test_method_remix(self, client: OpenAI) -> None:
        video = client.videos.remix(
            video_id="video_123",
            prompt="x",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_raw_response_remix(self, client: OpenAI) -> None:
        response = client.videos.with_raw_response.remix(
            video_id="video_123",
            prompt="x",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    def test_streaming_response_remix(self, client: OpenAI) -> None:
        with client.videos.with_streaming_response.remix(
            video_id="video_123",
            prompt="x",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_remix(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            client.videos.with_raw_response.remix(
                video_id="",
                prompt="x",
            )
class TestAsyncVideos:
    """Async mirror of `TestVideos` using `AsyncOpenAI`.

    Identical coverage to the sync class; the only behavioral differences are
    `await`-ing the resource calls and `await response.parse()` inside the
    streaming context manager.
    """

    # Adds a third fixture variant that swaps the transport for aiohttp, on
    # top of the "loose"/"strict" client configurations.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.create(
            prompt="x",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.create(
            prompt="x",
            input_reference=b"raw file contents",
            model="string",
            seconds="4",
            size="720x1280",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.videos.with_raw_response.create(
            prompt="x",
        )

        # Raw responses are read eagerly, so the connection is already closed.
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.videos.with_streaming_response.create(
            prompt="x",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.retrieve(
            "video_123",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.videos.with_raw_response.retrieve(
            "video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.videos.with_streaming_response.retrieve(
            "video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # An empty path parameter must be rejected before any HTTP call.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            await async_client.videos.with_raw_response.retrieve(
                "",
            )

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.list()
        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.list(
            after="after",
            limit=0,
            order="asc",
        )
        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.videos.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.videos.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.delete(
            "video_123",
        )
        assert_matches_type(VideoDeleteResponse, video, path=["response"])

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.videos.with_raw_response.delete(
            "video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(VideoDeleteResponse, video, path=["response"])

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.videos.with_streaming_response.delete(
            "video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(VideoDeleteResponse, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            await async_client.videos.with_raw_response.delete(
                "",
            )

    # download_content returns raw binary, so the endpoint is stubbed with
    # respx rather than the shared mock server.
    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_method_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        video = await async_client.videos.download_content(
            video_id="video_123",
        )
        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
        assert video.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_method_download_content_with_all_params(
        self, async_client: AsyncOpenAI, respx_mock: MockRouter
    ) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        video = await async_client.videos.download_content(
            video_id="video_123",
            variant="video",
        )
        assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
        assert video.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_raw_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        response = await async_client.videos.with_raw_response.download_content(
            video_id="video_123",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_streaming_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
        async with async_client.videos.with_streaming_response.download_content(
            video_id="video_123",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(bytes, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_path_params_download_content(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            await async_client.videos.with_raw_response.download_content(
                video_id="",
            )

    @parametrize
    async def test_method_remix(self, async_client: AsyncOpenAI) -> None:
        video = await async_client.videos.remix(
            video_id="video_123",
            prompt="x",
        )
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_raw_response_remix(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.videos.with_raw_response.remix(
            video_id="video_123",
            prompt="x",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        video = response.parse()
        assert_matches_type(Video, video, path=["response"])

    @parametrize
    async def test_streaming_response_remix(self, async_client: AsyncOpenAI) -> None:
        async with async_client.videos.with_streaming_response.remix(
            video_id="video_123",
            prompt="x",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            video = await response.parse()
            assert_matches_type(Video, video, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_remix(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
            await async_client.videos.with_raw_response.remix(
                video_id="",
                prompt="x",
            )
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
    """`videos.create_and_poll` must expose the same signature as `videos.create`."""
    if sync:
        target: OpenAI | AsyncOpenAI = client
    else:
        target = async_client

    # Transport-level kwargs are supplied by the request layer, not by the
    # polling helper, so they are excluded from the comparison.
    assert_signatures_in_sync(
        target.videos.create,
        target.videos.create_and_poll,
        exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"},
    )
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_videos.py",
"license": "Apache License 2.0",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/lib/_realtime.py | from __future__ import annotations
import json
from typing_extensions import override
import httpx
from openai import _legacy_response
from openai._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from openai._utils import maybe_transform, async_maybe_transform
from openai._base_client import make_request_options
from openai.resources.realtime.calls import Calls, AsyncCalls
from openai.types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam
__all__ = ["_Calls", "_AsyncCalls"]
# Custom code to override the `create` method to have correct behavior with
# application/sdp and multipart/form-data.
# Ideally we can cutover to the generated code this overrides eventually and remove this.
class _Calls(Calls):
    """Hand-written override of the generated `Calls` resource.

    The generator cannot express this endpoint's content negotiation: a bare
    `application/sdp` body when no session config is given, and a
    `multipart/form-data` upload carrying both the SDP offer and the session
    JSON otherwise.
    """

    @override
    def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        sdp_bytes = sdp.encode("utf-8")

        if session is omit:
            # No session config: the SDP offer is the entire request body.
            # NOTE(review): `extra_body` is not forwarded on this path — confirm
            # this is intentional for raw-content requests.
            headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})}
            return self._post(
                "/realtime/calls",
                content=sdp_bytes,
                options=make_request_options(extra_headers=headers, extra_query=extra_query, timeout=timeout),
                cast_to=_legacy_response.HttpxBinaryResponseContent,
            )

        # Session config present: ship the SDP and the serialized session as
        # two named multipart parts.
        headers = {"Accept": "application/sdp", "Content-Type": "multipart/form-data", **(extra_headers or {})}
        session_json = json.dumps(maybe_transform(session, RealtimeSessionCreateRequestParam)).encode("utf-8")
        return self._post(
            "/realtime/calls",
            files=[
                ("sdp", (None, sdp_bytes, "application/sdp")),
                ("session", (None, session_json, "application/json")),
            ],
            options=make_request_options(
                extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class _AsyncCalls(AsyncCalls):
    """Async counterpart of `_Calls`; see that class for why this override exists."""

    @override
    async def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        sdp_bytes = sdp.encode("utf-8")

        if session is omit:
            # No session config: the SDP offer is the entire request body.
            # NOTE(review): `extra_body` is not forwarded on this path — confirm
            # this is intentional for raw-content requests.
            headers = {"Accept": "application/sdp", "Content-Type": "application/sdp", **(extra_headers or {})}
            return await self._post(
                "/realtime/calls",
                content=sdp_bytes,
                options=make_request_options(extra_headers=headers, extra_query=extra_query, timeout=timeout),
                cast_to=_legacy_response.HttpxBinaryResponseContent,
            )

        # Session config present: ship the SDP and the serialized session as
        # two named multipart parts.
        headers = {"Accept": "application/sdp", "Content-Type": "multipart/form-data", **(extra_headers or {})}
        session_json = json.dumps(
            await async_maybe_transform(session, RealtimeSessionCreateRequestParam)
        ).encode("utf-8")
        return await self._post(
            "/realtime/calls",
            files=[
                ("sdp", (None, sdp_bytes, "application/sdp")),
                ("session", (None, session_json, "application/json")),
            ],
            options=make_request_options(
                extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/lib/_realtime.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/realtime/calls.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Optional
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_streamed_response_wrapper,
async_to_streamed_response_wrapper,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ..._base_client import make_request_options
from ...types.realtime import (
call_refer_params,
call_accept_params,
call_create_params,
call_reject_params,
)
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.realtime.realtime_truncation_param import RealtimeTruncationParam
from ...types.realtime.realtime_audio_config_param import RealtimeAudioConfigParam
from ...types.realtime.realtime_tools_config_param import RealtimeToolsConfigParam
from ...types.realtime.realtime_tracing_config_param import RealtimeTracingConfigParam
from ...types.realtime.realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam
from ...types.realtime.realtime_session_create_request_param import RealtimeSessionCreateRequestParam
__all__ = ["Calls", "AsyncCalls"]
class Calls(SyncAPIResource):
    """Synchronous API resource for the ``/realtime/calls`` endpoints.

    Covers WebRTC call creation (``create``) and the SIP call lifecycle
    operations ``accept``, ``hangup``, ``refer`` and ``reject``.
    """
    @cached_property
    def with_raw_response(self) -> CallsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CallsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> CallsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CallsWithStreamingResponse(self)
    def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Create a new Realtime API call over WebRTC and receive the SDP answer needed to
        complete the peer connection.
        Args:
          sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.
          session: Realtime session object configuration.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The server answers with an SDP payload, so request `application/sdp` rather than JSON.
        extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
        return self._post(
            "/realtime/calls",
            body=maybe_transform(
                {
                    "sdp": sdp,
                    "session": session,
                },
                call_create_params.CallCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
    def accept(
        self,
        call_id: str,
        *,
        type: Literal["realtime"],
        audio: RealtimeAudioConfigParam | Omit = omit,
        include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
        instructions: str | Omit = omit,
        max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
        model: Union[
            str,
            Literal[
                "gpt-realtime",
                "gpt-realtime-1.5",
                "gpt-realtime-2025-08-28",
                "gpt-4o-realtime-preview",
                "gpt-4o-realtime-preview-2024-10-01",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4o-realtime-preview-2025-06-03",
                "gpt-4o-mini-realtime-preview",
                "gpt-4o-mini-realtime-preview-2024-12-17",
                "gpt-realtime-mini",
                "gpt-realtime-mini-2025-10-06",
                "gpt-realtime-mini-2025-12-15",
                "gpt-audio-1.5",
                "gpt-audio-mini",
                "gpt-audio-mini-2025-10-06",
                "gpt-audio-mini-2025-12-15",
            ],
        ]
        | Omit = omit,
        output_modalities: List[Literal["text", "audio"]] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
        tools: RealtimeToolsConfigParam | Omit = omit,
        tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
        truncation: RealtimeTruncationParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Accept an incoming SIP call and configure the realtime session that will handle
        it.
        Args:
          type: The type of session to create. Always `realtime` for the Realtime API.
          audio: Configuration for input and output audio.
          include: Additional fields to include in server outputs.
              `item.input_audio_transcription.logprobs`: Include logprobs for input audio
              transcription.
          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format, (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.
              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.
          max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
          model: The Realtime model used for this session.
          output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`,
              indicating that the model will respond with audio plus a transcript. `["text"]`
              can be used to make the model respond with text only. It is not possible to
              request both `text` and `audio` at the same time.
          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
          tool_choice: How the model chooses tools. Provide one of the string modes or force a specific
              function/MCP tool.
          tools: Tools available to the model.
          tracing: Realtime API can write session traces to the
              [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
              tracing is enabled for a session, the configuration cannot be modified.
              `auto` will create a trace for the session with default values for the workflow
              name, group id, and metadata.
          truncation: When the number of tokens in a conversation exceeds the model's input token
              limit, the conversation be truncated, meaning messages (starting from the
              oldest) will not be included in the model's context. A 32k context model with
              4,096 max output tokens can only include 28,224 tokens in the context before
              truncation occurs.
              Clients can configure truncation behavior to truncate with a lower max token
              limit, which is an effective way to control token usage and cost.
              Truncation will reduce the number of cached tokens on the next turn (busting the
              cache), since messages are dropped from the beginning of the context. However,
              clients can also configure truncation to retain messages up to a fraction of the
              maximum context size, which will reduce the need for future truncations and thus
              improve the cache rate.
              Truncation can be disabled entirely, which means the server will never truncate
              but would instead return an error if the conversation exceeds the model's input
              token limit.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment producing a malformed URL like /realtime/calls//accept.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/accept",
            body=maybe_transform(
                {
                    "type": type,
                    "audio": audio,
                    "include": include,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "model": model,
                    "output_modalities": output_modalities,
                    "prompt": prompt,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "tracing": tracing,
                    "truncation": truncation,
                },
                call_accept_params.CallAcceptParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    def hangup(
        self,
        call_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        End an active Realtime API call, whether it was initiated over SIP or WebRTC.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/hangup",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    def refer(
        self,
        call_id: str,
        *,
        target_uri: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Transfer an active SIP call to a new destination using the SIP REFER verb.
        Args:
          target_uri: URI that should appear in the SIP Refer-To header. Supports values like
              `tel:+14155550123` or `sip:agent@example.com`.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/refer",
            body=maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    def reject(
        self,
        call_id: str,
        *,
        status_code: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Decline an incoming SIP call by returning a SIP status code to the caller.
        Args:
          status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when
              omitted.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._post(
            f"/realtime/calls/{call_id}/reject",
            body=maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class AsyncCalls(AsyncAPIResource):
    """Asynchronous API resource for the ``/realtime/calls`` endpoints.

    Async counterpart of ``Calls``: WebRTC call creation (``create``) and the
    SIP call lifecycle operations ``accept``, ``hangup``, ``refer`` and ``reject``.
    """
    @cached_property
    def with_raw_response(self) -> AsyncCallsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.
        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCallsWithRawResponse(self)
    @cached_property
    def with_streaming_response(self) -> AsyncCallsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCallsWithStreamingResponse(self)
    async def create(
        self,
        *,
        sdp: str,
        session: RealtimeSessionCreateRequestParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Create a new Realtime API call over WebRTC and receive the SDP answer needed to
        complete the peer connection.
        Args:
          sdp: WebRTC Session Description Protocol (SDP) offer generated by the caller.
          session: Realtime session object configuration.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # The server answers with an SDP payload, so request `application/sdp` rather than JSON.
        extra_headers = {"Accept": "application/sdp", **(extra_headers or {})}
        return await self._post(
            "/realtime/calls",
            body=await async_maybe_transform(
                {
                    "sdp": sdp,
                    "session": session,
                },
                call_create_params.CallCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
    async def accept(
        self,
        call_id: str,
        *,
        type: Literal["realtime"],
        audio: RealtimeAudioConfigParam | Omit = omit,
        include: List[Literal["item.input_audio_transcription.logprobs"]] | Omit = omit,
        instructions: str | Omit = omit,
        max_output_tokens: Union[int, Literal["inf"]] | Omit = omit,
        model: Union[
            str,
            Literal[
                "gpt-realtime",
                "gpt-realtime-1.5",
                "gpt-realtime-2025-08-28",
                "gpt-4o-realtime-preview",
                "gpt-4o-realtime-preview-2024-10-01",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4o-realtime-preview-2025-06-03",
                "gpt-4o-mini-realtime-preview",
                "gpt-4o-mini-realtime-preview-2024-12-17",
                "gpt-realtime-mini",
                "gpt-realtime-mini-2025-10-06",
                "gpt-realtime-mini-2025-12-15",
                "gpt-audio-1.5",
                "gpt-audio-mini",
                "gpt-audio-mini-2025-10-06",
                "gpt-audio-mini-2025-12-15",
            ],
        ]
        | Omit = omit,
        output_modalities: List[Literal["text", "audio"]] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        tool_choice: RealtimeToolChoiceConfigParam | Omit = omit,
        tools: RealtimeToolsConfigParam | Omit = omit,
        tracing: Optional[RealtimeTracingConfigParam] | Omit = omit,
        truncation: RealtimeTruncationParam | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Accept an incoming SIP call and configure the realtime session that will handle
        it.
        Args:
          type: The type of session to create. Always `realtime` for the Realtime API.
          audio: Configuration for input and output audio.
          include: Additional fields to include in server outputs.
              `item.input_audio_transcription.logprobs`: Include logprobs for input audio
              transcription.
          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format, (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.
              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.
          max_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.
          model: The Realtime model used for this session.
          output_modalities: The set of modalities the model can respond with. It defaults to `["audio"]`,
              indicating that the model will respond with audio plus a transcript. `["text"]`
              can be used to make the model respond with text only. It is not possible to
              request both `text` and `audio` at the same time.
          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
          tool_choice: How the model chooses tools. Provide one of the string modes or force a specific
              function/MCP tool.
          tools: Tools available to the model.
          tracing: Realtime API can write session traces to the
              [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
              tracing is enabled for a session, the configuration cannot be modified.
              `auto` will create a trace for the session with default values for the workflow
              name, group id, and metadata.
          truncation: When the number of tokens in a conversation exceeds the model's input token
              limit, the conversation be truncated, meaning messages (starting from the
              oldest) will not be included in the model's context. A 32k context model with
              4,096 max output tokens can only include 28,224 tokens in the context before
              truncation occurs.
              Clients can configure truncation behavior to truncate with a lower max token
              limit, which is an effective way to control token usage and cost.
              Truncation will reduce the number of cached tokens on the next turn (busting the
              cache), since messages are dropped from the beginning of the context. However,
              clients can also configure truncation to retain messages up to a fraction of the
              maximum context size, which will reduce the need for future truncations and thus
              improve the cache rate.
              Truncation can be disabled entirely, which means the server will never truncate
              but would instead return an error if the conversation exceeds the model's input
              token limit.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty path segment producing a malformed URL like /realtime/calls//accept.
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/accept",
            body=await async_maybe_transform(
                {
                    "type": type,
                    "audio": audio,
                    "include": include,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "model": model,
                    "output_modalities": output_modalities,
                    "prompt": prompt,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "tracing": tracing,
                    "truncation": truncation,
                },
                call_accept_params.CallAcceptParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    async def hangup(
        self,
        call_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        End an active Realtime API call, whether it was initiated over SIP or WebRTC.
        Args:
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/hangup",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    async def refer(
        self,
        call_id: str,
        *,
        target_uri: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Transfer an active SIP call to a new destination using the SIP REFER verb.
        Args:
          target_uri: URI that should appear in the SIP Refer-To header. Supports values like
              `tel:+14155550123` or `sip:agent@example.com`.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/refer",
            body=await async_maybe_transform({"target_uri": target_uri}, call_refer_params.CallReferParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
    async def reject(
        self,
        call_id: str,
        *,
        status_code: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Decline an incoming SIP call by returning a SIP status code to the caller.
        Args:
          status_code: SIP response code to send back to the caller. Defaults to `603` (Decline) when
              omitted.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not call_id:
            raise ValueError(f"Expected a non-empty value for `call_id` but received {call_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._post(
            f"/realtime/calls/{call_id}/reject",
            body=await async_maybe_transform({"status_code": status_code}, call_reject_params.CallRejectParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class CallsWithRawResponse:
    """Raw-response view over a ``Calls`` resource.

    Each attribute mirrors a method on ``Calls`` but yields the raw HTTP
    response object instead of the parsed content.
    """

    def __init__(self, calls: Calls) -> None:
        self._calls = calls
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(calls.create)
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)
class AsyncCallsWithRawResponse:
    """Raw-response view over an ``AsyncCalls`` resource.

    Each attribute mirrors a method on ``AsyncCalls`` but yields the raw
    HTTP response object instead of the parsed content.
    """

    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(calls.create)
        self.accept = wrap(calls.accept)
        self.hangup = wrap(calls.hangup)
        self.refer = wrap(calls.refer)
        self.reject = wrap(calls.reject)
class CallsWithStreamingResponse:
    """Streaming-response view over a ``Calls`` resource.

    Unlike the raw-response view, responses are not eagerly read.
    """

    def __init__(self, calls: Calls) -> None:
        self._calls = calls
        # `create` returns binary SDP content, so it gets the custom binary wrapper.
        self.create = to_custom_streamed_response_wrapper(calls.create, StreamedBinaryAPIResponse)
        stream = to_streamed_response_wrapper
        self.accept = stream(calls.accept)
        self.hangup = stream(calls.hangup)
        self.refer = stream(calls.refer)
        self.reject = stream(calls.reject)
class AsyncCallsWithStreamingResponse:
    """Streaming-response view over an ``AsyncCalls`` resource.

    Unlike the raw-response view, responses are not eagerly read.
    """

    def __init__(self, calls: AsyncCalls) -> None:
        self._calls = calls
        # `create` returns binary SDP content, so it gets the custom binary wrapper.
        self.create = async_to_custom_streamed_response_wrapper(calls.create, AsyncStreamedBinaryAPIResponse)
        stream = async_to_streamed_response_wrapper
        self.accept = stream(calls.accept)
        self.hangup = stream(calls.hangup)
        self.refer = stream(calls.refer)
        self.reject = stream(calls.reject)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/realtime/calls.py",
"license": "Apache License 2.0",
"lines": 661,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/realtime/call_accept_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union, Optional
from typing_extensions import Literal, Required, TypedDict
from .realtime_truncation_param import RealtimeTruncationParam
from .realtime_audio_config_param import RealtimeAudioConfigParam
from .realtime_tools_config_param import RealtimeToolsConfigParam
from .realtime_tracing_config_param import RealtimeTracingConfigParam
from ..responses.response_prompt_param import ResponsePromptParam
from .realtime_tool_choice_config_param import RealtimeToolChoiceConfigParam
__all__ = ["CallAcceptParams"]
class CallAcceptParams(TypedDict, total=False):
    """Request body for accepting an incoming SIP call (`POST /realtime/calls/{call_id}/accept`)."""

    type: Required[Literal["realtime"]]
    """The type of session to create. Always `realtime` for the Realtime API."""
    audio: RealtimeAudioConfigParam
    """Configuration for input and output audio."""
    include: List[Literal["item.input_audio_transcription.logprobs"]]
    """Additional fields to include in server outputs.
    `item.input_audio_transcription.logprobs`: Include logprobs for input audio
    transcription.
    """
    instructions: str
    """The default system instructions (i.e.
    system message) prepended to model calls. This field allows the client to guide
    the model on desired responses. The model can be instructed on response content
    and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
    good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
    into your voice", "laugh frequently"). The instructions are not guaranteed to be
    followed by the model, but they provide guidance to the model on the desired
    behavior.
    Note that the server sets default instructions which will be used if this field
    is not set and are visible in the `session.created` event at the start of the
    session.
    """
    max_output_tokens: Union[int, Literal["inf"]]
    """
    Maximum number of output tokens for a single assistant response, inclusive of
    tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
    `inf` for the maximum available tokens for a given model. Defaults to `inf`.
    """
    model: Union[
        str,
        Literal[
            "gpt-realtime",
            "gpt-realtime-1.5",
            "gpt-realtime-2025-08-28",
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-realtime-preview-2025-06-03",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
            "gpt-realtime-mini",
            "gpt-realtime-mini-2025-10-06",
            "gpt-realtime-mini-2025-12-15",
            "gpt-audio-1.5",
            "gpt-audio-mini",
            "gpt-audio-mini-2025-10-06",
            "gpt-audio-mini-2025-12-15",
        ],
    ]
    """The Realtime model used for this session."""
    output_modalities: List[Literal["text", "audio"]]
    """The set of modalities the model can respond with.
    It defaults to `["audio"]`, indicating that the model will respond with audio
    plus a transcript. `["text"]` can be used to make the model respond with text
    only. It is not possible to request both `text` and `audio` at the same time.
    """
    prompt: Optional[ResponsePromptParam]
    """
    Reference to a prompt template and its variables.
    [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    """
    tool_choice: RealtimeToolChoiceConfigParam
    """How the model chooses tools.
    Provide one of the string modes or force a specific function/MCP tool.
    """
    tools: RealtimeToolsConfigParam
    """Tools available to the model."""
    tracing: Optional[RealtimeTracingConfigParam]
    """
    Realtime API can write session traces to the
    [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
    tracing is enabled for a session, the configuration cannot be modified.
    `auto` will create a trace for the session with default values for the workflow
    name, group id, and metadata.
    """
    truncation: RealtimeTruncationParam
    """
    When the number of tokens in a conversation exceeds the model's input token
    limit, the conversation be truncated, meaning messages (starting from the
    oldest) will not be included in the model's context. A 32k context model with
    4,096 max output tokens can only include 28,224 tokens in the context before
    truncation occurs.
    Clients can configure truncation behavior to truncate with a lower max token
    limit, which is an effective way to control token usage and cost.
    Truncation will reduce the number of cached tokens on the next turn (busting the
    cache), since messages are dropped from the beginning of the context. However,
    clients can also configure truncation to retain messages up to a fraction of the
    maximum context size, which will reduce the need for future truncations and thus
    improve the cache rate.
    Truncation can be disabled entirely, which means the server will never truncate
    but would instead return an error if the conversation exceeds the model's input
    token limit.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/call_accept_params.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/realtime/call_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
from .realtime_session_create_request_param import RealtimeSessionCreateRequestParam
__all__ = ["CallCreateParams"]
class CallCreateParams(TypedDict, total=False):
sdp: Required[str]
"""WebRTC Session Description Protocol (SDP) offer generated by the caller."""
session: RealtimeSessionCreateRequestParam
"""Realtime session object configuration."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/call_create_params.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/call_refer_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Required, TypedDict
__all__ = ["CallReferParams"]
class CallReferParams(TypedDict, total=False):
target_uri: Required[str]
"""URI that should appear in the SIP Refer-To header.
Supports values like `tel:+14155550123` or `sip:agent@example.com`.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/call_refer_params.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/realtime/call_reject_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
__all__ = ["CallRejectParams"]
class CallRejectParams(TypedDict, total=False):
status_code: int
"""SIP response code to send back to the caller.
Defaults to `603` (Decline) when omitted.
"""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/realtime/call_reject_params.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/realtime/test_calls.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import httpx
import pytest
from respx import MockRouter
import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestCalls:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
call = client.realtime.calls.create(
sdp="sdp",
)
assert isinstance(call, _legacy_response.HttpxBinaryResponseContent)
assert call.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
call = client.realtime.calls.create(
sdp="sdp",
session={
"type": "realtime",
"audio": {
"input": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"noise_reduction": {"type": "near_field"},
"transcription": {
"language": "language",
"model": "string",
"prompt": "prompt",
},
"turn_detection": {
"type": "server_vad",
"create_response": True,
"idle_timeout_ms": 5000,
"interrupt_response": True,
"prefix_padding_ms": 0,
"silence_duration_ms": 0,
"threshold": 0,
},
},
"output": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"speed": 0.25,
"voice": "ash",
},
},
"include": ["item.input_audio_transcription.logprobs"],
"instructions": "instructions",
"max_output_tokens": 0,
"model": "string",
"output_modalities": ["text"],
"prompt": {
"id": "id",
"variables": {"foo": "string"},
"version": "version",
},
"tool_choice": "none",
"tools": [
{
"description": "description",
"name": "name",
"parameters": {},
"type": "function",
}
],
"tracing": "auto",
"truncation": "auto",
},
)
assert isinstance(call, _legacy_response.HttpxBinaryResponseContent)
assert call.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = client.realtime.calls.with_raw_response.create(
sdp="sdp",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, call, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
with client.realtime.calls.with_streaming_response.create(
sdp="sdp",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert_matches_type(bytes, call, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_accept(self, client: OpenAI) -> None:
call = client.realtime.calls.accept(
call_id="call_id",
type="realtime",
)
assert call is None
@parametrize
def test_method_accept_with_all_params(self, client: OpenAI) -> None:
call = client.realtime.calls.accept(
call_id="call_id",
type="realtime",
audio={
"input": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"noise_reduction": {"type": "near_field"},
"transcription": {
"language": "language",
"model": "string",
"prompt": "prompt",
},
"turn_detection": {
"type": "server_vad",
"create_response": True,
"idle_timeout_ms": 5000,
"interrupt_response": True,
"prefix_padding_ms": 0,
"silence_duration_ms": 0,
"threshold": 0,
},
},
"output": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"speed": 0.25,
"voice": "ash",
},
},
include=["item.input_audio_transcription.logprobs"],
instructions="instructions",
max_output_tokens=0,
model="string",
output_modalities=["text"],
prompt={
"id": "id",
"variables": {"foo": "string"},
"version": "version",
},
tool_choice="none",
tools=[
{
"description": "description",
"name": "name",
"parameters": {},
"type": "function",
}
],
tracing="auto",
truncation="auto",
)
assert call is None
@parametrize
def test_raw_response_accept(self, client: OpenAI) -> None:
response = client.realtime.calls.with_raw_response.accept(
call_id="call_id",
type="realtime",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
def test_streaming_response_accept(self, client: OpenAI) -> None:
with client.realtime.calls.with_streaming_response.accept(
call_id="call_id",
type="realtime",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_accept(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
client.realtime.calls.with_raw_response.accept(
call_id="",
type="realtime",
)
@parametrize
def test_method_hangup(self, client: OpenAI) -> None:
call = client.realtime.calls.hangup(
"call_id",
)
assert call is None
@parametrize
def test_raw_response_hangup(self, client: OpenAI) -> None:
response = client.realtime.calls.with_raw_response.hangup(
"call_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
def test_streaming_response_hangup(self, client: OpenAI) -> None:
with client.realtime.calls.with_streaming_response.hangup(
"call_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_hangup(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
client.realtime.calls.with_raw_response.hangup(
"",
)
@parametrize
def test_method_refer(self, client: OpenAI) -> None:
call = client.realtime.calls.refer(
call_id="call_id",
target_uri="tel:+14155550123",
)
assert call is None
@parametrize
def test_raw_response_refer(self, client: OpenAI) -> None:
response = client.realtime.calls.with_raw_response.refer(
call_id="call_id",
target_uri="tel:+14155550123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
def test_streaming_response_refer(self, client: OpenAI) -> None:
with client.realtime.calls.with_streaming_response.refer(
call_id="call_id",
target_uri="tel:+14155550123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_refer(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
client.realtime.calls.with_raw_response.refer(
call_id="",
target_uri="tel:+14155550123",
)
@parametrize
def test_method_reject(self, client: OpenAI) -> None:
call = client.realtime.calls.reject(
call_id="call_id",
)
assert call is None
@parametrize
def test_method_reject_with_all_params(self, client: OpenAI) -> None:
call = client.realtime.calls.reject(
call_id="call_id",
status_code=486,
)
assert call is None
@parametrize
def test_raw_response_reject(self, client: OpenAI) -> None:
response = client.realtime.calls.with_raw_response.reject(
call_id="call_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
def test_streaming_response_reject(self, client: OpenAI) -> None:
with client.realtime.calls.with_streaming_response.reject(
call_id="call_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_reject(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
client.realtime.calls.with_raw_response.reject(
call_id="",
)
class TestAsyncCalls:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
call = await async_client.realtime.calls.create(
sdp="sdp",
)
assert isinstance(call, _legacy_response.HttpxBinaryResponseContent)
assert call.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
call = await async_client.realtime.calls.create(
sdp="sdp",
session={
"type": "realtime",
"audio": {
"input": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"noise_reduction": {"type": "near_field"},
"transcription": {
"language": "language",
"model": "string",
"prompt": "prompt",
},
"turn_detection": {
"type": "server_vad",
"create_response": True,
"idle_timeout_ms": 5000,
"interrupt_response": True,
"prefix_padding_ms": 0,
"silence_duration_ms": 0,
"threshold": 0,
},
},
"output": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"speed": 0.25,
"voice": "ash",
},
},
"include": ["item.input_audio_transcription.logprobs"],
"instructions": "instructions",
"max_output_tokens": 0,
"model": "string",
"output_modalities": ["text"],
"prompt": {
"id": "id",
"variables": {"foo": "string"},
"version": "version",
},
"tool_choice": "none",
"tools": [
{
"description": "description",
"name": "name",
"parameters": {},
"type": "function",
}
],
"tracing": "auto",
"truncation": "auto",
},
)
assert isinstance(call, _legacy_response.HttpxBinaryResponseContent)
assert call.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.realtime.calls.with_raw_response.create(
sdp="sdp",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, call, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.post("/realtime/calls").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
async with async_client.realtime.calls.with_streaming_response.create(
sdp="sdp",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = await response.parse()
assert_matches_type(bytes, call, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_accept(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.accept(
call_id="call_id",
type="realtime",
)
assert call is None
@parametrize
async def test_method_accept_with_all_params(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.accept(
call_id="call_id",
type="realtime",
audio={
"input": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"noise_reduction": {"type": "near_field"},
"transcription": {
"language": "language",
"model": "string",
"prompt": "prompt",
},
"turn_detection": {
"type": "server_vad",
"create_response": True,
"idle_timeout_ms": 5000,
"interrupt_response": True,
"prefix_padding_ms": 0,
"silence_duration_ms": 0,
"threshold": 0,
},
},
"output": {
"format": {
"rate": 24000,
"type": "audio/pcm",
},
"speed": 0.25,
"voice": "ash",
},
},
include=["item.input_audio_transcription.logprobs"],
instructions="instructions",
max_output_tokens=0,
model="string",
output_modalities=["text"],
prompt={
"id": "id",
"variables": {"foo": "string"},
"version": "version",
},
tool_choice="none",
tools=[
{
"description": "description",
"name": "name",
"parameters": {},
"type": "function",
}
],
tracing="auto",
truncation="auto",
)
assert call is None
@parametrize
async def test_raw_response_accept(self, async_client: AsyncOpenAI) -> None:
response = await async_client.realtime.calls.with_raw_response.accept(
call_id="call_id",
type="realtime",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
async def test_streaming_response_accept(self, async_client: AsyncOpenAI) -> None:
async with async_client.realtime.calls.with_streaming_response.accept(
call_id="call_id",
type="realtime",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = await response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_accept(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
await async_client.realtime.calls.with_raw_response.accept(
call_id="",
type="realtime",
)
@parametrize
async def test_method_hangup(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.hangup(
"call_id",
)
assert call is None
@parametrize
async def test_raw_response_hangup(self, async_client: AsyncOpenAI) -> None:
response = await async_client.realtime.calls.with_raw_response.hangup(
"call_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
async def test_streaming_response_hangup(self, async_client: AsyncOpenAI) -> None:
async with async_client.realtime.calls.with_streaming_response.hangup(
"call_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = await response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_hangup(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
await async_client.realtime.calls.with_raw_response.hangup(
"",
)
@parametrize
async def test_method_refer(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.refer(
call_id="call_id",
target_uri="tel:+14155550123",
)
assert call is None
@parametrize
async def test_raw_response_refer(self, async_client: AsyncOpenAI) -> None:
response = await async_client.realtime.calls.with_raw_response.refer(
call_id="call_id",
target_uri="tel:+14155550123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
async def test_streaming_response_refer(self, async_client: AsyncOpenAI) -> None:
async with async_client.realtime.calls.with_streaming_response.refer(
call_id="call_id",
target_uri="tel:+14155550123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = await response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_refer(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
await async_client.realtime.calls.with_raw_response.refer(
call_id="",
target_uri="tel:+14155550123",
)
@parametrize
async def test_method_reject(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.reject(
call_id="call_id",
)
assert call is None
@parametrize
async def test_method_reject_with_all_params(self, async_client: AsyncOpenAI) -> None:
call = await async_client.realtime.calls.reject(
call_id="call_id",
status_code=486,
)
assert call is None
@parametrize
async def test_raw_response_reject(self, async_client: AsyncOpenAI) -> None:
response = await async_client.realtime.calls.with_raw_response.reject(
call_id="call_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = response.parse()
assert call is None
@parametrize
async def test_streaming_response_reject(self, async_client: AsyncOpenAI) -> None:
async with async_client.realtime.calls.with_streaming_response.reject(
call_id="call_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
call = await response.parse()
assert call is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_reject(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `call_id` but received ''"):
await async_client.realtime.calls.with_raw_response.reject(
call_id="",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/realtime/test_calls.py",
"license": "Apache License 2.0",
"lines": 606,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/batch_usage.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from .._models import BaseModel
__all__ = ["BatchUsage", "InputTokensDetails", "OutputTokensDetails"]
class InputTokensDetails(BaseModel):
"""A detailed breakdown of the input tokens."""
cached_tokens: int
"""The number of tokens that were retrieved from the cache.
[More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
"""
class OutputTokensDetails(BaseModel):
"""A detailed breakdown of the output tokens."""
reasoning_tokens: int
"""The number of reasoning tokens."""
class BatchUsage(BaseModel):
"""
Represents token usage details including input tokens, output tokens, a
breakdown of output tokens, and the total tokens used. Only populated on
batches created after September 7, 2025.
"""
input_tokens: int
"""The number of input tokens."""
input_tokens_details: InputTokensDetails
"""A detailed breakdown of the input tokens."""
output_tokens: int
"""The number of output tokens."""
output_tokens_details: OutputTokensDetails
"""A detailed breakdown of the output tokens."""
total_tokens: int
"""The total number of tokens used."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/batch_usage.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_function_call_output_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .response_input_file_content import ResponseInputFileContent
from .response_input_text_content import ResponseInputTextContent
from .response_input_image_content import ResponseInputImageContent
__all__ = ["ResponseFunctionCallOutputItem"]
ResponseFunctionCallOutputItem: TypeAlias = Annotated[
Union[ResponseInputTextContent, ResponseInputImageContent, ResponseInputFileContent],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_call_output_item.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_call_output_item_list.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List
from typing_extensions import TypeAlias
from .response_function_call_output_item import ResponseFunctionCallOutputItem
__all__ = ["ResponseFunctionCallOutputItemList"]
ResponseFunctionCallOutputItemList: TypeAlias = List[ResponseFunctionCallOutputItem]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_call_output_item_list.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_call_output_item_list_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import List, Union
from typing_extensions import TypeAlias
from .response_input_file_content_param import ResponseInputFileContentParam
from .response_input_text_content_param import ResponseInputTextContentParam
from .response_input_image_content_param import ResponseInputImageContentParam
__all__ = ["ResponseFunctionCallOutputItemListParam", "ResponseFunctionCallOutputItemParam"]
ResponseFunctionCallOutputItemParam: TypeAlias = Union[
ResponseInputTextContentParam, ResponseInputImageContentParam, ResponseInputFileContentParam
]
ResponseFunctionCallOutputItemListParam: TypeAlias = List[ResponseFunctionCallOutputItemParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_call_output_item_list_param.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_function_call_output_item_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import TypeAlias
from .response_input_file_content_param import ResponseInputFileContentParam
from .response_input_text_content_param import ResponseInputTextContentParam
from .response_input_image_content_param import ResponseInputImageContentParam
__all__ = ["ResponseFunctionCallOutputItemParam"]
ResponseFunctionCallOutputItemParam: TypeAlias = Union[
ResponseInputTextContentParam, ResponseInputImageContentParam, ResponseInputFileContentParam
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_function_call_output_item_param.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.