| sample_id | text | metadata | category |
|---|---|---|---|
openai/openai-python:src/openai/types/responses/response_custom_tool_call_output.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .response_input_file import ResponseInputFile
from .response_input_text import ResponseInputText
from .response_input_image import ResponseInputImage
__all__ = ["ResponseCustomToolCallOutput", "OutputOutputContentList"]
# Discriminated union of output content parts, resolved at runtime on each member's `type` field.
OutputOutputContentList: TypeAlias = Annotated[
    Union[ResponseInputText, ResponseInputImage, ResponseInputFile], PropertyInfo(discriminator="type")
]
class ResponseCustomToolCallOutput(BaseModel):
    """The output of a custom tool call from your code, being sent back to the model."""
    call_id: str
    """The call ID, used to map this custom tool call output to a custom tool call."""
    output: Union[str, List[OutputOutputContentList]]
    """
    The output from the custom tool call generated by your code. Can be a string or
    a list of output content.
    """
    type: Literal["custom_tool_call_output"]
    """The type of the custom tool call output. Always `custom_tool_call_output`."""
    id: Optional[str] = None
    """The unique ID of the custom tool call output in the OpenAI platform."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call_output.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_custom_tool_call_output_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .response_input_file_param import ResponseInputFileParam
from .response_input_text_param import ResponseInputTextParam
from .response_input_image_param import ResponseInputImageParam
__all__ = ["ResponseCustomToolCallOutputParam", "OutputOutputContentList"]
# Request-side union of accepted output content part params (no runtime discriminator for params).
OutputOutputContentList: TypeAlias = Union[ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
class ResponseCustomToolCallOutputParam(TypedDict, total=False):
    """The output of a custom tool call from your code, being sent back to the model."""
    call_id: Required[str]
    """The call ID, used to map this custom tool call output to a custom tool call."""
    output: Required[Union[str, Iterable[OutputOutputContentList]]]
    """
    The output from the custom tool call generated by your code. Can be a string or
    a list of output content.
    """
    type: Required[Literal["custom_tool_call_output"]]
    """The type of the custom tool call output. Always `custom_tool_call_output`."""
    id: str
    """The unique ID of the custom tool call output in the OpenAI platform."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call_output_param.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_custom_tool_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseCustomToolCallParam"]
class ResponseCustomToolCallParam(TypedDict, total=False):
"""A call to a custom tool created by the model."""
call_id: Required[str]
"""An identifier used to map this custom tool call to a tool call output."""
input: Required[str]
"""The input for the custom tool call generated by the model."""
name: Required[str]
"""The name of the custom tool being called."""
type: Required[Literal["custom_tool_call"]]
"""The type of the custom tool call. Always `custom_tool_call`."""
id: str
"""The unique ID of the custom tool call in the OpenAI platform."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_custom_tool_call_param.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_allowed.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ToolChoiceAllowed"]
# Response-side Pydantic model; generated from the OpenAPI spec (see file header comment).
class ToolChoiceAllowed(BaseModel):
    """Constrains the tools available to the model to a pre-defined set."""
    mode: Literal["auto", "required"]
    """Constrains the tools available to the model to a pre-defined set.
    `auto` allows the model to pick from among the allowed tools and generate a
    message.
    `required` requires the model to call one or more of the allowed tools.
    """
    tools: List[Dict[str, object]]
    """A list of tool definitions that the model should be allowed to call.
    For the Responses API, the list of tool definitions might look like:
    ```json
    [
        { "type": "function", "name": "get_weather" },
        { "type": "mcp", "server_label": "deepwiki" },
        { "type": "image_generation" }
    ]
    ```
    """
    type: Literal["allowed_tools"]
    """Allowed tool configuration type. Always `allowed_tools`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_allowed.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/tool_choice_allowed_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Iterable
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ToolChoiceAllowedParam"]
class ToolChoiceAllowedParam(TypedDict, total=False):
"""Constrains the tools available to the model to a pre-defined set."""
mode: Required[Literal["auto", "required"]]
"""Constrains the tools available to the model to a pre-defined set.
`auto` allows the model to pick from among the allowed tools and generate a
message.
`required` requires the model to call one or more of the allowed tools.
"""
tools: Required[Iterable[Dict[str, object]]]
"""A list of tool definitions that the model should be allowed to call.
For the Responses API, the list of tool definitions might look like:
```json
[
{ "type": "function", "name": "get_weather" },
{ "type": "mcp", "server_label": "deepwiki" },
{ "type": "image_generation" }
]
```
"""
type: Required[Literal["allowed_tools"]]
"""Allowed tool configuration type. Always `allowed_tools`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_allowed_param.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/tool_choice_custom.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ToolChoiceCustom"]
# Response-side Pydantic model for forcing a specific custom tool.
class ToolChoiceCustom(BaseModel):
    """Use this option to force the model to call a specific custom tool."""
    name: str
    """The name of the custom tool to call."""
    type: Literal["custom"]
    """For custom tool calling, the type is always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_custom.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_custom_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ToolChoiceCustomParam"]
class ToolChoiceCustomParam(TypedDict, total=False):
"""Use this option to force the model to call a specific custom tool."""
name: Required[str]
"""The name of the custom tool to call."""
type: Required[Literal["custom"]]
"""For custom tool calling, the type is always `custom`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_custom_param.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/shared/custom_tool_input_format.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
__all__ = ["CustomToolInputFormat", "Text", "Grammar"]
class Text(BaseModel):
    """Unconstrained free-form text."""
    # Single discriminator field; this variant carries no payload beyond the literal tag.
    type: Literal["text"]
    """Unconstrained text format. Always `text`."""
class Grammar(BaseModel):
    """A grammar defined by the user."""
    definition: str
    """The grammar definition."""
    syntax: Literal["lark", "regex"]
    """The syntax of the grammar definition. One of `lark` or `regex`."""
    # `type` is the discriminator used by the CustomToolInputFormat tagged union below.
    type: Literal["grammar"]
    """Grammar format. Always `grammar`."""
# Tagged union over Text/Grammar, discriminated on the `type` field.
CustomToolInputFormat: TypeAlias = Annotated[Union[Text, Grammar], PropertyInfo(discriminator="type")]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/shared/custom_tool_input_format.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/shared/response_format_text_grammar.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseFormatTextGrammar"]
# Response-format configuration selecting a custom grammar.
class ResponseFormatTextGrammar(BaseModel):
    """
    A custom grammar for the model to follow when generating text.
    Learn more in the [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars).
    """
    grammar: str
    """The custom grammar for the model to follow."""
    type: Literal["grammar"]
    """The type of response format being defined. Always `grammar`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/shared/response_format_text_grammar.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/shared/response_format_text_python.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseFormatTextPython"]
# Response-format configuration selecting Python code output; carries no extra fields.
class ResponseFormatTextPython(BaseModel):
    """Configure the model to generate valid Python code.
    See the
    [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars) for more details.
    """
    type: Literal["python"]
    """The type of response format being defined. Always `python`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/shared/response_format_text_python.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/shared_params/custom_tool_input_format.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = ["CustomToolInputFormat", "Text", "Grammar"]
class Text(TypedDict, total=False):
"""Unconstrained free-form text."""
type: Required[Literal["text"]]
"""Unconstrained text format. Always `text`."""
class Grammar(TypedDict, total=False):
"""A grammar defined by the user."""
definition: Required[str]
"""The grammar definition."""
syntax: Required[Literal["lark", "regex"]]
"""The syntax of the grammar definition. One of `lark` or `regex`."""
type: Required[Literal["grammar"]]
"""Grammar format. Always `grammar`."""
# Request-side union; unlike the response-side model, params carry no runtime discriminator.
CustomToolInputFormat: TypeAlias = Union[Text, Grammar]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/shared_params/custom_tool_input_format.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_reasoning_text_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseReasoningTextDeltaEvent"]
# Streaming event carrying one incremental chunk of reasoning text.
class ResponseReasoningTextDeltaEvent(BaseModel):
    """Emitted when a delta is added to a reasoning text."""
    content_index: int
    """The index of the reasoning content part this delta is associated with."""
    delta: str
    """The text delta that was added to the reasoning content."""
    item_id: str
    """The ID of the item this reasoning text delta is associated with."""
    output_index: int
    """The index of the output item this reasoning text delta is associated with."""
    sequence_number: int
    """The sequence number of this event."""
    type: Literal["response.reasoning_text.delta"]
    """The type of the event. Always `response.reasoning_text.delta`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_reasoning_text_delta_event.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_reasoning_text_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseReasoningTextDoneEvent"]
# Terminal streaming event: carries the complete reasoning text in `text`.
class ResponseReasoningTextDoneEvent(BaseModel):
    """Emitted when a reasoning text is completed."""
    content_index: int
    """The index of the reasoning content part."""
    item_id: str
    """The ID of the item this reasoning text is associated with."""
    output_index: int
    """The index of the output item this reasoning text is associated with."""
    sequence_number: int
    """The sequence number of this event."""
    text: str
    """The full text of the completed reasoning content."""
    type: Literal["response.reasoning_text.done"]
    """The type of the event. Always `response.reasoning_text.done`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_reasoning_text_done_event.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_content_part_image.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ChatCompletionContentPartImage", "ImageURL"]
class ImageURL(BaseModel):
    """An image referenced either by remote URL or inline base64 data."""
    url: str
    """Either a URL of the image or the base64 encoded image data."""
    detail: Optional[Literal["auto", "low", "high"]] = None
    """Specifies the detail level of the image.
    Learn more in the
    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
    """
class ChatCompletionContentPartImage(BaseModel):
    """Learn about [image inputs](https://platform.openai.com/docs/guides/vision)."""
    # The image payload; see `ImageURL` for the URL/base64 + detail-level fields.
    image_url: ImageURL
    type: Literal["image_url"]
    """The type of the content part."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_content_part_image.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/chat/chat_completion_content_part_text.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ChatCompletionContentPartText"]
# Plain-text content part for chat completion messages.
class ChatCompletionContentPartText(BaseModel):
    """
    Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
    """
    text: str
    """The text content."""
    type: Literal["text"]
    """The type of the content part."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/chat/chat_completion_content_part_text.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:examples/image_stream.py | #!/usr/bin/env python
import base64
from pathlib import Path
from openai import OpenAI
client = OpenAI()
def main() -> None:
    """Stream an image generation request, saving each partial and the final image.

    Requests up to 3 partial previews (``partial_images=3``). Every partial and
    the final image arrive base64-encoded in the event's ``b64_json`` field and
    are written as PNG files in the current working directory.
    """
    stream = client.images.generate(
        model="gpt-image-1",
        prompt="A cute baby sea otter",
        n=1,
        size="1024x1024",
        stream=True,
        partial_images=3,
    )
    for event in stream:
        if event.type == "image_generation.partial_image":
            print(f" Partial image {event.partial_image_index + 1}/3 received")
            print(f" Size: {len(event.b64_json)} characters (base64)")
            _save_image(event.b64_json, f"partial_{event.partial_image_index + 1}.png")
        elif event.type == "image_generation.completed":
            # No placeholders here, so a plain string (not an f-string) is used.
            print("\n✅ Final image completed!")
            print(f" Size: {len(event.b64_json)} characters (base64)")
            _save_image(event.b64_json, "final_image.png")
        else:
            print(f"❓ Unknown event: {event}")  # type: ignore[unreachable]


def _save_image(b64_json: str, filename: str) -> None:
    """Decode base64 image data, write it to *filename*, and report the saved path."""
    image_data = base64.b64decode(b64_json)
    with open(filename, "wb") as f:
        f.write(image_data)
    print(f" 💾 Saved to: {Path(filename).resolve()}")
if __name__ == "__main__":
    try:
        main()
    except Exception as error:
        # Broad catch is intentional in an example script: report the failure and exit cleanly.
        print(f"Error generating image: {error}")
| {
"repo_id": "openai/openai-python",
"file_path": "examples/image_stream.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/image_edit_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ImageEditCompletedEvent", "Usage", "UsageInputTokensDetails"]
# Breakdown of input tokens by modality (image vs. text).
class UsageInputTokensDetails(BaseModel):
    """The input tokens detailed information for the image generation."""
    image_tokens: int
    """The number of image tokens in the input prompt."""
    text_tokens: int
    """The number of text tokens in the input prompt."""
# Token accounting attached to image-edit completion events.
class Usage(BaseModel):
    """
    For the GPT image models only, the token usage information for the image generation.
    """
    input_tokens: int
    """The number of tokens (images and text) in the input prompt."""
    input_tokens_details: UsageInputTokensDetails
    """The input tokens detailed information for the image generation."""
    output_tokens: int
    """The number of image tokens in the output image."""
    total_tokens: int
    """The total number of tokens (images and text) used for the image generation."""
# Terminal streaming event for image edits; carries the full image plus usage.
class ImageEditCompletedEvent(BaseModel):
    """Emitted when image editing has completed and the final image is available."""
    b64_json: str
    """Base64-encoded final edited image data, suitable for rendering as an image."""
    background: Literal["transparent", "opaque", "auto"]
    """The background setting for the edited image."""
    created_at: int
    """The Unix timestamp when the event was created."""
    output_format: Literal["png", "webp", "jpeg"]
    """The output format for the edited image."""
    quality: Literal["low", "medium", "high", "auto"]
    """The quality setting for the edited image."""
    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
    """The size of the edited image."""
    type: Literal["image_edit.completed"]
    """The type of the event. Always `image_edit.completed`."""
    usage: Usage
    """
    For the GPT image models only, the token usage information for the image
    generation.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_edit_completed_event.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/image_edit_partial_image_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ImageEditPartialImageEvent"]
# Intermediate streaming event for image edits; no usage field until completion.
class ImageEditPartialImageEvent(BaseModel):
    """Emitted when a partial image is available during image editing streaming."""
    b64_json: str
    """Base64-encoded partial image data, suitable for rendering as an image."""
    background: Literal["transparent", "opaque", "auto"]
    """The background setting for the requested edited image."""
    created_at: int
    """The Unix timestamp when the event was created."""
    output_format: Literal["png", "webp", "jpeg"]
    """The output format for the requested edited image."""
    partial_image_index: int
    """0-based index for the partial image (streaming)."""
    quality: Literal["low", "medium", "high", "auto"]
    """The quality setting for the requested edited image."""
    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
    """The size of the requested edited image."""
    type: Literal["image_edit.partial_image"]
    """The type of the event. Always `image_edit.partial_image`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_edit_partial_image_event.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/image_edit_stream_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from .._utils import PropertyInfo
from .image_edit_completed_event import ImageEditCompletedEvent
from .image_edit_partial_image_event import ImageEditPartialImageEvent
__all__ = ["ImageEditStreamEvent"]
# Union of image-edit stream events, discriminated on the `type` field.
ImageEditStreamEvent: TypeAlias = Annotated[
    Union[ImageEditPartialImageEvent, ImageEditCompletedEvent], PropertyInfo(discriminator="type")
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_edit_stream_event.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/image_gen_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"]
# Breakdown of input tokens by modality (image vs. text).
class UsageInputTokensDetails(BaseModel):
    """The input tokens detailed information for the image generation."""
    image_tokens: int
    """The number of image tokens in the input prompt."""
    text_tokens: int
    """The number of text tokens in the input prompt."""
# Token accounting attached to image-generation completion events.
class Usage(BaseModel):
    """
    For the GPT image models only, the token usage information for the image generation.
    """
    input_tokens: int
    """The number of tokens (images and text) in the input prompt."""
    input_tokens_details: UsageInputTokensDetails
    """The input tokens detailed information for the image generation."""
    output_tokens: int
    """The number of image tokens in the output image."""
    total_tokens: int
    """The total number of tokens (images and text) used for the image generation."""
# Terminal streaming event for image generation; carries the full image plus usage.
class ImageGenCompletedEvent(BaseModel):
    """Emitted when image generation has completed and the final image is available."""
    b64_json: str
    """Base64-encoded image data, suitable for rendering as an image."""
    background: Literal["transparent", "opaque", "auto"]
    """The background setting for the generated image."""
    created_at: int
    """The Unix timestamp when the event was created."""
    output_format: Literal["png", "webp", "jpeg"]
    """The output format for the generated image."""
    quality: Literal["low", "medium", "high", "auto"]
    """The quality setting for the generated image."""
    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
    """The size of the generated image."""
    type: Literal["image_generation.completed"]
    """The type of the event. Always `image_generation.completed`."""
    usage: Usage
    """
    For the GPT image models only, the token usage information for the image
    generation.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_gen_completed_event.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/image_gen_partial_image_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ImageGenPartialImageEvent"]
# Intermediate streaming event for image generation; no usage field until completion.
class ImageGenPartialImageEvent(BaseModel):
    """Emitted when a partial image is available during image generation streaming."""
    b64_json: str
    """Base64-encoded partial image data, suitable for rendering as an image."""
    background: Literal["transparent", "opaque", "auto"]
    """The background setting for the requested image."""
    created_at: int
    """The Unix timestamp when the event was created."""
    output_format: Literal["png", "webp", "jpeg"]
    """The output format for the requested image."""
    partial_image_index: int
    """0-based index for the partial image (streaming)."""
    quality: Literal["low", "medium", "high", "auto"]
    """The quality setting for the requested image."""
    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
    """The size of the requested image."""
    type: Literal["image_generation.partial_image"]
    """The type of the event. Always `image_generation.partial_image`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_gen_partial_image_event.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/image_gen_stream_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from .._utils import PropertyInfo
from .image_gen_completed_event import ImageGenCompletedEvent
from .image_gen_partial_image_event import ImageGenPartialImageEvent
__all__ = ["ImageGenStreamEvent"]
# Union of image-generation stream events, discriminated on the `type` field.
ImageGenStreamEvent: TypeAlias = Annotated[
    Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/image_gen_stream_event.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/cli/_api/fine_tuning/jobs.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING
from argparse import ArgumentParser
from ..._utils import get_client, print_model
from ...._types import Omittable, omit
from ...._utils import is_given
from ..._models import BaseModel
from ....pagination import SyncCursorPage
from ....types.fine_tuning import (
FineTuningJob,
FineTuningJobEvent,
)
if TYPE_CHECKING:
from argparse import _SubParsersAction
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Register the `fine_tuning.jobs.*` CLI subcommands on *subparser*.

    Each parser is bound (via ``set_defaults``) to a handler on
    :class:`CLIFineTuningJobs` plus the args model used to validate the parsed
    namespace before dispatch.
    """
    # fine_tuning.jobs.create
    sub = subparser.add_parser("fine_tuning.jobs.create")
    sub.add_argument(
        "-m",
        "--model",
        help="The model to fine-tune.",
        required=True,
    )
    sub.add_argument(
        "-F",
        "--training-file",
        help="The training file to fine-tune the model on.",
        required=True,
    )
    sub.add_argument(
        "-H",
        "--hyperparameters",
        help="JSON string of hyperparameters to use for fine-tuning.",
        type=str,
    )
    sub.add_argument(
        "-s",
        "--suffix",
        help="A suffix to add to the fine-tuned model name.",
    )
    sub.add_argument(
        "-V",
        "--validation-file",
        help="The validation file to use for fine-tuning.",
    )
    sub.set_defaults(func=CLIFineTuningJobs.create, args_model=CLIFineTuningJobsCreateArgs)
    # fine_tuning.jobs.retrieve
    sub = subparser.add_parser("fine_tuning.jobs.retrieve")
    sub.add_argument(
        "-i",
        "--id",
        help="The ID of the fine-tuning job to retrieve.",
        required=True,
    )
    sub.set_defaults(func=CLIFineTuningJobs.retrieve, args_model=CLIFineTuningJobsRetrieveArgs)
    # fine_tuning.jobs.list
    sub = subparser.add_parser("fine_tuning.jobs.list")
    sub.add_argument(
        "-a",
        "--after",
        help="Identifier for the last job from the previous pagination request. If provided, only jobs created after this job will be returned.",
    )
    sub.add_argument(
        "-l",
        "--limit",
        help="Number of fine-tuning jobs to retrieve.",
        type=int,
    )
    sub.set_defaults(func=CLIFineTuningJobs.list, args_model=CLIFineTuningJobsListArgs)
    # fine_tuning.jobs.cancel
    sub = subparser.add_parser("fine_tuning.jobs.cancel")
    sub.add_argument(
        "-i",
        "--id",
        help="The ID of the fine-tuning job to cancel.",
        required=True,
    )
    sub.set_defaults(func=CLIFineTuningJobs.cancel, args_model=CLIFineTuningJobsCancelArgs)
    # fine_tuning.jobs.list_events
    sub = subparser.add_parser("fine_tuning.jobs.list_events")
    sub.add_argument(
        "-i",
        "--id",
        help="The ID of the fine-tuning job to list events for.",
        required=True,
    )
    sub.add_argument(
        "-a",
        "--after",
        help="Identifier for the last event from the previous pagination request. If provided, only events created after this event will be returned.",
    )
    sub.add_argument(
        "-l",
        "--limit",
        help="Number of fine-tuning job events to retrieve.",
        type=int,
    )
    sub.set_defaults(func=CLIFineTuningJobs.list_events, args_model=CLIFineTuningJobsListEventsArgs)
class CLIFineTuningJobsCreateArgs(BaseModel):
    """Validated arguments for the `fine_tuning.jobs.create` subcommand."""
    model: str
    training_file: str
    hyperparameters: Omittable[str] = omit
    suffix: Omittable[str] = omit
    validation_file: Omittable[str] = omit
class CLIFineTuningJobsRetrieveArgs(BaseModel):
    """Validated arguments for the `fine_tuning.jobs.retrieve` subcommand."""
    id: str
class CLIFineTuningJobsListArgs(BaseModel):
    """Validated arguments for the `fine_tuning.jobs.list` subcommand."""
    after: Omittable[str] = omit
    limit: Omittable[int] = omit
class CLIFineTuningJobsCancelArgs(BaseModel):
    """Validated arguments for the `fine_tuning.jobs.cancel` subcommand."""
    id: str
class CLIFineTuningJobsListEventsArgs(BaseModel):
    """Validated arguments for the `fine_tuning.jobs.list_events` subcommand."""
    id: str
    after: Omittable[str] = omit
    limit: Omittable[int] = omit
class CLIFineTuningJobs:
    """Handlers for the `fine_tuning.jobs.*` subcommands.

    Each method obtains a client via :func:`get_client`, performs the API call,
    and pretty-prints the result with :func:`print_model`.
    """
    @staticmethod
    def create(args: CLIFineTuningJobsCreateArgs) -> None:
        """Create a fine-tuning job; `--hyperparameters` is decoded as JSON when provided."""
        hyperparameters = json.loads(str(args.hyperparameters)) if is_given(args.hyperparameters) else omit
        fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.create(
            model=args.model,
            training_file=args.training_file,
            hyperparameters=hyperparameters,
            suffix=args.suffix,
            validation_file=args.validation_file,
        )
        print_model(fine_tuning_job)
    @staticmethod
    def retrieve(args: CLIFineTuningJobsRetrieveArgs) -> None:
        """Fetch a single fine-tuning job by ID."""
        fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.retrieve(fine_tuning_job_id=args.id)
        print_model(fine_tuning_job)
    @staticmethod
    def list(args: CLIFineTuningJobsListArgs) -> None:
        """List fine-tuning jobs, optionally paginated via --after/--limit."""
        fine_tuning_jobs: SyncCursorPage[FineTuningJob] = get_client().fine_tuning.jobs.list(
            after=args.after or omit, limit=args.limit or omit
        )
        print_model(fine_tuning_jobs)
    @staticmethod
    def cancel(args: CLIFineTuningJobsCancelArgs) -> None:
        """Cancel a fine-tuning job by ID."""
        fine_tuning_job: FineTuningJob = get_client().fine_tuning.jobs.cancel(fine_tuning_job_id=args.id)
        print_model(fine_tuning_job)
    @staticmethod
    def list_events(args: CLIFineTuningJobsListEventsArgs) -> None:
        """List events for a fine-tuning job, optionally paginated via --after/--limit."""
        fine_tuning_job_events: SyncCursorPage[FineTuningJobEvent] = get_client().fine_tuning.jobs.list_events(
            fine_tuning_job_id=args.id,
            after=args.after or omit,
            limit=args.limit or omit,
        )
        print_model(fine_tuning_job_events)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/cli/_api/fine_tuning/jobs.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_mcp.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ToolChoiceMcp"]
class ToolChoiceMcp(BaseModel):
"""
Use this option to force the model to call a specific tool on a remote MCP server.
"""
server_label: str
"""The label of the MCP server to use."""
type: Literal["mcp"]
"""For MCP tools, the type is always `mcp`."""
name: Optional[str] = None
"""The name of the tool to call on the server."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_mcp.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/tool_choice_mcp_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ToolChoiceMcpParam"]
class ToolChoiceMcpParam(TypedDict, total=False):
"""
Use this option to force the model to call a specific tool on a remote MCP server.
"""
server_label: Required[str]
"""The label of the MCP server to use."""
type: Required[Literal["mcp"]]
"""For MCP tools, the type is always `mcp`."""
name: Optional[str]
"""The name of the tool to call on the server."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/tool_choice_mcp_param.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/batch_cancelled_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["BatchCancelledWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the batch API request."""
class BatchCancelledWebhookEvent(BaseModel):
"""Sent when a batch API request has been cancelled."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request was cancelled."""
data: Data
"""Event data payload."""
type: Literal["batch.cancelled"]
"""The type of the event. Always `batch.cancelled`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/batch_cancelled_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/batch_completed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["BatchCompletedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the batch API request."""
class BatchCompletedWebhookEvent(BaseModel):
"""Sent when a batch API request has been completed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request was completed."""
data: Data
"""Event data payload."""
type: Literal["batch.completed"]
"""The type of the event. Always `batch.completed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/batch_completed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/batch_expired_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["BatchExpiredWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the batch API request."""
class BatchExpiredWebhookEvent(BaseModel):
"""Sent when a batch API request has expired."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request expired."""
data: Data
"""Event data payload."""
type: Literal["batch.expired"]
"""The type of the event. Always `batch.expired`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/batch_expired_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/batch_failed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["BatchFailedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the batch API request."""
class BatchFailedWebhookEvent(BaseModel):
"""Sent when a batch API request has failed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request failed."""
data: Data
"""Event data payload."""
type: Literal["batch.failed"]
"""The type of the event. Always `batch.failed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/batch_failed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/eval_run_canceled_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["EvalRunCanceledWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the eval run."""
class EvalRunCanceledWebhookEvent(BaseModel):
"""Sent when an eval run has been canceled."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the eval run was canceled."""
data: Data
"""Event data payload."""
type: Literal["eval.run.canceled"]
"""The type of the event. Always `eval.run.canceled`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/eval_run_canceled_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/eval_run_failed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["EvalRunFailedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the eval run."""
class EvalRunFailedWebhookEvent(BaseModel):
"""Sent when an eval run has failed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the eval run failed."""
data: Data
"""Event data payload."""
type: Literal["eval.run.failed"]
"""The type of the event. Always `eval.run.failed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/eval_run_failed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/eval_run_succeeded_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["EvalRunSucceededWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the eval run."""
class EvalRunSucceededWebhookEvent(BaseModel):
"""Sent when an eval run has succeeded."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the eval run succeeded."""
data: Data
"""Event data payload."""
type: Literal["eval.run.succeeded"]
"""The type of the event. Always `eval.run.succeeded`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/eval_run_succeeded_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FineTuningJobCancelledWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the fine-tuning job."""
class FineTuningJobCancelledWebhookEvent(BaseModel):
"""Sent when a fine-tuning job has been cancelled."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the fine-tuning job was cancelled."""
data: Data
"""Event data payload."""
type: Literal["fine_tuning.job.cancelled"]
"""The type of the event. Always `fine_tuning.job.cancelled`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FineTuningJobFailedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the fine-tuning job."""
class FineTuningJobFailedWebhookEvent(BaseModel):
"""Sent when a fine-tuning job has failed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the fine-tuning job failed."""
data: Data
"""Event data payload."""
type: Literal["fine_tuning.job.failed"]
"""The type of the event. Always `fine_tuning.job.failed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/fine_tuning_job_failed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FineTuningJobSucceededWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the fine-tuning job."""
class FineTuningJobSucceededWebhookEvent(BaseModel):
"""Sent when a fine-tuning job has succeeded."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the fine-tuning job succeeded."""
data: Data
"""Event data payload."""
type: Literal["fine_tuning.job.succeeded"]
"""The type of the event. Always `fine_tuning.job.succeeded`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/response_cancelled_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCancelledWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the model response."""
class ResponseCancelledWebhookEvent(BaseModel):
"""Sent when a background response has been cancelled."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response was cancelled."""
data: Data
"""Event data payload."""
type: Literal["response.cancelled"]
"""The type of the event. Always `response.cancelled`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/response_cancelled_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/response_completed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseCompletedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the model response."""
class ResponseCompletedWebhookEvent(BaseModel):
"""Sent when a background response has been completed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response was completed."""
data: Data
"""Event data payload."""
type: Literal["response.completed"]
"""The type of the event. Always `response.completed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/response_completed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/response_failed_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseFailedWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the model response."""
class ResponseFailedWebhookEvent(BaseModel):
"""Sent when a background response has failed."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response failed."""
data: Data
"""Event data payload."""
type: Literal["response.failed"]
"""The type of the event. Always `response.failed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/response_failed_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/response_incomplete_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseIncompleteWebhookEvent", "Data"]
class Data(BaseModel):
"""Event data payload."""
id: str
"""The unique ID of the model response."""
class ResponseIncompleteWebhookEvent(BaseModel):
"""Sent when a background response has been interrupted."""
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the model response was interrupted."""
data: Data
"""Event data payload."""
type: Literal["response.incomplete"]
"""The type of the event. Always `response.incomplete`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/response_incomplete_webhook_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/webhooks/unwrap_webhook_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Annotated, TypeAlias
from ..._utils import PropertyInfo
from .batch_failed_webhook_event import BatchFailedWebhookEvent
from .batch_expired_webhook_event import BatchExpiredWebhookEvent
from .batch_cancelled_webhook_event import BatchCancelledWebhookEvent
from .batch_completed_webhook_event import BatchCompletedWebhookEvent
from .eval_run_failed_webhook_event import EvalRunFailedWebhookEvent
from .response_failed_webhook_event import ResponseFailedWebhookEvent
from .eval_run_canceled_webhook_event import EvalRunCanceledWebhookEvent
from .eval_run_succeeded_webhook_event import EvalRunSucceededWebhookEvent
from .response_cancelled_webhook_event import ResponseCancelledWebhookEvent
from .response_completed_webhook_event import ResponseCompletedWebhookEvent
from .response_incomplete_webhook_event import ResponseIncompleteWebhookEvent
from .fine_tuning_job_failed_webhook_event import FineTuningJobFailedWebhookEvent
from .realtime_call_incoming_webhook_event import RealtimeCallIncomingWebhookEvent
from .fine_tuning_job_cancelled_webhook_event import FineTuningJobCancelledWebhookEvent
from .fine_tuning_job_succeeded_webhook_event import FineTuningJobSucceededWebhookEvent
__all__ = ["UnwrapWebhookEvent"]
UnwrapWebhookEvent: TypeAlias = Annotated[
Union[
BatchCancelledWebhookEvent,
BatchCompletedWebhookEvent,
BatchExpiredWebhookEvent,
BatchFailedWebhookEvent,
EvalRunCanceledWebhookEvent,
EvalRunFailedWebhookEvent,
EvalRunSucceededWebhookEvent,
FineTuningJobCancelledWebhookEvent,
FineTuningJobFailedWebhookEvent,
FineTuningJobSucceededWebhookEvent,
RealtimeCallIncomingWebhookEvent,
ResponseCancelledWebhookEvent,
ResponseCompletedWebhookEvent,
ResponseFailedWebhookEvent,
ResponseIncompleteWebhookEvent,
],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/webhooks/unwrap_webhook_event.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/test_webhooks.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from unittest import mock
import pytest
import openai
from openai._exceptions import InvalidWebhookSignatureError
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
# Standardized test constants (matches TypeScript implementation)
TEST_SECRET = "whsec_RdvaYFYUXuIFuEbvZHwMfYFhUf7aMYjYcmM24+Aj40c="
TEST_PAYLOAD = '{"id": "evt_685c059ae3a481909bdc86819b066fb6", "object": "event", "created_at": 1750861210, "type": "response.completed", "data": {"id": "resp_123"}}'
TEST_TIMESTAMP = 1750861210 # Fixed timestamp that matches our test signature
TEST_WEBHOOK_ID = "wh_685c059ae39c8190af8c71ed1022a24d"
TEST_SIGNATURE = "v1,gUAg4R2hWouRZqRQG4uJypNS8YK885G838+EHb4nKBY="
def create_test_headers(
timestamp: int | None = None, signature: str | None = None, webhook_id: str | None = None
) -> dict[str, str]:
"""Helper function to create test headers"""
return {
"webhook-signature": signature or TEST_SIGNATURE,
"webhook-timestamp": str(timestamp or TEST_TIMESTAMP),
"webhook-id": webhook_id or TEST_WEBHOOK_ID,
}
class TestWebhooks:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_unwrap_with_secret(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
unwrapped = client.webhooks.unwrap(TEST_PAYLOAD, headers, secret=TEST_SECRET)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
def test_unwrap_without_secret(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
client.webhooks.unwrap(TEST_PAYLOAD, headers)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_valid(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
# Should not raise - this is a truly valid signature for this timestamp
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_invalid_secret_format(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=None)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_invalid(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret="invalid_secret")
@parametrize
def test_verify_signature_missing_webhook_signature_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers(signature=None)
del headers["webhook-signature"]
with pytest.raises(ValueError, match="Could not find webhook-signature header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_missing_webhook_timestamp_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
del headers["webhook-timestamp"]
with pytest.raises(ValueError, match="Could not find webhook-timestamp header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
def test_verify_signature_missing_webhook_id_header(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
del headers["webhook-id"]
with pytest.raises(ValueError, match="Could not find webhook-id header"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_payload_bytes(self, client: openai.OpenAI) -> None:
headers = create_test_headers()
client.webhooks.verify_signature(TEST_PAYLOAD.encode("utf-8"), headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
def test_unwrap_with_client_secret(self) -> None:
test_client = openai.OpenAI(base_url=base_url, api_key="test-api-key", webhook_secret=TEST_SECRET)
headers = create_test_headers()
unwrapped = test_client.webhooks.unwrap(TEST_PAYLOAD, headers)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
def test_verify_signature_timestamp_too_old(self, client: openai.OpenAI) -> None:
# Use a timestamp that's older than 5 minutes from our test timestamp
old_timestamp = TEST_TIMESTAMP - 400 # 6 minutes 40 seconds ago
headers = create_test_headers(timestamp=old_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too old"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_timestamp_too_new(self, client: openai.OpenAI) -> None:
# Use a timestamp that's in the future beyond tolerance from our test timestamp
future_timestamp = TEST_TIMESTAMP + 400 # 6 minutes 40 seconds in the future
headers = create_test_headers(timestamp=future_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too new"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_custom_tolerance(self, client: openai.OpenAI) -> None:
# Use a timestamp that's older than default tolerance but within custom tolerance
old_timestamp = TEST_TIMESTAMP - 400 # 6 minutes 40 seconds ago from test timestamp
headers = create_test_headers(timestamp=old_timestamp, signature="v1,dummy_signature")
# Should fail with default tolerance
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too old"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
# Should also fail with custom tolerance of 10 minutes (signature won't match)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET, tolerance=600)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_recent_timestamp_succeeds(self, client: openai.OpenAI) -> None:
# Use a recent timestamp with dummy signature
headers = create_test_headers(signature="v1,dummy_signature")
# Should fail on signature verification (not timestamp validation)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_multiple_signatures_one_valid(self, client: openai.OpenAI) -> None:
# Test multiple signatures: one invalid, one valid
multiple_signatures = f"v1,invalid_signature {TEST_SIGNATURE}"
headers = create_test_headers(signature=multiple_signatures)
# Should not raise when at least one signature is valid
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
def test_verify_signature_multiple_signatures_all_invalid(self, client: openai.OpenAI) -> None:
# Test multiple invalid signatures
multiple_invalid_signatures = "v1,invalid_signature1 v1,invalid_signature2"
headers = create_test_headers(signature=multiple_invalid_signatures)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
class TestAsyncWebhooks:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_unwrap_with_secret(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
unwrapped = async_client.webhooks.unwrap(TEST_PAYLOAD, headers, secret=TEST_SECRET)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
async def test_unwrap_without_secret(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
async_client.webhooks.unwrap(TEST_PAYLOAD, headers)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_valid(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
# Should not raise - this is a truly valid signature for this timestamp
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
async def test_verify_signature_invalid_secret_format(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
with pytest.raises(ValueError, match="The webhook secret must either be set"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=None)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_invalid(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret="invalid_secret")
@parametrize
async def test_verify_signature_missing_webhook_signature_header(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
del headers["webhook-signature"]
with pytest.raises(ValueError, match="Could not find webhook-signature header"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
async def test_verify_signature_missing_webhook_timestamp_header(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
del headers["webhook-timestamp"]
with pytest.raises(ValueError, match="Could not find webhook-timestamp header"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@parametrize
async def test_verify_signature_missing_webhook_id_header(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
del headers["webhook-id"]
with pytest.raises(ValueError, match="Could not find webhook-id header"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_payload_bytes(self, async_client: openai.AsyncOpenAI) -> None:
headers = create_test_headers()
async_client.webhooks.verify_signature(TEST_PAYLOAD.encode("utf-8"), headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
async def test_unwrap_with_client_secret(self) -> None:
test_async_client = openai.AsyncOpenAI(base_url=base_url, api_key="test-api-key", webhook_secret=TEST_SECRET)
headers = create_test_headers()
unwrapped = test_async_client.webhooks.unwrap(TEST_PAYLOAD, headers)
assert unwrapped.id == "evt_685c059ae3a481909bdc86819b066fb6"
assert unwrapped.created_at == 1750861210
@parametrize
async def test_verify_signature_timestamp_too_old(self, async_client: openai.AsyncOpenAI) -> None:
# Use a timestamp that's older than 5 minutes from our test timestamp
old_timestamp = TEST_TIMESTAMP - 400 # 6 minutes 40 seconds ago
headers = create_test_headers(timestamp=old_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too old"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_timestamp_too_new(self, async_client: openai.AsyncOpenAI) -> None:
# Use a timestamp that's in the future beyond tolerance from our test timestamp
future_timestamp = TEST_TIMESTAMP + 400 # 6 minutes 40 seconds in the future
headers = create_test_headers(timestamp=future_timestamp, signature="v1,dummy_signature")
with pytest.raises(InvalidWebhookSignatureError, match="Webhook timestamp is too new"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_multiple_signatures_one_valid(self, async_client: openai.AsyncOpenAI) -> None:
# Test multiple signatures: one invalid, one valid
multiple_signatures = f"v1,invalid_signature {TEST_SIGNATURE}"
headers = create_test_headers(signature=multiple_signatures)
# Should not raise when at least one signature is valid
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
@mock.patch("time.time", mock.MagicMock(return_value=TEST_TIMESTAMP))
@parametrize
async def test_verify_signature_multiple_signatures_all_invalid(self, async_client: openai.AsyncOpenAI) -> None:
# Test multiple invalid signatures
multiple_invalid_signatures = "v1,invalid_signature1 v1,invalid_signature2"
headers = create_test_headers(signature=multiple_invalid_signatures)
with pytest.raises(InvalidWebhookSignatureError, match="The given webhook signature does not match"):
async_client.webhooks.verify_signature(TEST_PAYLOAD, headers, secret=TEST_SECRET)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_webhooks.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/responses/response_input_item.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, List, Union, Optional
from typing_extensions import Literal, Annotated, TypeAlias
from ..._utils import PropertyInfo
from ..._models import BaseModel
from .local_environment import LocalEnvironment
from .easy_input_message import EasyInputMessage
from .container_reference import ContainerReference
from .response_output_message import ResponseOutputMessage
from .response_reasoning_item import ResponseReasoningItem
from .response_custom_tool_call import ResponseCustomToolCall
from .response_computer_tool_call import ResponseComputerToolCall
from .response_function_tool_call import ResponseFunctionToolCall
from .response_function_web_search import ResponseFunctionWebSearch
from .response_compaction_item_param import ResponseCompactionItemParam
from .response_file_search_tool_call import ResponseFileSearchToolCall
from .response_custom_tool_call_output import ResponseCustomToolCallOutput
from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
from .response_input_message_content_list import ResponseInputMessageContentList
from .response_function_call_output_item_list import ResponseFunctionCallOutputItemList
from .response_function_shell_call_output_content import ResponseFunctionShellCallOutputContent
from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot
__all__ = [
"ResponseInputItem",
"Message",
"ComputerCallOutput",
"ComputerCallOutputAcknowledgedSafetyCheck",
"FunctionCallOutput",
"ImageGenerationCall",
"LocalShellCall",
"LocalShellCallAction",
"LocalShellCallOutput",
"ShellCall",
"ShellCallAction",
"ShellCallEnvironment",
"ShellCallOutput",
"ApplyPatchCall",
"ApplyPatchCallOperation",
"ApplyPatchCallOperationCreateFile",
"ApplyPatchCallOperationDeleteFile",
"ApplyPatchCallOperationUpdateFile",
"ApplyPatchCallOutput",
"McpListTools",
"McpListToolsTool",
"McpApprovalRequest",
"McpApprovalResponse",
"McpCall",
"ItemReference",
]
class Message(BaseModel):
"""
A message input to the model with a role indicating instruction following
hierarchy. Instructions given with the `developer` or `system` role take
precedence over instructions given with the `user` role.
"""
content: ResponseInputMessageContentList
"""
A list of one or many input items to the model, containing different content
types.
"""
role: Literal["user", "system", "developer"]
"""The role of the message input. One of `user`, `system`, or `developer`."""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
type: Optional[Literal["message"]] = None
"""The type of the message input. Always set to `message`."""
class ComputerCallOutputAcknowledgedSafetyCheck(BaseModel):
"""A pending safety check for the computer call."""
id: str
"""The ID of the pending safety check."""
code: Optional[str] = None
"""The type of the pending safety check."""
message: Optional[str] = None
"""Details about the pending safety check."""
class ComputerCallOutput(BaseModel):
"""The output of a computer tool call."""
call_id: str
"""The ID of the computer tool call that produced the output."""
output: ResponseComputerToolCallOutputScreenshot
"""A computer screenshot image used with the computer use tool."""
type: Literal["computer_call_output"]
"""The type of the computer tool call output. Always `computer_call_output`."""
id: Optional[str] = None
"""The ID of the computer tool call output."""
acknowledged_safety_checks: Optional[List[ComputerCallOutputAcknowledgedSafetyCheck]] = None
"""
The safety checks reported by the API that have been acknowledged by the
developer.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the message input.
One of `in_progress`, `completed`, or `incomplete`. Populated when input items
are returned via API.
"""
class FunctionCallOutput(BaseModel):
"""The output of a function tool call."""
call_id: str
"""The unique ID of the function tool call generated by the model."""
output: Union[str, ResponseFunctionCallOutputItemList]
"""Text, image, or file output of the function tool call."""
type: Literal["function_call_output"]
"""The type of the function tool call output. Always `function_call_output`."""
id: Optional[str] = None
"""The unique ID of the function tool call output.
Populated when this item is returned via API.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
class ImageGenerationCall(BaseModel):
"""An image generation request made by the model."""
id: str
"""The unique ID of the image generation call."""
result: Optional[str] = None
"""The generated image encoded in base64."""
status: Literal["in_progress", "completed", "generating", "failed"]
"""The status of the image generation call."""
type: Literal["image_generation_call"]
"""The type of the image generation call. Always `image_generation_call`."""
class LocalShellCallAction(BaseModel):
"""Execute a shell command on the server."""
command: List[str]
"""The command to run."""
env: Dict[str, str]
"""Environment variables to set for the command."""
type: Literal["exec"]
"""The type of the local shell action. Always `exec`."""
timeout_ms: Optional[int] = None
"""Optional timeout in milliseconds for the command."""
user: Optional[str] = None
"""Optional user to run the command as."""
working_directory: Optional[str] = None
"""Optional working directory to run the command in."""
class LocalShellCall(BaseModel):
"""A tool call to run a command on the local shell."""
id: str
"""The unique ID of the local shell call."""
action: LocalShellCallAction
"""Execute a shell command on the server."""
call_id: str
"""The unique ID of the local shell tool call generated by the model."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the local shell call."""
type: Literal["local_shell_call"]
"""The type of the local shell call. Always `local_shell_call`."""
class LocalShellCallOutput(BaseModel):
"""The output of a local shell tool call."""
id: str
"""The unique ID of the local shell tool call generated by the model."""
output: str
"""A JSON string of the output of the local shell tool call."""
type: Literal["local_shell_call_output"]
"""The type of the local shell tool call output. Always `local_shell_call_output`."""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
class ShellCallAction(BaseModel):
"""The shell commands and limits that describe how to run the tool call."""
commands: List[str]
"""Ordered shell commands for the execution environment to run."""
max_output_length: Optional[int] = None
"""
Maximum number of UTF-8 characters to capture from combined stdout and stderr
output.
"""
timeout_ms: Optional[int] = None
"""Maximum wall-clock time in milliseconds to allow the shell commands to run."""
ShellCallEnvironment: TypeAlias = Annotated[
Union[LocalEnvironment, ContainerReference, None], PropertyInfo(discriminator="type")
]
class ShellCall(BaseModel):
"""A tool representing a request to execute one or more shell commands."""
action: ShellCallAction
"""The shell commands and limits that describe how to run the tool call."""
call_id: str
"""The unique ID of the shell tool call generated by the model."""
type: Literal["shell_call"]
"""The type of the item. Always `shell_call`."""
id: Optional[str] = None
"""The unique ID of the shell tool call.
Populated when this item is returned via API.
"""
environment: Optional[ShellCallEnvironment] = None
"""The environment to execute the shell commands in."""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the shell call.
One of `in_progress`, `completed`, or `incomplete`.
"""
class ShellCallOutput(BaseModel):
"""The streamed output items emitted by a shell tool call."""
call_id: str
"""The unique ID of the shell tool call generated by the model."""
output: List[ResponseFunctionShellCallOutputContent]
"""
Captured chunks of stdout and stderr output, along with their associated
outcomes.
"""
type: Literal["shell_call_output"]
"""The type of the item. Always `shell_call_output`."""
id: Optional[str] = None
"""The unique ID of the shell tool call output.
Populated when this item is returned via API.
"""
max_output_length: Optional[int] = None
"""
The maximum number of UTF-8 characters captured for this shell call's combined
output.
"""
status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
"""The status of the shell call output."""
class ApplyPatchCallOperationCreateFile(BaseModel):
"""Instruction for creating a new file via the apply_patch tool."""
diff: str
"""Unified diff content to apply when creating the file."""
path: str
"""Path of the file to create relative to the workspace root."""
type: Literal["create_file"]
"""The operation type. Always `create_file`."""
class ApplyPatchCallOperationDeleteFile(BaseModel):
"""Instruction for deleting an existing file via the apply_patch tool."""
path: str
"""Path of the file to delete relative to the workspace root."""
type: Literal["delete_file"]
"""The operation type. Always `delete_file`."""
class ApplyPatchCallOperationUpdateFile(BaseModel):
"""Instruction for updating an existing file via the apply_patch tool."""
diff: str
"""Unified diff content to apply to the existing file."""
path: str
"""Path of the file to update relative to the workspace root."""
type: Literal["update_file"]
"""The operation type. Always `update_file`."""
ApplyPatchCallOperation: TypeAlias = Annotated[
Union[ApplyPatchCallOperationCreateFile, ApplyPatchCallOperationDeleteFile, ApplyPatchCallOperationUpdateFile],
PropertyInfo(discriminator="type"),
]
class ApplyPatchCall(BaseModel):
"""
A tool call representing a request to create, delete, or update files using diff patches.
"""
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
operation: ApplyPatchCallOperation
"""
The specific create, delete, or update instruction for the apply_patch tool
call.
"""
status: Literal["in_progress", "completed"]
"""The status of the apply patch tool call. One of `in_progress` or `completed`."""
type: Literal["apply_patch_call"]
"""The type of the item. Always `apply_patch_call`."""
id: Optional[str] = None
"""The unique ID of the apply patch tool call.
Populated when this item is returned via API.
"""
class ApplyPatchCallOutput(BaseModel):
"""The streamed output emitted by an apply patch tool call."""
call_id: str
"""The unique ID of the apply patch tool call generated by the model."""
status: Literal["completed", "failed"]
"""The status of the apply patch tool call output. One of `completed` or `failed`."""
type: Literal["apply_patch_call_output"]
"""The type of the item. Always `apply_patch_call_output`."""
id: Optional[str] = None
"""The unique ID of the apply patch tool call output.
Populated when this item is returned via API.
"""
output: Optional[str] = None
"""
Optional human-readable log text from the apply patch tool (e.g., patch results
or errors).
"""
class McpListToolsTool(BaseModel):
"""A tool available on an MCP server."""
input_schema: object
"""The JSON schema describing the tool's input."""
name: str
"""The name of the tool."""
annotations: Optional[object] = None
"""Additional annotations about the tool."""
description: Optional[str] = None
"""The description of the tool."""
class McpListTools(BaseModel):
"""A list of tools available on an MCP server."""
id: str
"""The unique ID of the list."""
server_label: str
"""The label of the MCP server."""
tools: List[McpListToolsTool]
"""The tools available on the server."""
type: Literal["mcp_list_tools"]
"""The type of the item. Always `mcp_list_tools`."""
error: Optional[str] = None
"""Error message if the server could not list tools."""
class McpApprovalRequest(BaseModel):
"""A request for human approval of a tool invocation."""
id: str
"""The unique ID of the approval request."""
arguments: str
"""A JSON string of arguments for the tool."""
name: str
"""The name of the tool to run."""
server_label: str
"""The label of the MCP server making the request."""
type: Literal["mcp_approval_request"]
"""The type of the item. Always `mcp_approval_request`."""
class McpApprovalResponse(BaseModel):
"""A response to an MCP approval request."""
approval_request_id: str
"""The ID of the approval request being answered."""
approve: bool
"""Whether the request was approved."""
type: Literal["mcp_approval_response"]
"""The type of the item. Always `mcp_approval_response`."""
id: Optional[str] = None
"""The unique ID of the approval response"""
reason: Optional[str] = None
"""Optional reason for the decision."""
class McpCall(BaseModel):
"""An invocation of a tool on an MCP server."""
id: str
"""The unique ID of the tool call."""
arguments: str
"""A JSON string of the arguments passed to the tool."""
name: str
"""The name of the tool that was run."""
server_label: str
"""The label of the MCP server running the tool."""
type: Literal["mcp_call"]
"""The type of the item. Always `mcp_call`."""
approval_request_id: Optional[str] = None
"""
Unique identifier for the MCP tool call approval request. Include this value in
a subsequent `mcp_approval_response` input to approve or reject the
corresponding tool call.
"""
error: Optional[str] = None
"""The error from the tool call, if any."""
output: Optional[str] = None
"""The output from the tool call."""
status: Optional[Literal["in_progress", "completed", "incomplete", "calling", "failed"]] = None
"""The status of the tool call.
One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`.
"""
class ItemReference(BaseModel):
"""An internal identifier for an item to reference."""
id: str
"""The ID of the item to reference."""
type: Optional[Literal["item_reference"]] = None
"""The type of item to reference. Always `item_reference`."""
ResponseInputItem: TypeAlias = Annotated[
Union[
EasyInputMessage,
Message,
ResponseOutputMessage,
ResponseFileSearchToolCall,
ResponseComputerToolCall,
ComputerCallOutput,
ResponseFunctionWebSearch,
ResponseFunctionToolCall,
FunctionCallOutput,
ResponseReasoningItem,
ResponseCompactionItemParam,
ImageGenerationCall,
ResponseCodeInterpreterToolCall,
LocalShellCall,
LocalShellCallOutput,
ShellCall,
ShellCallOutput,
ApplyPatchCall,
ApplyPatchCallOutput,
McpListTools,
McpApprovalRequest,
McpApprovalResponse,
McpCall,
ResponseCustomToolCallOutput,
ResponseCustomToolCall,
ItemReference,
],
PropertyInfo(discriminator="type"),
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_input_item.py",
"license": "Apache License 2.0",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_prompt.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, Union, Optional
from typing_extensions import TypeAlias
from ..._models import BaseModel
from .response_input_file import ResponseInputFile
from .response_input_text import ResponseInputText
from .response_input_image import ResponseInputImage
__all__ = ["ResponsePrompt", "Variables"]
Variables: TypeAlias = Union[str, ResponseInputText, ResponseInputImage, ResponseInputFile]
class ResponsePrompt(BaseModel):
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
id: str
"""The unique identifier of the prompt template to use."""
variables: Optional[Dict[str, Variables]] = None
"""Optional map of values to substitute in for variables in your prompt.
The substitution values can either be strings, or other Response input types
like images or files.
"""
version: Optional[str] = None
"""Optional version of the prompt template."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_prompt.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_prompt_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Dict, Union, Optional
from typing_extensions import Required, TypeAlias, TypedDict
from .response_input_file_param import ResponseInputFileParam
from .response_input_text_param import ResponseInputTextParam
from .response_input_image_param import ResponseInputImageParam
__all__ = ["ResponsePromptParam", "Variables"]
Variables: TypeAlias = Union[str, ResponseInputTextParam, ResponseInputImageParam, ResponseInputFileParam]
class ResponsePromptParam(TypedDict, total=False):
"""
Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""
id: Required[str]
"""The unique identifier of the prompt template to use."""
variables: Optional[Dict[str, Variables]]
"""Optional map of values to substitute in for variables in your prompt.
The substitution values can either be strings, or other Response input types
like images or files.
"""
version: Optional[str]
"""Optional version of the prompt template."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_prompt_param.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:examples/responses/background.py | from typing import List
import rich
from pydantic import BaseModel
from openai import OpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
id = None
with client.responses.create(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
background=True,
stream=True,
) as stream:
for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
with client.responses.retrieve(
response_id=id,
stream=True,
starting_after=10,
) as stream:
for event in stream:
if "output_text" in event.type:
rich.print(event)
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses/background.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:examples/responses/background_async.py | import asyncio
from typing import List
import rich
from pydantic import BaseModel
from openai._client import AsyncOpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
async def main() -> None:
client = AsyncOpenAI()
id = None
async with await client.responses.create(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
background=True,
stream=True,
) as stream:
async for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
async with await client.responses.retrieve(
response_id=id,
stream=True,
starting_after=10,
) as stream:
async for event in stream:
if "output_text" in event.type:
rich.print(event)
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses/background_async.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:examples/responses/background_streaming.py | #!/usr/bin/env -S rye run python
from typing import List
import rich
from pydantic import BaseModel
from openai import OpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
id = None
with client.responses.stream(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
text_format=MathResponse,
background=True,
) as stream:
for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
with client.responses.stream(
response_id=id,
starting_after=10,
text_format=MathResponse,
) as stream:
for event in stream:
if "output_text" in event.type:
rich.print(event)
rich.print(stream.get_final_response())
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses/background_streaming.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:examples/responses/background_streaming_async.py | import asyncio
from typing import List
import rich
from pydantic import BaseModel
from openai import AsyncOpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
async def main() -> None:
client = AsyncOpenAI()
id = None
async with client.responses.stream(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
text_format=MathResponse,
background=True,
) as stream:
async for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
async with client.responses.stream(
response_id=id,
starting_after=10,
text_format=MathResponse,
) as stream:
async for event in stream:
if "output_text" in event.type:
rich.print(event)
rich.print(stream.get_final_response())
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "openai/openai-python",
"file_path": "examples/responses/background_streaming_async.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/containers/containers.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Iterable
from typing_extensions import Literal
import httpx
from ... import _legacy_response
from ...types import container_list_params, container_create_params
from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .files.files import (
Files,
AsyncFiles,
FilesWithRawResponse,
AsyncFilesWithRawResponse,
FilesWithStreamingResponse,
AsyncFilesWithStreamingResponse,
)
from ...pagination import SyncCursorPage, AsyncCursorPage
from ..._base_client import AsyncPaginator, make_request_options
from ...types.container_list_response import ContainerListResponse
from ...types.container_create_response import ContainerCreateResponse
from ...types.container_retrieve_response import ContainerRetrieveResponse
__all__ = ["Containers", "AsyncContainers"]
class Containers(SyncAPIResource):
@cached_property
def files(self) -> Files:
return Files(self._client)
@cached_property
def with_raw_response(self) -> ContainersWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ContainersWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ContainersWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ContainersWithStreamingResponse(self)
def create(
self,
*,
name: str,
expires_after: container_create_params.ExpiresAfter | Omit = omit,
file_ids: SequenceNotStr[str] | Omit = omit,
memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
network_policy: container_create_params.NetworkPolicy | Omit = omit,
skills: Iterable[container_create_params.Skill] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerCreateResponse:
"""
Create Container
Args:
name: Name of the container to create.
expires_after: Container expiration time in seconds relative to the 'anchor' time.
file_ids: IDs of files to copy to the container.
memory_limit: Optional memory limit for the container. Defaults to "1g".
network_policy: Network access policy for the container.
skills: An optional list of skills referenced by id or inline data.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/containers",
body=maybe_transform(
{
"name": name,
"expires_after": expires_after,
"file_ids": file_ids,
"memory_limit": memory_limit,
"network_policy": network_policy,
"skills": skills,
},
container_create_params.ContainerCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerCreateResponse,
)
def retrieve(
self,
container_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ContainerRetrieveResponse:
"""
Retrieve Container
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get(
f"/containers/{container_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ContainerRetrieveResponse,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
name: str | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[ContainerListResponse]:
"""List Containers
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
name: Filter results by container name.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/containers",
page=SyncCursorPage[ContainerListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"name": name,
"order": order,
},
container_list_params.ContainerListParams,
),
),
model=ContainerListResponse,
)
def delete(
self,
container_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete Container
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._delete(
f"/containers/{container_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=NoneType,
)
class AsyncContainers(AsyncAPIResource):
    """Async counterpart of `Containers`; every endpoint method here is awaitable
    (except `list`, which returns an async paginator that is iterated lazily)."""

    @cached_property
    def files(self) -> AsyncFiles:
        # Nested sub-resource for the files stored inside a container.
        return AsyncFiles(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncContainersWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncContainersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncContainersWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncContainersWithStreamingResponse(self)

    async def create(
        self,
        *,
        name: str,
        expires_after: container_create_params.ExpiresAfter | Omit = omit,
        file_ids: SequenceNotStr[str] | Omit = omit,
        memory_limit: Literal["1g", "4g", "16g", "64g"] | Omit = omit,
        network_policy: container_create_params.NetworkPolicy | Omit = omit,
        skills: Iterable[container_create_params.Skill] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ContainerCreateResponse:
        """
        Create Container

        Args:
          name: Name of the container to create.

          expires_after: Container expiration time in seconds relative to the 'anchor' time.

          file_ids: IDs of files to copy to the container.

          memory_limit: Optional memory limit for the container. Defaults to "1g".

          network_policy: Network access policy for the container.

          skills: An optional list of skills referenced by id or inline data.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/containers",
            # `async_maybe_transform` drops omitted keys so only provided fields are sent.
            body=await async_maybe_transform(
                {
                    "name": name,
                    "expires_after": expires_after,
                    "file_ids": file_ids,
                    "memory_limit": memory_limit,
                    "network_policy": network_policy,
                    "skills": skills,
                },
                container_create_params.ContainerCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ContainerCreateResponse,
        )

    async def retrieve(
        self,
        container_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ContainerRetrieveResponse:
        """
        Retrieve Container

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against building a malformed URL such as "/containers/".
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        return await self._get(
            f"/containers/{container_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ContainerRetrieveResponse,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        name: str | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncPaginator[ContainerListResponse, AsyncCursorPage[ContainerListResponse]]:
        """List Containers

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          name: Filter results by container name.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Not a coroutine: returns a paginator that performs requests as it is iterated.
        return self._get_api_list(
            "/containers",
            page=AsyncCursorPage[ContainerListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "name": name,
                        "order": order,
                    },
                    container_list_params.ContainerListParams,
                ),
            ),
            model=ContainerListResponse,
        )

    async def delete(
        self,
        container_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Delete Container

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        # Endpoint returns an empty body, so accept any content type.
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return await self._delete(
            f"/containers/{container_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class ContainersWithRawResponse:
    """Mirror of `Containers` whose methods return the raw HTTP response."""

    def __init__(self, containers: Containers) -> None:
        self._containers = containers

        # Bind the wrapper once and apply it to every endpoint method.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> FilesWithRawResponse:
        return FilesWithRawResponse(self._containers.files)
class AsyncContainersWithRawResponse:
    """Mirror of `AsyncContainers` whose methods return the raw HTTP response."""

    def __init__(self, containers: AsyncContainers) -> None:
        self._containers = containers

        # Bind the async wrapper once and apply it to every endpoint method.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> AsyncFilesWithRawResponse:
        return AsyncFilesWithRawResponse(self._containers.files)
class ContainersWithStreamingResponse:
    """Mirror of `Containers` whose methods stream the response body lazily."""

    def __init__(self, containers: Containers) -> None:
        self._containers = containers

        # Bind the wrapper once and apply it to every endpoint method.
        wrap = to_streamed_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> FilesWithStreamingResponse:
        return FilesWithStreamingResponse(self._containers.files)
class AsyncContainersWithStreamingResponse:
    """Mirror of `AsyncContainers` whose methods stream the response body lazily."""

    def __init__(self, containers: AsyncContainers) -> None:
        self._containers = containers

        # Bind the async wrapper once and apply it to every endpoint method.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(containers.create)
        self.retrieve = wrap(containers.retrieve)
        self.list = wrap(containers.list)
        self.delete = wrap(containers.delete)

    @cached_property
    def files(self) -> AsyncFilesWithStreamingResponse:
        return AsyncFilesWithStreamingResponse(self._containers.files)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/containers/containers.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/containers/files/content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
StreamedBinaryAPIResponse,
AsyncStreamedBinaryAPIResponse,
to_custom_streamed_response_wrapper,
async_to_custom_streamed_response_wrapper,
)
from ...._base_client import make_request_options
__all__ = ["Content", "AsyncContent"]
class Content(SyncAPIResource):
    """Synchronous access to the raw byte content of container files."""

    @cached_property
    def with_raw_response(self) -> ContentWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ContentWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ContentWithStreamingResponse(self)

    def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Download the raw bytes of a file stored in a container.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # Ask the server for the raw binary payload rather than a JSON envelope.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return self._get(
            f"/containers/{container_id}/files/{file_id}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class AsyncContent(AsyncAPIResource):
    """Async access to the raw byte content of container files."""

    @cached_property
    def with_raw_response(self) -> AsyncContentWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncContentWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncContentWithStreamingResponse(self)

    async def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """Download the raw bytes of a file stored in a container.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # Ask the server for the raw binary payload rather than a JSON envelope.
        extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return await self._get(
            f"/containers/{container_id}/files/{file_id}/content",
            options=request_options,
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )
class ContentWithRawResponse:
    """Exposes `Content.retrieve` with raw-response semantics."""

    def __init__(self, content: Content) -> None:
        self._content = content
        self.retrieve = _legacy_response.to_raw_response_wrapper(content.retrieve)
class AsyncContentWithRawResponse:
    """Exposes `AsyncContent.retrieve` with raw-response semantics."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(content.retrieve)
class ContentWithStreamingResponse:
    """Exposes `Content.retrieve` with lazily-streamed binary responses."""

    def __init__(self, content: Content) -> None:
        self._content = content
        self.retrieve = to_custom_streamed_response_wrapper(content.retrieve, StreamedBinaryAPIResponse)
class AsyncContentWithStreamingResponse:
    """Exposes `AsyncContent.retrieve` with lazily-streamed binary responses."""

    def __init__(self, content: AsyncContent) -> None:
        self._content = content
        self.retrieve = async_to_custom_streamed_response_wrapper(content.retrieve, AsyncStreamedBinaryAPIResponse)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/containers/files/content.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/resources/containers/files/files.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Mapping, cast
from typing_extensions import Literal
import httpx
from .... import _legacy_response
from .content import (
Content,
AsyncContent,
ContentWithRawResponse,
AsyncContentWithRawResponse,
ContentWithStreamingResponse,
AsyncContentWithStreamingResponse,
)
from ...._types import Body, Omit, Query, Headers, NoneType, NotGiven, FileTypes, omit, not_given
from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import AsyncPaginator, make_request_options
from ....types.containers import file_list_params, file_create_params
from ....types.containers.file_list_response import FileListResponse
from ....types.containers.file_create_response import FileCreateResponse
from ....types.containers.file_retrieve_response import FileRetrieveResponse
__all__ = ["Files", "AsyncFiles"]
class Files(SyncAPIResource):
    """Synchronous resource for managing the files attached to a container."""

    @cached_property
    def content(self) -> Content:
        # Nested sub-resource for downloading a file's raw bytes.
        return Content(self._client)

    @cached_property
    def with_raw_response(self) -> FilesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return FilesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> FilesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return FilesWithStreamingResponse(self)

    def create(
        self,
        container_id: str,
        *,
        file: FileTypes | Omit = omit,
        file_id: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileCreateResponse:
        """
        Create a Container File

        You can send either a multipart/form-data request with the raw file content, or
        a JSON request with a file ID.

        Args:
          file: The File object (not file name) to be uploaded.

          file_id: Name of the file to create.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        # Copy the payload so `extract_files` below can pull file entries out of it
        # without mutating caller-owned data.
        body = deepcopy_minimal(
            {
                "file": file,
                "file_id": file_id,
            }
        )
        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
        # When raw file content was supplied, switch to a multipart upload;
        # otherwise the request is sent as plain JSON with just `file_id`.
        if files:
            # It should be noted that the actual Content-Type header that will be
            # sent to the server will contain a `boundary` parameter, e.g.
            # multipart/form-data; boundary=---abc--
            extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/containers/{container_id}/files",
            body=maybe_transform(body, file_create_params.FileCreateParams),
            files=files,
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileCreateResponse,
        )

    def retrieve(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> FileRetrieveResponse:
        """
        Retrieve Container File

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Both path segments must be non-empty to avoid a malformed URL.
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        return self._get(
            f"/containers/{container_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FileRetrieveResponse,
        )

    def list(
        self,
        container_id: str,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[FileListResponse]:
        """List Container files

        Args:
          after: A cursor for use in pagination. `after` is an object ID that defines your place
              in the list. For instance, if you make a list request and receive 100 objects,
              ending with obj_foo, your subsequent call can include after=obj_foo in order to
              fetch the next page of the list.

          limit: A limit on the number of objects to be returned. Limit can range between 1 and
              100, and the default is 20.

          order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
              order and `desc` for descending order.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        return self._get_api_list(
            f"/containers/{container_id}/files",
            page=SyncCursorPage[FileListResponse],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Omitted pagination params are stripped before the request is sent.
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "order": order,
                    },
                    file_list_params.FileListParams,
                ),
            ),
            model=FileListResponse,
        )

    def delete(
        self,
        file_id: str,
        *,
        container_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> None:
        """
        Delete Container File

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not container_id:
            raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
        if not file_id:
            raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
        # Endpoint returns an empty body, so accept any content type.
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/containers/{container_id}/files/{file_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )
class AsyncFiles(AsyncAPIResource):
@cached_property
def content(self) -> AsyncContent:
return AsyncContent(self._client)
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncFilesWithStreamingResponse(self)
async def create(
self,
container_id: str,
*,
file: FileTypes | Omit = omit,
file_id: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileCreateResponse:
"""
Create a Container File
You can send either a multipart/form-data request with the raw file content, or
a JSON request with a file ID.
Args:
file: The File object (not file name) to be uploaded.
file_id: Name of the file to create.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
body = deepcopy_minimal(
{
"file": file,
"file_id": file_id,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
if files:
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
f"/containers/{container_id}/files",
body=await async_maybe_transform(body, file_create_params.FileCreateParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileCreateResponse,
)
async def retrieve(
self,
file_id: str,
*,
container_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileRetrieveResponse:
"""
Retrieve Container File
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
return await self._get(
f"/containers/{container_id}/files/{file_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileRetrieveResponse,
)
def list(
self,
container_id: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[FileListResponse, AsyncCursorPage[FileListResponse]]:
"""List Container files
Args:
after: A cursor for use in pagination.
`after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not container_id:
raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
return self._get_api_list(
f"/containers/{container_id}/files",
page=AsyncCursorPage[FileListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
},
file_list_params.FileListParams,
),
),
model=FileListResponse,
)
async def delete(
    self,
    file_id: str,
    *,
    container_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
    """Delete a file from a container.

    Args:
      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # Reject empty path parameters up front so we never build a malformed URL.
    # Order matters for the error raised first: container_id, then file_id.
    for label, value in (("container_id", container_id), ("file_id", file_id)):
        if not value:
            raise ValueError(f"Expected a non-empty value for `{label}` but received {value!r}")

    # The endpoint returns no body, so accept any content type.
    merged_headers = {"Accept": "*/*", **(extra_headers or {})}
    return await self._delete(
        f"/containers/{container_id}/files/{file_id}",
        options=make_request_options(
            extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=NoneType,
    )
class FilesWithRawResponse:
    """Namespace exposing `Files` endpoint methods wrapped by `to_raw_response_wrapper`."""

    def __init__(self, files: Files) -> None:
        self._files = files

        # Wrap every endpoint method once; each wrapped attribute mirrors the
        # resource's method of the same name.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> ContentWithRawResponse:
        return ContentWithRawResponse(self._files.content)
class AsyncFilesWithRawResponse:
    """Namespace exposing `AsyncFiles` endpoint methods wrapped by `async_to_raw_response_wrapper`."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        # One wrapped attribute per endpoint method on the async resource.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(files.create)
        self.retrieve = wrap(files.retrieve)
        self.list = wrap(files.list)
        self.delete = wrap(files.delete)

    @cached_property
    def content(self) -> AsyncContentWithRawResponse:
        return AsyncContentWithRawResponse(self._files.content)
class FilesWithStreamingResponse:
    """Namespace exposing `Files` endpoint methods wrapped by `to_streamed_response_wrapper`."""

    def __init__(self, files: Files) -> None:
        self._files = files

        # Each attribute is the streaming-response variant of the resource method.
        self.create = to_streamed_response_wrapper(files.create)
        self.retrieve = to_streamed_response_wrapper(files.retrieve)
        self.list = to_streamed_response_wrapper(files.list)
        self.delete = to_streamed_response_wrapper(files.delete)

    @cached_property
    def content(self) -> ContentWithStreamingResponse:
        return ContentWithStreamingResponse(self._files.content)
class AsyncFilesWithStreamingResponse:
    """Namespace exposing `AsyncFiles` endpoint methods wrapped by `async_to_streamed_response_wrapper`."""

    def __init__(self, files: AsyncFiles) -> None:
        self._files = files

        # Each attribute is the async streaming-response variant of the resource method.
        self.create = async_to_streamed_response_wrapper(files.create)
        self.retrieve = async_to_streamed_response_wrapper(files.retrieve)
        self.list = async_to_streamed_response_wrapper(files.list)
        self.delete = async_to_streamed_response_wrapper(files.delete)

    @cached_property
    def content(self) -> AsyncContentWithStreamingResponse:
        return AsyncContentWithStreamingResponse(self._files.content)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/containers/files/files.py",
"license": "Apache License 2.0",
"lines": 460,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/container_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from .._types import SequenceNotStr
from .responses.inline_skill_param import InlineSkillParam
from .responses.skill_reference_param import SkillReferenceParam
from .responses.container_network_policy_disabled_param import ContainerNetworkPolicyDisabledParam
from .responses.container_network_policy_allowlist_param import ContainerNetworkPolicyAllowlistParam
__all__ = ["ContainerCreateParams", "ExpiresAfter", "NetworkPolicy", "Skill"]
class ContainerCreateParams(TypedDict, total=False):
    """Request parameters for creating a container.

    Only `name` is required; all other fields are optional.
    """

    name: Required[str]
    """Name of the container to create."""

    expires_after: ExpiresAfter
    """Container expiration time in seconds relative to the 'anchor' time."""

    file_ids: SequenceNotStr[str]
    """IDs of files to copy to the container."""

    memory_limit: Literal["1g", "4g", "16g", "64g"]
    """Optional memory limit for the container. Defaults to "1g"."""

    network_policy: NetworkPolicy
    """Network access policy for the container."""

    skills: Iterable[Skill]
    """An optional list of skills referenced by id or inline data."""
class ExpiresAfter(TypedDict, total=False):
    """Container expiration time in seconds relative to the 'anchor' time."""

    anchor: Required[Literal["last_active_at"]]
    """Time anchor for the expiration time.

    Currently only 'last_active_at' is supported.
    """

    # Number of minutes after the anchor before the container expires.
    minutes: Required[int]
# A network policy is either the "disabled" variant or the "allowlist" variant.
NetworkPolicy: TypeAlias = Union[ContainerNetworkPolicyDisabledParam, ContainerNetworkPolicyAllowlistParam]

# A skill may be supplied by reference (id) or with inline data.
Skill: TypeAlias = Union[SkillReferenceParam, InlineSkillParam]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/container_create_params.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/container_create_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ContainerCreateResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
    """
    The container will expire after this time period.
    The anchor is the reference point for the expiration.
    The minutes is the number of minutes after the anchor before the container expires.
    """

    # Currently the only anchor value in the schema is `last_active_at`.
    anchor: Optional[Literal["last_active_at"]] = None
    """The reference point for the expiration."""

    minutes: Optional[int] = None
    """The number of minutes after the anchor before the container expires."""
class NetworkPolicy(BaseModel):
    """Network access policy for the container."""

    type: Literal["allowlist", "disabled"]
    """The network policy mode."""

    # Only meaningful when `type` is "allowlist".
    allowed_domains: Optional[List[str]] = None
    """Allowed outbound domains when `type` is `allowlist`."""
class ContainerCreateResponse(BaseModel):
    """A container object as returned by the create-container endpoint."""

    id: str
    """Unique identifier for the container."""

    created_at: int
    """Unix timestamp (in seconds) when the container was created."""

    name: str
    """Name of the container."""

    object: str
    """The type of this object."""

    status: str
    """Status of the container (e.g., active, deleted)."""

    # Optional fields below may be omitted by the server; they default to None.
    expires_after: Optional[ExpiresAfter] = None
    """
    The container will expire after this time period. The anchor is the reference
    point for the expiration. The minutes is the number of minutes after the anchor
    before the container expires.
    """

    last_active_at: Optional[int] = None
    """Unix timestamp (in seconds) when the container was last active."""

    memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
    """The memory limit configured for the container."""

    network_policy: Optional[NetworkPolicy] = None
    """Network access policy for the container."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/container_create_response.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/container_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["ContainerListParams"]
class ContainerListParams(TypedDict, total=False):
    """Query parameters for listing containers; all fields are optional."""

    after: str
    """A cursor for use in pagination.

    `after` is an object ID that defines your place in the list. For instance, if
    you make a list request and receive 100 objects, ending with obj_foo, your
    subsequent call can include after=obj_foo in order to fetch the next page of the
    list.
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """

    name: str
    """Filter results by container name."""

    order: Literal["asc", "desc"]
    """Sort order by the `created_at` timestamp of the objects.

    `asc` for ascending order and `desc` for descending order.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/container_list_params.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/container_list_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ContainerListResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
    """
    The container will expire after this time period.
    The anchor is the reference point for the expiration.
    The minutes is the number of minutes after the anchor before the container expires.
    """

    # Currently the only anchor value in the schema is `last_active_at`.
    anchor: Optional[Literal["last_active_at"]] = None
    """The reference point for the expiration."""

    minutes: Optional[int] = None
    """The number of minutes after the anchor before the container expires."""
class NetworkPolicy(BaseModel):
    """Network access policy for the container."""

    type: Literal["allowlist", "disabled"]
    """The network policy mode."""

    # Only meaningful when `type` is "allowlist".
    allowed_domains: Optional[List[str]] = None
    """Allowed outbound domains when `type` is `allowlist`."""
class ContainerListResponse(BaseModel):
    """A container object as returned by the list-containers endpoint."""

    id: str
    """Unique identifier for the container."""

    created_at: int
    """Unix timestamp (in seconds) when the container was created."""

    name: str
    """Name of the container."""

    object: str
    """The type of this object."""

    status: str
    """Status of the container (e.g., active, deleted)."""

    # Optional fields below may be omitted by the server; they default to None.
    expires_after: Optional[ExpiresAfter] = None
    """
    The container will expire after this time period. The anchor is the reference
    point for the expiration. The minutes is the number of minutes after the anchor
    before the container expires.
    """

    last_active_at: Optional[int] = None
    """Unix timestamp (in seconds) when the container was last active."""

    memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
    """The memory limit configured for the container."""

    network_policy: Optional[NetworkPolicy] = None
    """Network access policy for the container."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/container_list_response.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/container_retrieve_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
from typing_extensions import Literal
from .._models import BaseModel
__all__ = ["ContainerRetrieveResponse", "ExpiresAfter", "NetworkPolicy"]
class ExpiresAfter(BaseModel):
    """
    The container will expire after this time period.
    The anchor is the reference point for the expiration.
    The minutes is the number of minutes after the anchor before the container expires.
    """

    # Currently the only anchor value in the schema is `last_active_at`.
    anchor: Optional[Literal["last_active_at"]] = None
    """The reference point for the expiration."""

    minutes: Optional[int] = None
    """The number of minutes after the anchor before the container expires."""
class NetworkPolicy(BaseModel):
    """Network access policy for the container."""

    type: Literal["allowlist", "disabled"]
    """The network policy mode."""

    # Only meaningful when `type` is "allowlist".
    allowed_domains: Optional[List[str]] = None
    """Allowed outbound domains when `type` is `allowlist`."""
class ContainerRetrieveResponse(BaseModel):
    """A container object as returned by the retrieve-container endpoint."""

    id: str
    """Unique identifier for the container."""

    created_at: int
    """Unix timestamp (in seconds) when the container was created."""

    name: str
    """Name of the container."""

    object: str
    """The type of this object."""

    status: str
    """Status of the container (e.g., active, deleted)."""

    # Optional fields below may be omitted by the server; they default to None.
    expires_after: Optional[ExpiresAfter] = None
    """
    The container will expire after this time period. The anchor is the reference
    point for the expiration. The minutes is the number of minutes after the anchor
    before the container expires.
    """

    last_active_at: Optional[int] = None
    """Unix timestamp (in seconds) when the container was last active."""

    memory_limit: Optional[Literal["1g", "4g", "16g", "64g"]] = None
    """The memory limit configured for the container."""

    network_policy: Optional[NetworkPolicy] = None
    """Network access policy for the container."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/container_retrieve_response.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/containers/file_create_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
from ..._types import FileTypes
__all__ = ["FileCreateParams"]
class FileCreateParams(TypedDict, total=False):
    """Request parameters for creating a container file.

    NOTE(review): both fields are optional in this schema; presumably the API
    expects one of `file` or `file_id` to be supplied — confirm against the
    OpenAPI spec.
    """

    file: FileTypes
    """The File object (not file name) to be uploaded."""

    file_id: str
    """Name of the file to create."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/containers/file_create_params.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/containers/file_create_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FileCreateResponse"]
class FileCreateResponse(BaseModel):
    """A container file object as returned by the create-container-file endpoint."""

    id: str
    """Unique identifier for the file."""

    bytes: int
    """Size of the file in bytes."""

    container_id: str
    """The container this file belongs to."""

    created_at: int
    """Unix timestamp (in seconds) when the file was created."""

    object: Literal["container.file"]
    """The type of this object (`container.file`)."""

    path: str
    """Path of the file in the container."""

    source: str
    """Source of the file (e.g., `user`, `assistant`)."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/containers/file_create_response.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/containers/file_list_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import Literal, TypedDict
__all__ = ["FileListParams"]
class FileListParams(TypedDict, total=False):
    """Query parameters for listing container files; all fields are optional."""

    after: str
    """A cursor for use in pagination.

    `after` is an object ID that defines your place in the list. For instance, if
    you make a list request and receive 100 objects, ending with obj_foo, your
    subsequent call can include after=obj_foo in order to fetch the next page of the
    list.
    """

    limit: int
    """A limit on the number of objects to be returned.

    Limit can range between 1 and 100, and the default is 20.
    """

    order: Literal["asc", "desc"]
    """Sort order by the `created_at` timestamp of the objects.

    `asc` for ascending order and `desc` for descending order.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/containers/file_list_params.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/containers/file_list_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FileListResponse"]
class FileListResponse(BaseModel):
    """A container file object as returned by the list-container-files endpoint."""

    id: str
    """Unique identifier for the file."""

    bytes: int
    """Size of the file in bytes."""

    container_id: str
    """The container this file belongs to."""

    created_at: int
    """Unix timestamp (in seconds) when the file was created."""

    object: Literal["container.file"]
    """The type of this object (`container.file`)."""

    path: str
    """Path of the file in the container."""

    source: str
    """Source of the file (e.g., `user`, `assistant`)."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/containers/file_list_response.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/containers/file_retrieve_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["FileRetrieveResponse"]
class FileRetrieveResponse(BaseModel):
    """A container file object as returned by the retrieve-container-file endpoint."""

    id: str
    """Unique identifier for the file."""

    bytes: int
    """Size of the file in bytes."""

    container_id: str
    """The container this file belongs to."""

    created_at: int
    """Unix timestamp (in seconds) when the file was created."""

    object: Literal["container.file"]
    """The type of this object (`container.file`)."""

    path: str
    """Path of the file in the container."""

    source: str
    """Source of the file (e.g., `user`, `assistant`)."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/containers/file_retrieve_response.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:tests/api_resources/containers/files/test_content.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import httpx
import pytest
from respx import MockRouter
import openai._legacy_response as _legacy_response
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
# pyright: reportDeprecated=false
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestContent:
    """Sync-client tests for `containers.files.content.retrieve` against a respx-mocked endpoint."""

    # Runs each test with a "loose" and a "strict" client fixture (indirect parametrization).
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_method_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        # Mock the content endpoint; the helper should expose the raw response body.
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        content = client.containers.files.content.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
        assert content.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_raw_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        response = client.containers.files.content.with_raw_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        content = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_streaming_response_retrieve(self, client: OpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        with client.containers.files.content.with_streaming_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            content = response.parse()
            assert_matches_type(bytes, content, path=["response"])
        # The streaming context manager must close the response on exit.
        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        # Empty path parameters must be rejected client-side before any request is made.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            client.containers.files.content.with_raw_response.retrieve(
                file_id="file_id",
                container_id="",
            )
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.containers.files.content.with_raw_response.retrieve(
                file_id="",
                container_id="container_id",
            )
class TestAsyncContent:
    """Async-client tests for `containers.files.content.retrieve` against a respx-mocked endpoint."""

    # Runs each test with "loose", "strict", and aiohttp-backed async client fixtures.
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_method_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        # Mock the content endpoint; the helper should expose the raw response body.
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        content = await async_client.containers.files.content.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert isinstance(content, _legacy_response.HttpxBinaryResponseContent)
        assert content.json() == {"foo": "bar"}

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        response = await async_client.containers.files.content.with_raw_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        content = response.parse()
        assert_matches_type(_legacy_response.HttpxBinaryResponseContent, content, path=["response"])

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
        respx_mock.get("/containers/container_id/files/file_id/content").mock(
            return_value=httpx.Response(200, json={"foo": "bar"})
        )
        async with async_client.containers.files.content.with_streaming_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            content = await response.parse()
            assert_matches_type(bytes, content, path=["response"])
        # The streaming context manager must close the response on exit.
        assert cast(Any, response.is_closed) is True

    @parametrize
    @pytest.mark.respx(base_url=base_url)
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        # Empty path parameters must be rejected client-side before any request is made.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.files.content.with_raw_response.retrieve(
                file_id="file_id",
                container_id="",
            )
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.containers.files.content.with_raw_response.retrieve(
                file_id="",
                container_id="container_id",
            )
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/containers/files/test_content.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/containers/test_files.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.pagination import SyncCursorPage, AsyncCursorPage
from openai.types.containers import (
FileListResponse,
FileCreateResponse,
FileRetrieveResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestFiles:
    """Sync-client tests for the container files resource (create/retrieve/list/delete)."""

    # Runs each test with a "loose" and a "strict" client fixture (indirect parametrization).
    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    # --- create -----------------------------------------------------------

    @parametrize
    def test_method_create(self, client: OpenAI) -> None:
        file = client.containers.files.create(
            container_id="container_id",
        )
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    def test_method_create_with_all_params(self, client: OpenAI) -> None:
        file = client.containers.files.create(
            container_id="container_id",
            file=b"raw file contents",
            file_id="file_id",
        )
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: OpenAI) -> None:
        response = client.containers.files.with_raw_response.create(
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: OpenAI) -> None:
        with client.containers.files.with_streaming_response.create(
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            file = response.parse()
            assert_matches_type(FileCreateResponse, file, path=["response"])
        # The streaming context manager must close the response on exit.
        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_create(self, client: OpenAI) -> None:
        # Empty path parameters must be rejected client-side before any request is made.
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            client.containers.files.with_raw_response.create(
                container_id="",
            )

    # --- retrieve ---------------------------------------------------------

    @parametrize
    def test_method_retrieve(self, client: OpenAI) -> None:
        file = client.containers.files.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert_matches_type(FileRetrieveResponse, file, path=["response"])

    @parametrize
    def test_raw_response_retrieve(self, client: OpenAI) -> None:
        response = client.containers.files.with_raw_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(FileRetrieveResponse, file, path=["response"])

    @parametrize
    def test_streaming_response_retrieve(self, client: OpenAI) -> None:
        with client.containers.files.with_streaming_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            file = response.parse()
            assert_matches_type(FileRetrieveResponse, file, path=["response"])
        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_retrieve(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            client.containers.files.with_raw_response.retrieve(
                file_id="file_id",
                container_id="",
            )
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.containers.files.with_raw_response.retrieve(
                file_id="",
                container_id="container_id",
            )

    # --- list -------------------------------------------------------------

    @parametrize
    def test_method_list(self, client: OpenAI) -> None:
        file = client.containers.files.list(
            container_id="container_id",
        )
        assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    def test_method_list_with_all_params(self, client: OpenAI) -> None:
        file = client.containers.files.list(
            container_id="container_id",
            after="after",
            limit=0,
            order="asc",
        )
        assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: OpenAI) -> None:
        response = client.containers.files.with_raw_response.list(
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: OpenAI) -> None:
        with client.containers.files.with_streaming_response.list(
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            file = response.parse()
            assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_list(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            client.containers.files.with_raw_response.list(
                container_id="",
            )

    # --- delete -----------------------------------------------------------

    @parametrize
    def test_method_delete(self, client: OpenAI) -> None:
        # Delete has no response body, so the helper returns None.
        file = client.containers.files.delete(
            file_id="file_id",
            container_id="container_id",
        )
        assert file is None

    @parametrize
    def test_raw_response_delete(self, client: OpenAI) -> None:
        response = client.containers.files.with_raw_response.delete(
            file_id="file_id",
            container_id="container_id",
        )
        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert file is None

    @parametrize
    def test_streaming_response_delete(self, client: OpenAI) -> None:
        with client.containers.files.with_streaming_response.delete(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"
            file = response.parse()
            assert file is None
        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: OpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            client.containers.files.with_raw_response.delete(
                file_id="file_id",
                container_id="",
            )
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.containers.files.with_raw_response.delete(
                file_id="",
                container_id="container_id",
            )
class TestAsyncFiles:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.create(
container_id="container_id",
)
assert_matches_type(FileCreateResponse, file, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.create(
container_id="container_id",
file=b"raw file contents",
file_id="file_id",
)
assert_matches_type(FileCreateResponse, file, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.files.with_raw_response.create(
container_id="container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileCreateResponse, file, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.files.with_streaming_response.create(
container_id="container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
assert_matches_type(FileCreateResponse, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.files.with_raw_response.create(
container_id="",
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.retrieve(
file_id="file_id",
container_id="container_id",
)
assert_matches_type(FileRetrieveResponse, file, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.files.with_raw_response.retrieve(
file_id="file_id",
container_id="container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileRetrieveResponse, file, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.files.with_streaming_response.retrieve(
file_id="file_id",
container_id="container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
assert_matches_type(FileRetrieveResponse, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.files.with_raw_response.retrieve(
file_id="file_id",
container_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
await async_client.containers.files.with_raw_response.retrieve(
file_id="",
container_id="container_id",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.list(
container_id="container_id",
)
assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.list(
container_id="container_id",
after="after",
limit=0,
order="asc",
)
assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.files.with_raw_response.list(
container_id="container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.files.with_streaming_response.list(
container_id="container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.files.with_raw_response.list(
container_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
file = await async_client.containers.files.delete(
file_id="file_id",
container_id="container_id",
)
assert file is None
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.files.with_raw_response.delete(
file_id="file_id",
container_id="container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert file is None
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.files.with_streaming_response.delete(
file_id="file_id",
container_id="container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = await response.parse()
assert file is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.files.with_raw_response.delete(
file_id="file_id",
container_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
await async_client.containers.files.with_raw_response.delete(
file_id="",
container_id="container_id",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/containers/test_files.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:tests/api_resources/test_containers.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import os
from typing import Any, cast
import pytest
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
from openai.types import (
ContainerListResponse,
ContainerCreateResponse,
ContainerRetrieveResponse,
)
from openai.pagination import SyncCursorPage, AsyncCursorPage
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
class TestContainers:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
container = client.containers.create(
name="name",
)
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
container = client.containers.create(
name="name",
expires_after={
"anchor": "last_active_at",
"minutes": 0,
},
file_ids=["string"],
memory_limit="1g",
network_policy={"type": "disabled"},
skills=[
{
"skill_id": "x",
"type": "skill_reference",
"version": "version",
}
],
)
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.create(
name="name",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.create(
name="name",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
container = client.containers.retrieve(
"container_id",
)
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.retrieve(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.retrieve(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.with_raw_response.retrieve(
"",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
container = client.containers.list()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
container = client.containers.list(
after="after",
limit=0,
name="name",
order="asc",
)
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
container = client.containers.delete(
"container_id",
)
assert container is None
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.containers.with_raw_response.delete(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert container is None
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.containers.with_streaming_response.delete(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert container is None
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
client.containers.with_raw_response.delete(
"",
)
class TestAsyncContainers:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.create(
name="name",
)
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.create(
name="name",
expires_after={
"anchor": "last_active_at",
"minutes": 0,
},
file_ids=["string"],
memory_limit="1g",
network_policy={"type": "disabled"},
skills=[
{
"skill_id": "x",
"type": "skill_reference",
"version": "version",
}
],
)
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.with_raw_response.create(
name="name",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.with_streaming_response.create(
name="name",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = await response.parse()
assert_matches_type(ContainerCreateResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.retrieve(
"container_id",
)
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.with_raw_response.retrieve(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.with_streaming_response.retrieve(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = await response.parse()
assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.list()
assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.list(
after="after",
limit=0,
name="name",
order="asc",
)
assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = await response.parse()
assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
container = await async_client.containers.delete(
"container_id",
)
assert container is None
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.containers.with_raw_response.delete(
"container_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = response.parse()
assert container is None
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.containers.with_streaming_response.delete(
"container_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
container = await response.parse()
assert container is None
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
await async_client.containers.with_raw_response.delete(
"",
)
| {
"repo_id": "openai/openai-python",
"file_path": "tests/api_resources/test_containers.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
openai/openai-python:src/openai/types/responses/response_code_interpreter_tool_call_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable, Optional
from typing_extensions import Literal, Required, TypeAlias, TypedDict
__all__ = ["ResponseCodeInterpreterToolCallParam", "Output", "OutputLogs", "OutputImage"]
class OutputLogs(TypedDict, total=False):
"""The logs output from the code interpreter."""
logs: Required[str]
"""The logs output from the code interpreter."""
type: Required[Literal["logs"]]
"""The type of the output. Always `logs`."""
class OutputImage(TypedDict, total=False):
"""The image output from the code interpreter."""
type: Required[Literal["image"]]
"""The type of the output. Always `image`."""
url: Required[str]
"""The URL of the image output from the code interpreter."""
Output: TypeAlias = Union[OutputLogs, OutputImage]
class ResponseCodeInterpreterToolCallParam(TypedDict, total=False):
"""A tool call to run code."""
id: Required[str]
"""The unique ID of the code interpreter tool call."""
code: Required[Optional[str]]
"""The code to run, or null if not available."""
container_id: Required[str]
"""The ID of the container used to run the code."""
outputs: Required[Optional[Iterable[Output]]]
"""
The outputs generated by the code interpreter, such as logs or images. Can be
null if no outputs are available.
"""
status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]]
"""The status of the code interpreter tool call.
Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and
`failed`.
"""
type: Required[Literal["code_interpreter_call"]]
"""The type of the code interpreter tool call. Always `code_interpreter_call`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_code_interpreter_tool_call_param.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_image_gen_call_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseImageGenCallCompletedEvent"]
class ResponseImageGenCallCompletedEvent(BaseModel):
"""
Emitted when an image generation tool call has completed and the final image is available.
"""
item_id: str
"""The unique identifier of the image generation item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.image_generation_call.completed"]
"""The type of the event. Always 'response.image_generation_call.completed'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_image_gen_call_completed_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_image_gen_call_generating_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseImageGenCallGeneratingEvent"]
class ResponseImageGenCallGeneratingEvent(BaseModel):
"""
Emitted when an image generation tool call is actively generating an image (intermediate state).
"""
item_id: str
"""The unique identifier of the image generation item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of the image generation item being processed."""
type: Literal["response.image_generation_call.generating"]
"""The type of the event. Always 'response.image_generation_call.generating'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_image_gen_call_generating_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_image_gen_call_in_progress_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseImageGenCallInProgressEvent"]
class ResponseImageGenCallInProgressEvent(BaseModel):
"""Emitted when an image generation tool call is in progress."""
item_id: str
"""The unique identifier of the image generation item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of the image generation item being processed."""
type: Literal["response.image_generation_call.in_progress"]
"""The type of the event. Always 'response.image_generation_call.in_progress'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_image_gen_call_in_progress_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_image_gen_call_partial_image_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseImageGenCallPartialImageEvent"]
class ResponseImageGenCallPartialImageEvent(BaseModel):
"""Emitted when a partial image is available during image generation streaming."""
item_id: str
"""The unique identifier of the image generation item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
partial_image_b64: str
"""Base64-encoded partial image data, suitable for rendering as an image."""
partial_image_index: int
"""
0-based index for the partial image (backend is 1-based, but this is 0-based for
the user).
"""
sequence_number: int
"""The sequence number of the image generation item being processed."""
type: Literal["response.image_generation_call.partial_image"]
"""The type of the event. Always 'response.image_generation_call.partial_image'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_image_gen_call_partial_image_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_mcp_call_arguments_delta_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallArgumentsDeltaEvent"]
class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
"""
Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
"""
delta: str
"""
A JSON string containing the partial update to the arguments for the MCP tool
call.
"""
item_id: str
"""The unique identifier of the MCP tool call item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call_arguments.delta"]
"""The type of the event. Always 'response.mcp_call_arguments.delta'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_call_arguments_delta_event.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_mcp_call_arguments_done_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallArgumentsDoneEvent"]
class ResponseMcpCallArgumentsDoneEvent(BaseModel):
"""Emitted when the arguments for an MCP tool call are finalized."""
arguments: str
"""A JSON string containing the finalized arguments for the MCP tool call."""
item_id: str
"""The unique identifier of the MCP tool call item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call_arguments.done"]
"""The type of the event. Always 'response.mcp_call_arguments.done'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_call_arguments_done_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_call_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallCompletedEvent"]
class ResponseMcpCallCompletedEvent(BaseModel):
"""Emitted when an MCP tool call has completed successfully."""
item_id: str
"""The ID of the MCP tool call item that completed."""
output_index: int
"""The index of the output item that completed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call.completed"]
"""The type of the event. Always 'response.mcp_call.completed'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_call_completed_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_call_failed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallFailedEvent"]
class ResponseMcpCallFailedEvent(BaseModel):
"""Emitted when an MCP tool call has failed."""
item_id: str
"""The ID of the MCP tool call item that failed."""
output_index: int
"""The index of the output item that failed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call.failed"]
"""The type of the event. Always 'response.mcp_call.failed'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_call_failed_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_call_in_progress_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallInProgressEvent"]
class ResponseMcpCallInProgressEvent(BaseModel):
"""Emitted when an MCP tool call is in progress."""
item_id: str
"""The unique identifier of the MCP tool call item being processed."""
output_index: int
"""The index of the output item in the response's output array."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call.in_progress"]
"""The type of the event. Always 'response.mcp_call.in_progress'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_call_in_progress_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_list_tools_completed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpListToolsCompletedEvent"]
class ResponseMcpListToolsCompletedEvent(BaseModel):
"""Emitted when the list of available MCP tools has been successfully retrieved."""
item_id: str
"""The ID of the MCP tool call item that produced this output."""
output_index: int
"""The index of the output item that was processed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_list_tools.completed"]
"""The type of the event. Always 'response.mcp_list_tools.completed'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_list_tools_completed_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_list_tools_failed_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpListToolsFailedEvent"]
class ResponseMcpListToolsFailedEvent(BaseModel):
"""Emitted when the attempt to list available MCP tools has failed."""
item_id: str
"""The ID of the MCP tool call item that failed."""
output_index: int
"""The index of the output item that failed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_list_tools.failed"]
"""The type of the event. Always 'response.mcp_list_tools.failed'."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_list_tools_failed_event.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_mcp_list_tools_in_progress_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpListToolsInProgressEvent"]
class ResponseMcpListToolsInProgressEvent(BaseModel):
    """
    Emitted when the system is in the process of retrieving the list of available MCP tools.
    """

    item_id: str
    """The ID of the MCP tool call item that is being processed."""

    output_index: int
    """The index of the output item that is being processed."""

    sequence_number: int
    """The sequence number of this event."""

    type: Literal["response.mcp_list_tools.in_progress"]
    """The type of the event. Always `response.mcp_list_tools.in_progress`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_mcp_list_tools_in_progress_event.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/responses/response_output_text_annotation_added_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseOutputTextAnnotationAddedEvent"]
class ResponseOutputTextAnnotationAddedEvent(BaseModel):
    """Emitted when an annotation is added to output text content."""

    annotation: object
    """The annotation object being added. (See annotation schema for details.)"""

    annotation_index: int
    """The index of the annotation within the content part."""

    content_index: int
    """The index of the content part within the output item."""

    item_id: str
    """The unique identifier of the item to which the annotation is being added."""

    output_index: int
    """The index of the output item in the response's output array."""

    sequence_number: int
    """The sequence number of this event."""

    type: Literal["response.output_text.annotation.added"]
    """The type of the event. Always `response.output_text.annotation.added`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_output_text_annotation_added_event.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/responses/response_queued_event.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .response import Response
from ..._models import BaseModel
__all__ = ["ResponseQueuedEvent"]
class ResponseQueuedEvent(BaseModel):
    """Emitted when a response is queued and waiting to be processed."""

    response: Response
    """The full response object that is queued."""

    sequence_number: int
    """The sequence number for this event."""

    type: Literal["response.queued"]
    """The type of the event. Always `response.queued`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/responses/response_queued_event.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/_utils/_resources_proxy.py | from __future__ import annotations
from typing import Any
from typing_extensions import override
from ._proxy import LazyProxy
class ResourcesProxy(LazyProxy[Any]):
    """Lazy stand-in for the `openai.resources` module.

    Allows users to write `import openai` and then reference
    `openai.resources`, while deferring the actual import of the
    resources package until the proxy is first used.
    """

    @override
    def __load__(self) -> Any:
        # Import lazily, only when the proxied module is first accessed.
        import importlib

        return importlib.import_module("openai.resources")


resources = ResourcesProxy().__as_proxied__()
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/_utils/_resources_proxy.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/fine_tuning/alpha/alpha.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from .graders import (
Graders,
AsyncGraders,
GradersWithRawResponse,
AsyncGradersWithRawResponse,
GradersWithStreamingResponse,
AsyncGradersWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
__all__ = ["Alpha", "AsyncAlpha"]
class Alpha(SyncAPIResource):
    """Synchronous access to the alpha fine-tuning API surface."""

    @cached_property
    def graders(self) -> Graders:
        # Sub-resource is built lazily and cached on first access.
        return Graders(self._client)

    @cached_property
    def with_raw_response(self) -> AlphaWithRawResponse:
        """
        Prefix accessor: any HTTP method called through it returns the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AlphaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AlphaWithStreamingResponse:
        """
        Like `.with_raw_response`, except the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AlphaWithStreamingResponse(self)
class AsyncAlpha(AsyncAPIResource):
    """Asynchronous access to the alpha fine-tuning API surface."""

    @cached_property
    def graders(self) -> AsyncGraders:
        # Sub-resource is built lazily and cached on first access.
        return AsyncGraders(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAlphaWithRawResponse:
        """
        Prefix accessor: any HTTP method called through it returns the raw
        response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAlphaWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAlphaWithStreamingResponse:
        """
        Like `.with_raw_response`, except the response body is not read eagerly.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncAlphaWithStreamingResponse(self)
class AlphaWithRawResponse:
    """Raw-response view over an `Alpha` resource."""

    def __init__(self, alpha: Alpha) -> None:
        # Keep the wrapped resource so sub-resource views can be derived from it.
        self._alpha = alpha

    @cached_property
    def graders(self) -> GradersWithRawResponse:
        return GradersWithRawResponse(self._alpha.graders)
class AsyncAlphaWithRawResponse:
    """Raw-response view over an `AsyncAlpha` resource."""

    def __init__(self, alpha: AsyncAlpha) -> None:
        # Keep the wrapped resource so sub-resource views can be derived from it.
        self._alpha = alpha

    @cached_property
    def graders(self) -> AsyncGradersWithRawResponse:
        return AsyncGradersWithRawResponse(self._alpha.graders)
class AlphaWithStreamingResponse:
    """Streaming-response view over an `Alpha` resource."""

    def __init__(self, alpha: Alpha) -> None:
        # Keep the wrapped resource so sub-resource views can be derived from it.
        self._alpha = alpha

    @cached_property
    def graders(self) -> GradersWithStreamingResponse:
        return GradersWithStreamingResponse(self._alpha.graders)
class AsyncAlphaWithStreamingResponse:
    """Streaming-response view over an `AsyncAlpha` resource."""

    def __init__(self, alpha: AsyncAlpha) -> None:
        # Keep the wrapped resource so sub-resource views can be derived from it.
        self._alpha = alpha

    @cached_property
    def graders(self) -> AsyncGradersWithStreamingResponse:
        return AsyncGradersWithStreamingResponse(self._alpha.graders)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/fine_tuning/alpha/alpha.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/resources/fine_tuning/alpha/graders.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
import httpx
from .... import _legacy_response
from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._base_client import make_request_options
from ....types.fine_tuning.alpha import grader_run_params, grader_validate_params
from ....types.fine_tuning.alpha.grader_run_response import GraderRunResponse
from ....types.fine_tuning.alpha.grader_validate_response import GraderValidateResponse
__all__ = ["Graders", "AsyncGraders"]
class Graders(SyncAPIResource):
    """Synchronous resource for the `/fine_tuning/alpha/graders` endpoints."""

    @cached_property
    def with_raw_response(self) -> GradersWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return GradersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> GradersWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return GradersWithStreamingResponse(self)

    def run(
        self,
        *,
        grader: grader_run_params.Grader,
        model_sample: str,
        item: object | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderRunResponse:
        """
        Run a grader.

        Args:
          grader: The grader used for the fine-tuning job.

          model_sample: The model sample to be evaluated. This value will be used to populate the
              `sample` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
              The `output_json` variable will be populated if the model sample is a valid JSON
              string.

          item: The dataset item provided to the grader. This will be used to populate the
              `item` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /fine_tuning/alpha/graders/run — the body dict is normalized
        # against the GraderRunParams schema via `maybe_transform` before sending.
        return self._post(
            "/fine_tuning/alpha/graders/run",
            body=maybe_transform(
                {
                    "grader": grader,
                    "model_sample": model_sample,
                    "item": item,
                },
                grader_run_params.GraderRunParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=GraderRunResponse,
        )

    def validate(
        self,
        *,
        grader: grader_validate_params.Grader,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderValidateResponse:
        """
        Validate a grader.

        Args:
          grader: The grader used for the fine-tuning job.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /fine_tuning/alpha/graders/validate — the body dict is normalized
        # against the GraderValidateParams schema via `maybe_transform` before sending.
        return self._post(
            "/fine_tuning/alpha/graders/validate",
            body=maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=GraderValidateResponse,
        )
class AsyncGraders(AsyncAPIResource):
    """Asynchronous resource for the `/fine_tuning/alpha/graders` endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncGradersWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncGradersWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncGradersWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncGradersWithStreamingResponse(self)

    async def run(
        self,
        *,
        grader: grader_run_params.Grader,
        model_sample: str,
        item: object | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderRunResponse:
        """
        Run a grader.

        Args:
          grader: The grader used for the fine-tuning job.

          model_sample: The model sample to be evaluated. This value will be used to populate the
              `sample` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.
              The `output_json` variable will be populated if the model sample is a valid JSON
              string.

          item: The dataset item provided to the grader. This will be used to populate the
              `item` namespace. See
              [the guide](https://platform.openai.com/docs/guides/graders) for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /fine_tuning/alpha/graders/run — async variant; body normalization
        # uses the awaitable `async_maybe_transform`.
        return await self._post(
            "/fine_tuning/alpha/graders/run",
            body=await async_maybe_transform(
                {
                    "grader": grader,
                    "model_sample": model_sample,
                    "item": item,
                },
                grader_run_params.GraderRunParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=GraderRunResponse,
        )

    async def validate(
        self,
        *,
        grader: grader_validate_params.Grader,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> GraderValidateResponse:
        """
        Validate a grader.

        Args:
          grader: The grader used for the fine-tuning job.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # POST /fine_tuning/alpha/graders/validate — async variant; body normalization
        # uses the awaitable `async_maybe_transform`.
        return await self._post(
            "/fine_tuning/alpha/graders/validate",
            body=await async_maybe_transform({"grader": grader}, grader_validate_params.GraderValidateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=GraderValidateResponse,
        )
class GradersWithRawResponse:
    """Raw-response view over a `Graders` resource."""

    def __init__(self, graders: Graders) -> None:
        self._graders = graders
        # Wrap each endpoint method so callers receive the raw response object.
        wrap = _legacy_response.to_raw_response_wrapper
        self.run = wrap(graders.run)
        self.validate = wrap(graders.validate)
class AsyncGradersWithRawResponse:
    """Raw-response view over an `AsyncGraders` resource."""

    def __init__(self, graders: AsyncGraders) -> None:
        self._graders = graders
        # Wrap each endpoint method so callers receive the raw response object.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.run = wrap(graders.run)
        self.validate = wrap(graders.validate)
class GradersWithStreamingResponse:
    """Streaming-response view over a `Graders` resource."""

    def __init__(self, graders: Graders) -> None:
        self._graders = graders
        # Wrap each endpoint method so the response body is not read eagerly.
        wrap = to_streamed_response_wrapper
        self.run = wrap(graders.run)
        self.validate = wrap(graders.validate)
class AsyncGradersWithStreamingResponse:
    """Streaming-response view over an `AsyncGraders` resource."""

    def __init__(self, graders: AsyncGraders) -> None:
        self._graders = graders
        # Wrap each endpoint method so the response body is not read eagerly.
        wrap = async_to_streamed_response_wrapper
        self.run = wrap(graders.run)
        self.validate = wrap(graders.validate)
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/resources/fine_tuning/alpha/graders.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
openai/openai-python:src/openai/types/fine_tuning/alpha/grader_run_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Required, TypeAlias, TypedDict
from ...graders.multi_grader_param import MultiGraderParam
from ...graders.python_grader_param import PythonGraderParam
from ...graders.score_model_grader_param import ScoreModelGraderParam
from ...graders.string_check_grader_param import StringCheckGraderParam
from ...graders.text_similarity_grader_param import TextSimilarityGraderParam
__all__ = ["GraderRunParams", "Grader"]
class GraderRunParams(TypedDict, total=False):
    grader: Required[Grader]
    """The grader used for the fine-tuning job."""

    model_sample: Required[str]
    """The model sample to be evaluated.

    This value will be used to populate the `sample` namespace. See
    [the guide](https://platform.openai.com/docs/guides/graders) for more details.
    The `output_json` variable will be populated if the model sample is a valid JSON
    string.
    """

    item: object
    """The dataset item provided to the grader.

    This will be used to populate the `item` namespace. See
    [the guide](https://platform.openai.com/docs/guides/graders) for more details.
    """


# Union of every grader param shape accepted by the run endpoint. Defined after
# the TypedDict; the forward reference works because this file uses
# `from __future__ import annotations`.
Grader: TypeAlias = Union[
    StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/alpha/grader_run_params.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/alpha/grader_run_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Dict, Optional
from pydantic import Field as FieldInfo
from ...._models import BaseModel
__all__ = ["GraderRunResponse", "Metadata", "MetadataErrors"]
class MetadataErrors(BaseModel):
    # Per-category error flags for a single grader run. Fields named
    # `api_model_*` are read from the wire keys `model_*` via a pydantic alias —
    # presumably to avoid pydantic's protected `model_` attribute namespace
    # (NOTE(review): confirm against the project BaseModel configuration).
    formula_parse_error: bool

    invalid_variable_error: bool

    api_model_grader_parse_error: bool = FieldInfo(alias="model_grader_parse_error")

    api_model_grader_refusal_error: bool = FieldInfo(alias="model_grader_refusal_error")

    api_model_grader_server_error: bool = FieldInfo(alias="model_grader_server_error")

    api_model_grader_server_error_details: Optional[str] = FieldInfo(
        alias="model_grader_server_error_details", default=None
    )

    other_error: bool

    python_grader_runtime_error: bool

    python_grader_runtime_error_details: Optional[str] = None

    python_grader_server_error: bool

    python_grader_server_error_type: Optional[str] = None

    sample_parse_error: bool

    truncated_observation_error: bool

    unresponsive_reward_error: bool
class Metadata(BaseModel):
    # Execution metadata attached to a grader run response.
    errors: MetadataErrors

    execution_time: float

    name: str

    sampled_model_name: Optional[str] = None

    scores: Dict[str, object]

    token_usage: Optional[int] = None

    type: str
class GraderRunResponse(BaseModel):
    # Result of running a grader against a model sample.
    metadata: Metadata

    # Aliased from the wire key `model_grader_token_usage_per_model` —
    # presumably to stay out of pydantic's protected `model_` namespace
    # (NOTE(review): confirm against the project BaseModel configuration).
    api_model_grader_token_usage_per_model: Dict[str, object] = FieldInfo(alias="model_grader_token_usage_per_model")

    reward: float

    sub_rewards: Dict[str, object]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/alpha/grader_run_response.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/alpha/grader_validate_params.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Required, TypeAlias, TypedDict
from ...graders.multi_grader_param import MultiGraderParam
from ...graders.python_grader_param import PythonGraderParam
from ...graders.score_model_grader_param import ScoreModelGraderParam
from ...graders.string_check_grader_param import StringCheckGraderParam
from ...graders.text_similarity_grader_param import TextSimilarityGraderParam
__all__ = ["GraderValidateParams", "Grader"]
class GraderValidateParams(TypedDict, total=False):
    grader: Required[Grader]
    """The grader used for the fine-tuning job."""


# Union of every grader param shape accepted by the validate endpoint. Defined
# after the TypedDict; the forward reference works because this file uses
# `from __future__ import annotations`.
Grader: TypeAlias = Union[
    StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
]
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/alpha/grader_validate_params.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/alpha/grader_validate_response.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import TypeAlias
from ...._models import BaseModel
from ...graders.multi_grader import MultiGrader
from ...graders.python_grader import PythonGrader
from ...graders.score_model_grader import ScoreModelGrader
from ...graders.string_check_grader import StringCheckGrader
from ...graders.text_similarity_grader import TextSimilarityGrader
__all__ = ["GraderValidateResponse", "Grader"]
# Union of every grader model shape the validate endpoint may echo back.
Grader: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, MultiGrader]


class GraderValidateResponse(BaseModel):
    grader: Optional[Grader] = None
    """The grader used for the fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/alpha/grader_validate_response.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/dpo_hyperparameters.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["DpoHyperparameters"]
class DpoHyperparameters(BaseModel):
    """The hyperparameters used for the DPO fine-tuning job."""

    # Each field accepts the literal "auto" (API chooses), an explicit value,
    # or None (field absent from the response).
    batch_size: Union[Literal["auto"], int, None] = None
    """Number of examples in each batch.

    A larger batch size means that model parameters are updated less frequently, but
    with lower variance.
    """

    beta: Union[Literal["auto"], float, None] = None
    """The beta value for the DPO method.

    A higher beta value will increase the weight of the penalty between the policy
    and reference model.
    """

    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
    """Scaling factor for the learning rate.

    A smaller learning rate may be useful to avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int, None] = None
    """The number of epochs to train the model for.

    An epoch refers to one full cycle through the training dataset.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/dpo_hyperparameters.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/dpo_hyperparameters_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
__all__ = ["DpoHyperparametersParam"]
class DpoHyperparametersParam(TypedDict, total=False):
    """Hyperparameter overrides for a DPO fine-tuning job.

    All keys are optional; each also accepts the literal `"auto"` to let the
    API choose a value.
    """

    batch_size: Union[Literal["auto"], int]
    """Number of examples in each batch.

    A larger batch size means parameters are updated less frequently, but with
    lower variance.
    """

    beta: Union[Literal["auto"], float]
    """The beta value for the DPO method.

    A higher beta increases the weight of the penalty between the policy and
    reference model.
    """

    learning_rate_multiplier: Union[Literal["auto"], float]
    """Scaling factor for the learning rate.

    A smaller learning rate may help avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int]
    """The number of epochs (full cycles through the training dataset) to train for."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/dpo_hyperparameters_param.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/dpo_method.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .dpo_hyperparameters import DpoHyperparameters
__all__ = ["DpoMethod"]
class DpoMethod(BaseModel):
    """Configuration for the DPO fine-tuning method."""

    hyperparameters: Optional[DpoHyperparameters] = None
    """The hyperparameters used for the DPO fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/dpo_method.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/dpo_method_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
from .dpo_hyperparameters_param import DpoHyperparametersParam
__all__ = ["DpoMethodParam"]
class DpoMethodParam(TypedDict, total=False):
    """Configuration for the DPO fine-tuning method."""

    hyperparameters: DpoHyperparametersParam
    """The hyperparameters used for the DPO fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/dpo_method_param.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/reinforcement_hyperparameters.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ReinforcementHyperparameters"]
class ReinforcementHyperparameters(BaseModel):
    """The hyperparameters used for the reinforcement fine-tuning job."""

    # Numeric fields accept the literal "auto" (API chooses), an explicit
    # value, or None (field absent from the response).
    batch_size: Union[Literal["auto"], int, None] = None
    """Number of examples in each batch.

    A larger batch size means that model parameters are updated less frequently, but
    with lower variance.
    """

    compute_multiplier: Union[Literal["auto"], float, None] = None
    """
    Multiplier on amount of compute used for exploring search space during training.
    """

    eval_interval: Union[Literal["auto"], int, None] = None
    """The number of training steps between evaluation runs."""

    eval_samples: Union[Literal["auto"], int, None] = None
    """Number of evaluation samples to generate per training step."""

    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
    """Scaling factor for the learning rate.

    A smaller learning rate may be useful to avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int, None] = None
    """The number of epochs to train the model for.

    An epoch refers to one full cycle through the training dataset.
    """

    reasoning_effort: Optional[Literal["default", "low", "medium", "high"]] = None
    """Level of reasoning effort."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/reinforcement_hyperparameters.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
__all__ = ["ReinforcementHyperparametersParam"]
class ReinforcementHyperparametersParam(TypedDict, total=False):
    """Hyperparameter overrides for a reinforcement fine-tuning job.

    All keys are optional; numeric keys also accept the literal `"auto"` to
    let the API choose a value.
    """

    batch_size: Union[Literal["auto"], int]
    """Number of examples in each batch.

    A larger batch size means parameters are updated less frequently, but with
    lower variance.
    """

    compute_multiplier: Union[Literal["auto"], float]
    """
    Multiplier on amount of compute used for exploring search space during training.
    """

    eval_interval: Union[Literal["auto"], int]
    """The number of training steps between evaluation runs."""

    eval_samples: Union[Literal["auto"], int]
    """Number of evaluation samples to generate per training step."""

    learning_rate_multiplier: Union[Literal["auto"], float]
    """Scaling factor for the learning rate.

    A smaller learning rate may help avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int]
    """The number of epochs (full cycles through the training dataset) to train for."""

    reasoning_effort: Literal["default", "low", "medium", "high"]
    """Level of reasoning effort."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/reinforcement_hyperparameters_param.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/reinforcement_method.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union, Optional
from typing_extensions import TypeAlias
from ..._models import BaseModel
from ..graders.multi_grader import MultiGrader
from ..graders.python_grader import PythonGrader
from ..graders.score_model_grader import ScoreModelGrader
from ..graders.string_check_grader import StringCheckGrader
from .reinforcement_hyperparameters import ReinforcementHyperparameters
from ..graders.text_similarity_grader import TextSimilarityGrader
__all__ = ["ReinforcementMethod", "Grader"]
# Union of every grader model shape usable by the reinforcement method.
Grader: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, MultiGrader]


class ReinforcementMethod(BaseModel):
    """Configuration for the reinforcement fine-tuning method."""

    grader: Grader
    """The grader used for the fine-tuning job."""

    hyperparameters: Optional[ReinforcementHyperparameters] = None
    """The hyperparameters used for the reinforcement fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/reinforcement_method.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/reinforcement_method_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Required, TypeAlias, TypedDict
from ..graders.multi_grader_param import MultiGraderParam
from ..graders.python_grader_param import PythonGraderParam
from ..graders.score_model_grader_param import ScoreModelGraderParam
from ..graders.string_check_grader_param import StringCheckGraderParam
from .reinforcement_hyperparameters_param import ReinforcementHyperparametersParam
from ..graders.text_similarity_grader_param import TextSimilarityGraderParam
__all__ = ["ReinforcementMethodParam", "Grader"]
# Request-side counterpart of the response `Grader` union: any supported
# grader configuration for a reinforcement fine-tuning request.
Grader: TypeAlias = Union[
    StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
]
class ReinforcementMethodParam(TypedDict, total=False):
    """Configuration for the reinforcement fine-tuning method."""

    # total=False: keys are optional unless wrapped in Required[...].
    grader: Required[Grader]
    """The grader used for the fine-tuning job."""

    hyperparameters: ReinforcementHyperparametersParam
    """The hyperparameters used for the reinforcement fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/reinforcement_method_param.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/supervised_hyperparameters.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["SupervisedHyperparameters"]
class SupervisedHyperparameters(BaseModel):
    """The hyperparameters used for the fine-tuning job."""

    # Each field accepts either the literal "auto" or an explicit numeric value;
    # None means the value was not set/returned.
    batch_size: Union[Literal["auto"], int, None] = None
    """Number of examples in each batch.

    A larger batch size means that model parameters are updated less frequently, but
    with lower variance.
    """

    learning_rate_multiplier: Union[Literal["auto"], float, None] = None
    """Scaling factor for the learning rate.

    A smaller learning rate may be useful to avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int, None] = None
    """The number of epochs to train the model for.

    An epoch refers to one full cycle through the training dataset.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/supervised_hyperparameters.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/supervised_hyperparameters_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union
from typing_extensions import Literal, TypedDict
__all__ = ["SupervisedHyperparametersParam"]
class SupervisedHyperparametersParam(TypedDict, total=False):
    """The hyperparameters used for the fine-tuning job."""

    # Each field accepts either the literal "auto" or an explicit numeric value;
    # all keys are optional (total=False).
    batch_size: Union[Literal["auto"], int]
    """Number of examples in each batch.

    A larger batch size means that model parameters are updated less frequently, but
    with lower variance.
    """

    learning_rate_multiplier: Union[Literal["auto"], float]
    """Scaling factor for the learning rate.

    A smaller learning rate may be useful to avoid overfitting.
    """

    n_epochs: Union[Literal["auto"], int]
    """The number of epochs to train the model for.

    An epoch refers to one full cycle through the training dataset.
    """
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/supervised_hyperparameters_param.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/fine_tuning/supervised_method.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .supervised_hyperparameters import SupervisedHyperparameters
__all__ = ["SupervisedMethod"]
class SupervisedMethod(BaseModel):
    """Configuration for the supervised fine-tuning method."""

    # Optional: None when the job does not report hyperparameters.
    hyperparameters: Optional[SupervisedHyperparameters] = None
    """The hyperparameters used for the fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/supervised_method.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/fine_tuning/supervised_method_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing_extensions import TypedDict
from .supervised_hyperparameters_param import SupervisedHyperparametersParam
__all__ = ["SupervisedMethodParam"]
class SupervisedMethodParam(TypedDict, total=False):
    """Configuration for the supervised fine-tuning method."""

    # Optional key (total=False).
    hyperparameters: SupervisedHyperparametersParam
    """The hyperparameters used for the fine-tuning job."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/fine_tuning/supervised_method_param.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
openai/openai-python:src/openai/types/graders/label_model_grader_param.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Union, Iterable
from typing_extensions import Literal, Required, TypeAlias, TypedDict
from ..._types import SequenceNotStr
from .grader_inputs_param import GraderInputsParam
from ..responses.response_input_text_param import ResponseInputTextParam
from ..responses.response_input_audio_param import ResponseInputAudioParam
__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"]
class InputContentOutputText(TypedDict, total=False):
    """A text output from the model."""

    text: Required[str]
    """The text output from the model."""

    # Discriminator: always the literal "output_text".
    type: Required[Literal["output_text"]]
    """The type of the output text. Always `output_text`."""
class InputContentInputImage(TypedDict, total=False):
    """An image input block used within EvalItem content arrays."""

    image_url: Required[str]
    """The URL of the image input."""

    # Discriminator: always the literal "input_image".
    type: Required[Literal["input_image"]]
    """The type of the image input. Always `input_image`."""

    # Typed as a plain str (not a Literal); accepted values documented below.
    detail: str
    """The detail level of the image to be sent to the model.

    One of `high`, `low`, or `auto`. Defaults to `auto`.
    """
# A single content entry for `Input.content`: a plain string, a typed
# text/image/audio input, or a grader-inputs object.
# NOTE(review): the docstring on `Input.content` mentions "an array of items",
# but this alias includes no list form — confirm against the spec.
InputContent: TypeAlias = Union[
    str,
    ResponseInputTextParam,
    InputContentOutputText,
    InputContentInputImage,
    ResponseInputAudioParam,
    GraderInputsParam,
]
class Input(TypedDict, total=False):
    """
    A message input to the model with a role indicating instruction following
    hierarchy. Instructions given with the `developer` or `system` role take
    precedence over instructions given with the `user` role. Messages with the
    `assistant` role are presumed to have been generated by the model in previous
    interactions.
    """

    content: Required[InputContent]
    """Inputs to the model - can contain template strings.

    Supports text, output text, input images, and input audio, either as a single
    item or an array of items.
    """

    role: Required[Literal["user", "assistant", "system", "developer"]]
    """The role of the message input.

    One of `user`, `assistant`, `system`, or `developer`.
    """

    # Optional discriminator; when present it is always "message".
    type: Literal["message"]
    """The type of the message input. Always `message`."""
class LabelModelGraderParam(TypedDict, total=False):
    """
    A LabelModelGrader object which uses a model to assign labels to each item
    in the evaluation.
    """

    # The list of `Input` message items (content may contain template strings).
    input: Required[Iterable[Input]]

    labels: Required[SequenceNotStr[str]]
    """The labels to assign to each item in the evaluation."""

    model: Required[str]
    """The model to use for the evaluation. Must support structured outputs."""

    name: Required[str]
    """The name of the grader."""

    passing_labels: Required[SequenceNotStr[str]]
    """The labels that indicate a passing result. Must be a subset of labels."""

    # Discriminator: always the literal "label_model".
    type: Required[Literal["label_model"]]
    """The object type, which is always `label_model`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/graders/label_model_grader_param.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
openai/openai-python:src/openai/types/graders/multi_grader.py | # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Union
from typing_extensions import Literal, TypeAlias
from ..._models import BaseModel
from .python_grader import PythonGrader
from .label_model_grader import LabelModelGrader
from .score_model_grader import ScoreModelGrader
from .string_check_grader import StringCheckGrader
from .text_similarity_grader import TextSimilarityGrader
__all__ = ["MultiGrader", "Graders"]
# Union of grader configurations that may appear in `MultiGrader.graders` below.
Graders: TypeAlias = Union[StringCheckGrader, TextSimilarityGrader, PythonGrader, ScoreModelGrader, LabelModelGrader]
class MultiGrader(BaseModel):
    """
    A MultiGrader object combines the output of multiple graders to produce a single score.
    """

    calculate_output: str
    """A formula to calculate the output based on grader results."""

    # NOTE(review): the docstring below looks copy-pasted from StringCheckGrader;
    # the field's type is the `Graders` union of all supported grader
    # configurations, not specifically a string-check grader — confirm against
    # the OpenAPI spec before relying on the prose.
    graders: Graders
    """
    A StringCheckGrader object that performs a string comparison between input and
    reference using a specified operation.
    """

    name: str
    """The name of the grader."""

    type: Literal["multi"]
    """The object type, which is always `multi`."""
| {
"repo_id": "openai/openai-python",
"file_path": "src/openai/types/graders/multi_grader.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.