Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +4 -0
- .venv/lib/python3.11/site-packages/distlib/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/distlib/__pycache__/index.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/distlib/t64-arm.exe +3 -0
- .venv/lib/python3.11/site-packages/distlib/t64.exe +3 -0
- .venv/lib/python3.11/site-packages/distlib/w64-arm.exe +3 -0
- .venv/lib/python3.11/site-packages/distlib/w64.exe +3 -0
- .venv/lib/python3.11/site-packages/openai/__init__.py +363 -0
- .venv/lib/python3.11/site-packages/openai/__main__.py +3 -0
- .venv/lib/python3.11/site-packages/openai/_base_client.py +2082 -0
- .venv/lib/python3.11/site-packages/openai/_client.py +565 -0
- .venv/lib/python3.11/site-packages/openai/_compat.py +231 -0
- .venv/lib/python3.11/site-packages/openai/_constants.py +14 -0
- .venv/lib/python3.11/site-packages/openai/_exceptions.py +156 -0
- .venv/lib/python3.11/site-packages/openai/_files.py +123 -0
- .venv/lib/python3.11/site-packages/openai/_legacy_response.py +488 -0
- .venv/lib/python3.11/site-packages/openai/_models.py +835 -0
- .venv/lib/python3.11/site-packages/openai/_module_client.py +85 -0
- .venv/lib/python3.11/site-packages/openai/_qs.py +150 -0
- .venv/lib/python3.11/site-packages/openai/_resource.py +43 -0
- .venv/lib/python3.11/site-packages/openai/_response.py +848 -0
- .venv/lib/python3.11/site-packages/openai/_streaming.py +410 -0
- .venv/lib/python3.11/site-packages/openai/_types.py +219 -0
- .venv/lib/python3.11/site-packages/openai/_version.py +4 -0
- .venv/lib/python3.11/site-packages/openai/pagination.py +107 -0
- .venv/lib/python3.11/site-packages/openai/py.typed +0 -0
- .venv/lib/python3.11/site-packages/openai/types/__init__.py +54 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/speech_create_params.py +41 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/speech_model.py +7 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/transcription.py +11 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_params.py +67 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/transcription_segment.py +49 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/transcription_verbose.py +26 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/transcription_word.py +17 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/translation.py +10 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/translation_create_params.py +50 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/translation_create_response.py +11 -0
- .venv/lib/python3.11/site-packages/openai/types/audio/translation_verbose.py +22 -0
- .venv/lib/python3.11/site-packages/openai/types/audio_model.py +7 -0
- .venv/lib/python3.11/site-packages/openai/types/audio_response_format.py +7 -0
- .venv/lib/python3.11/site-packages/openai/types/batch.py +87 -0
- .venv/lib/python3.11/site-packages/openai/types/batch_create_params.py +48 -0
- .venv/lib/python3.11/site-packages/openai/types/batch_error.py +21 -0
- .venv/lib/python3.11/site-packages/openai/types/batch_list_params.py +24 -0
- .venv/lib/python3.11/site-packages/openai/types/batch_request_counts.py +17 -0
- .venv/lib/python3.11/site-packages/openai/types/chat_model.py +47 -0
- .venv/lib/python3.11/site-packages/openai/types/completion.py +37 -0
- .venv/lib/python3.11/site-packages/openai/types/completion_choice.py +35 -0
- .venv/lib/python3.11/site-packages/openai/types/completion_create_params.py +187 -0
- .venv/lib/python3.11/site-packages/openai/types/completion_usage.py +54 -0
.gitattributes
CHANGED
|
@@ -339,3 +339,7 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
|
|
| 339 |
.venv/lib/python3.11/site-packages/pip/_vendor/rich/__pycache__/console.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 340 |
.venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 341 |
.venv/lib/python3.11/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 339 |
.venv/lib/python3.11/site-packages/pip/_vendor/rich/__pycache__/console.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 340 |
.venv/lib/python3.11/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 341 |
.venv/lib/python3.11/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 342 |
+
.venv/lib/python3.11/site-packages/distlib/w64-arm.exe filter=lfs diff=lfs merge=lfs -text
|
| 343 |
+
.venv/lib/python3.11/site-packages/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
|
| 344 |
+
.venv/lib/python3.11/site-packages/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
|
| 345 |
+
.venv/lib/python3.11/site-packages/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
|
.venv/lib/python3.11/site-packages/distlib/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (1.45 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/distlib/__pycache__/index.cpython-311.pyc
ADDED
|
Binary file (26.6 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/distlib/t64-arm.exe
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ebc4c06b7d95e74e315419ee7e88e1d0f71e9e9477538c00a93a9ff8c66a6cfc
|
| 3 |
+
size 182784
|
.venv/lib/python3.11/site-packages/distlib/t64.exe
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:81a618f21cb87db9076134e70388b6e9cb7c2106739011b6a51772d22cae06b7
|
| 3 |
+
size 108032
|
.venv/lib/python3.11/site-packages/distlib/w64-arm.exe
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c5dc9884a8f458371550e09bd396e5418bf375820a31b9899f6499bf391c7b2e
|
| 3 |
+
size 168448
|
.venv/lib/python3.11/site-packages/distlib/w64.exe
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7a319ffaba23a017d7b1e18ba726ba6c54c53d6446db55f92af53c279894f8ad
|
| 3 |
+
size 101888
|
.venv/lib/python3.11/site-packages/openai/__init__.py
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import os as _os
|
| 6 |
+
from typing_extensions import override
|
| 7 |
+
|
| 8 |
+
from . import types
|
| 9 |
+
from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
|
| 10 |
+
from ._utils import file_from_path
|
| 11 |
+
from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions
|
| 12 |
+
from ._models import BaseModel
|
| 13 |
+
from ._version import __title__, __version__
|
| 14 |
+
from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
|
| 15 |
+
from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS
|
| 16 |
+
from ._exceptions import (
|
| 17 |
+
APIError,
|
| 18 |
+
OpenAIError,
|
| 19 |
+
ConflictError,
|
| 20 |
+
NotFoundError,
|
| 21 |
+
APIStatusError,
|
| 22 |
+
RateLimitError,
|
| 23 |
+
APITimeoutError,
|
| 24 |
+
BadRequestError,
|
| 25 |
+
APIConnectionError,
|
| 26 |
+
AuthenticationError,
|
| 27 |
+
InternalServerError,
|
| 28 |
+
PermissionDeniedError,
|
| 29 |
+
LengthFinishReasonError,
|
| 30 |
+
UnprocessableEntityError,
|
| 31 |
+
APIResponseValidationError,
|
| 32 |
+
ContentFilterFinishReasonError,
|
| 33 |
+
)
|
| 34 |
+
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
|
| 35 |
+
from ._utils._logs import setup_logging as _setup_logging
|
| 36 |
+
from ._legacy_response import HttpxBinaryResponseContent as HttpxBinaryResponseContent
|
| 37 |
+
|
| 38 |
+
__all__ = [
|
| 39 |
+
"types",
|
| 40 |
+
"__version__",
|
| 41 |
+
"__title__",
|
| 42 |
+
"NoneType",
|
| 43 |
+
"Transport",
|
| 44 |
+
"ProxiesTypes",
|
| 45 |
+
"NotGiven",
|
| 46 |
+
"NOT_GIVEN",
|
| 47 |
+
"Omit",
|
| 48 |
+
"OpenAIError",
|
| 49 |
+
"APIError",
|
| 50 |
+
"APIStatusError",
|
| 51 |
+
"APITimeoutError",
|
| 52 |
+
"APIConnectionError",
|
| 53 |
+
"APIResponseValidationError",
|
| 54 |
+
"BadRequestError",
|
| 55 |
+
"AuthenticationError",
|
| 56 |
+
"PermissionDeniedError",
|
| 57 |
+
"NotFoundError",
|
| 58 |
+
"ConflictError",
|
| 59 |
+
"UnprocessableEntityError",
|
| 60 |
+
"RateLimitError",
|
| 61 |
+
"InternalServerError",
|
| 62 |
+
"LengthFinishReasonError",
|
| 63 |
+
"ContentFilterFinishReasonError",
|
| 64 |
+
"Timeout",
|
| 65 |
+
"RequestOptions",
|
| 66 |
+
"Client",
|
| 67 |
+
"AsyncClient",
|
| 68 |
+
"Stream",
|
| 69 |
+
"AsyncStream",
|
| 70 |
+
"OpenAI",
|
| 71 |
+
"AsyncOpenAI",
|
| 72 |
+
"file_from_path",
|
| 73 |
+
"BaseModel",
|
| 74 |
+
"DEFAULT_TIMEOUT",
|
| 75 |
+
"DEFAULT_MAX_RETRIES",
|
| 76 |
+
"DEFAULT_CONNECTION_LIMITS",
|
| 77 |
+
"DefaultHttpxClient",
|
| 78 |
+
"DefaultAsyncHttpxClient",
|
| 79 |
+
]
|
| 80 |
+
|
| 81 |
+
from .lib import azure as _azure, pydantic_function_tool as pydantic_function_tool
|
| 82 |
+
from .version import VERSION as VERSION
|
| 83 |
+
from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI
|
| 84 |
+
from .lib._old_api import *
|
| 85 |
+
from .lib.streaming import (
|
| 86 |
+
AssistantEventHandler as AssistantEventHandler,
|
| 87 |
+
AsyncAssistantEventHandler as AsyncAssistantEventHandler,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
_setup_logging()
|
| 91 |
+
|
| 92 |
+
# Update the __module__ attribute for exported symbols so that
|
| 93 |
+
# error messages point to this module instead of the module
|
| 94 |
+
# it was originally defined in, e.g.
|
| 95 |
+
# openai._exceptions.NotFoundError -> openai.NotFoundError
|
| 96 |
+
__locals = locals()
|
| 97 |
+
for __name in __all__:
|
| 98 |
+
if not __name.startswith("__"):
|
| 99 |
+
try:
|
| 100 |
+
__locals[__name].__module__ = "openai"
|
| 101 |
+
except (TypeError, AttributeError):
|
| 102 |
+
# Some of our exported symbols are builtins which we can't set attributes for.
|
| 103 |
+
pass
|
| 104 |
+
|
| 105 |
+
# ------ Module level client ------
|
| 106 |
+
import typing as _t
|
| 107 |
+
import typing_extensions as _te
|
| 108 |
+
|
| 109 |
+
import httpx as _httpx
|
| 110 |
+
|
| 111 |
+
from ._base_client import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES
|
| 112 |
+
|
| 113 |
+
api_key: str | None = None
|
| 114 |
+
|
| 115 |
+
organization: str | None = None
|
| 116 |
+
|
| 117 |
+
project: str | None = None
|
| 118 |
+
|
| 119 |
+
base_url: str | _httpx.URL | None = None
|
| 120 |
+
|
| 121 |
+
timeout: float | Timeout | None = DEFAULT_TIMEOUT
|
| 122 |
+
|
| 123 |
+
max_retries: int = DEFAULT_MAX_RETRIES
|
| 124 |
+
|
| 125 |
+
default_headers: _t.Mapping[str, str] | None = None
|
| 126 |
+
|
| 127 |
+
default_query: _t.Mapping[str, object] | None = None
|
| 128 |
+
|
| 129 |
+
http_client: _httpx.Client | None = None
|
| 130 |
+
|
| 131 |
+
_ApiType = _te.Literal["openai", "azure"]
|
| 132 |
+
|
| 133 |
+
api_type: _ApiType | None = _t.cast(_ApiType, _os.environ.get("OPENAI_API_TYPE"))
|
| 134 |
+
|
| 135 |
+
api_version: str | None = _os.environ.get("OPENAI_API_VERSION")
|
| 136 |
+
|
| 137 |
+
azure_endpoint: str | None = _os.environ.get("AZURE_OPENAI_ENDPOINT")
|
| 138 |
+
|
| 139 |
+
azure_ad_token: str | None = _os.environ.get("AZURE_OPENAI_AD_TOKEN")
|
| 140 |
+
|
| 141 |
+
azure_ad_token_provider: _azure.AzureADTokenProvider | None = None
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class _ModuleClient(OpenAI):
|
| 145 |
+
# Note: we have to use type: ignores here as overriding class members
|
| 146 |
+
# with properties is technically unsafe but it is fine for our use case
|
| 147 |
+
|
| 148 |
+
@property # type: ignore
|
| 149 |
+
@override
|
| 150 |
+
def api_key(self) -> str | None:
|
| 151 |
+
return api_key
|
| 152 |
+
|
| 153 |
+
@api_key.setter # type: ignore
|
| 154 |
+
def api_key(self, value: str | None) -> None: # type: ignore
|
| 155 |
+
global api_key
|
| 156 |
+
|
| 157 |
+
api_key = value
|
| 158 |
+
|
| 159 |
+
@property # type: ignore
|
| 160 |
+
@override
|
| 161 |
+
def organization(self) -> str | None:
|
| 162 |
+
return organization
|
| 163 |
+
|
| 164 |
+
@organization.setter # type: ignore
|
| 165 |
+
def organization(self, value: str | None) -> None: # type: ignore
|
| 166 |
+
global organization
|
| 167 |
+
|
| 168 |
+
organization = value
|
| 169 |
+
|
| 170 |
+
@property # type: ignore
|
| 171 |
+
@override
|
| 172 |
+
def project(self) -> str | None:
|
| 173 |
+
return project
|
| 174 |
+
|
| 175 |
+
@project.setter # type: ignore
|
| 176 |
+
def project(self, value: str | None) -> None: # type: ignore
|
| 177 |
+
global project
|
| 178 |
+
|
| 179 |
+
project = value
|
| 180 |
+
|
| 181 |
+
@property
|
| 182 |
+
@override
|
| 183 |
+
def base_url(self) -> _httpx.URL:
|
| 184 |
+
if base_url is not None:
|
| 185 |
+
return _httpx.URL(base_url)
|
| 186 |
+
|
| 187 |
+
return super().base_url
|
| 188 |
+
|
| 189 |
+
@base_url.setter
|
| 190 |
+
def base_url(self, url: _httpx.URL | str) -> None:
|
| 191 |
+
super().base_url = url # type: ignore[misc]
|
| 192 |
+
|
| 193 |
+
@property # type: ignore
|
| 194 |
+
@override
|
| 195 |
+
def timeout(self) -> float | Timeout | None:
|
| 196 |
+
return timeout
|
| 197 |
+
|
| 198 |
+
@timeout.setter # type: ignore
|
| 199 |
+
def timeout(self, value: float | Timeout | None) -> None: # type: ignore
|
| 200 |
+
global timeout
|
| 201 |
+
|
| 202 |
+
timeout = value
|
| 203 |
+
|
| 204 |
+
@property # type: ignore
|
| 205 |
+
@override
|
| 206 |
+
def max_retries(self) -> int:
|
| 207 |
+
return max_retries
|
| 208 |
+
|
| 209 |
+
@max_retries.setter # type: ignore
|
| 210 |
+
def max_retries(self, value: int) -> None: # type: ignore
|
| 211 |
+
global max_retries
|
| 212 |
+
|
| 213 |
+
max_retries = value
|
| 214 |
+
|
| 215 |
+
@property # type: ignore
|
| 216 |
+
@override
|
| 217 |
+
def _custom_headers(self) -> _t.Mapping[str, str] | None:
|
| 218 |
+
return default_headers
|
| 219 |
+
|
| 220 |
+
@_custom_headers.setter # type: ignore
|
| 221 |
+
def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None: # type: ignore
|
| 222 |
+
global default_headers
|
| 223 |
+
|
| 224 |
+
default_headers = value
|
| 225 |
+
|
| 226 |
+
@property # type: ignore
|
| 227 |
+
@override
|
| 228 |
+
def _custom_query(self) -> _t.Mapping[str, object] | None:
|
| 229 |
+
return default_query
|
| 230 |
+
|
| 231 |
+
@_custom_query.setter # type: ignore
|
| 232 |
+
def _custom_query(self, value: _t.Mapping[str, object] | None) -> None: # type: ignore
|
| 233 |
+
global default_query
|
| 234 |
+
|
| 235 |
+
default_query = value
|
| 236 |
+
|
| 237 |
+
@property # type: ignore
|
| 238 |
+
@override
|
| 239 |
+
def _client(self) -> _httpx.Client:
|
| 240 |
+
return http_client or super()._client
|
| 241 |
+
|
| 242 |
+
@_client.setter # type: ignore
|
| 243 |
+
def _client(self, value: _httpx.Client) -> None: # type: ignore
|
| 244 |
+
global http_client
|
| 245 |
+
|
| 246 |
+
http_client = value
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class _AzureModuleClient(_ModuleClient, AzureOpenAI): # type: ignore
|
| 250 |
+
...
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
class _AmbiguousModuleClientUsageError(OpenAIError):
|
| 254 |
+
def __init__(self) -> None:
|
| 255 |
+
super().__init__(
|
| 256 |
+
"Ambiguous use of module client; please set `openai.api_type` or the `OPENAI_API_TYPE` environment variable to `openai` or `azure`"
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _has_openai_credentials() -> bool:
|
| 261 |
+
return _os.environ.get("OPENAI_API_KEY") is not None
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _has_azure_credentials() -> bool:
|
| 265 |
+
return azure_endpoint is not None or _os.environ.get("AZURE_OPENAI_API_KEY") is not None
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def _has_azure_ad_credentials() -> bool:
|
| 269 |
+
return (
|
| 270 |
+
_os.environ.get("AZURE_OPENAI_AD_TOKEN") is not None
|
| 271 |
+
or azure_ad_token is not None
|
| 272 |
+
or azure_ad_token_provider is not None
|
| 273 |
+
)
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
_client: OpenAI | None = None
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction]
|
| 280 |
+
global _client
|
| 281 |
+
|
| 282 |
+
if _client is None:
|
| 283 |
+
global api_type, azure_endpoint, azure_ad_token, api_version
|
| 284 |
+
|
| 285 |
+
if azure_endpoint is None:
|
| 286 |
+
azure_endpoint = _os.environ.get("AZURE_OPENAI_ENDPOINT")
|
| 287 |
+
|
| 288 |
+
if azure_ad_token is None:
|
| 289 |
+
azure_ad_token = _os.environ.get("AZURE_OPENAI_AD_TOKEN")
|
| 290 |
+
|
| 291 |
+
if api_version is None:
|
| 292 |
+
api_version = _os.environ.get("OPENAI_API_VERSION")
|
| 293 |
+
|
| 294 |
+
if api_type is None:
|
| 295 |
+
has_openai = _has_openai_credentials()
|
| 296 |
+
has_azure = _has_azure_credentials()
|
| 297 |
+
has_azure_ad = _has_azure_ad_credentials()
|
| 298 |
+
|
| 299 |
+
if has_openai and (has_azure or has_azure_ad):
|
| 300 |
+
raise _AmbiguousModuleClientUsageError()
|
| 301 |
+
|
| 302 |
+
if (azure_ad_token is not None or azure_ad_token_provider is not None) and _os.environ.get(
|
| 303 |
+
"AZURE_OPENAI_API_KEY"
|
| 304 |
+
) is not None:
|
| 305 |
+
raise _AmbiguousModuleClientUsageError()
|
| 306 |
+
|
| 307 |
+
if has_azure or has_azure_ad:
|
| 308 |
+
api_type = "azure"
|
| 309 |
+
else:
|
| 310 |
+
api_type = "openai"
|
| 311 |
+
|
| 312 |
+
if api_type == "azure":
|
| 313 |
+
_client = _AzureModuleClient( # type: ignore
|
| 314 |
+
api_version=api_version,
|
| 315 |
+
azure_endpoint=azure_endpoint,
|
| 316 |
+
api_key=api_key,
|
| 317 |
+
azure_ad_token=azure_ad_token,
|
| 318 |
+
azure_ad_token_provider=azure_ad_token_provider,
|
| 319 |
+
organization=organization,
|
| 320 |
+
base_url=base_url,
|
| 321 |
+
timeout=timeout,
|
| 322 |
+
max_retries=max_retries,
|
| 323 |
+
default_headers=default_headers,
|
| 324 |
+
default_query=default_query,
|
| 325 |
+
http_client=http_client,
|
| 326 |
+
)
|
| 327 |
+
return _client
|
| 328 |
+
|
| 329 |
+
_client = _ModuleClient(
|
| 330 |
+
api_key=api_key,
|
| 331 |
+
organization=organization,
|
| 332 |
+
project=project,
|
| 333 |
+
base_url=base_url,
|
| 334 |
+
timeout=timeout,
|
| 335 |
+
max_retries=max_retries,
|
| 336 |
+
default_headers=default_headers,
|
| 337 |
+
default_query=default_query,
|
| 338 |
+
http_client=http_client,
|
| 339 |
+
)
|
| 340 |
+
return _client
|
| 341 |
+
|
| 342 |
+
return _client
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def _reset_client() -> None: # type: ignore[reportUnusedFunction]
|
| 346 |
+
global _client
|
| 347 |
+
|
| 348 |
+
_client = None
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
from ._module_client import (
|
| 352 |
+
beta as beta,
|
| 353 |
+
chat as chat,
|
| 354 |
+
audio as audio,
|
| 355 |
+
files as files,
|
| 356 |
+
images as images,
|
| 357 |
+
models as models,
|
| 358 |
+
batches as batches,
|
| 359 |
+
embeddings as embeddings,
|
| 360 |
+
completions as completions,
|
| 361 |
+
fine_tuning as fine_tuning,
|
| 362 |
+
moderations as moderations,
|
| 363 |
+
)
|
.venv/lib/python3.11/site-packages/openai/__main__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .cli import main
|
| 2 |
+
|
| 3 |
+
main()
|
.venv/lib/python3.11/site-packages/openai/_base_client.py
ADDED
|
@@ -0,0 +1,2082 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
import json
|
| 5 |
+
import time
|
| 6 |
+
import uuid
|
| 7 |
+
import email
|
| 8 |
+
import asyncio
|
| 9 |
+
import inspect
|
| 10 |
+
import logging
|
| 11 |
+
import platform
|
| 12 |
+
import warnings
|
| 13 |
+
import email.utils
|
| 14 |
+
from types import TracebackType
|
| 15 |
+
from random import random
|
| 16 |
+
from typing import (
|
| 17 |
+
TYPE_CHECKING,
|
| 18 |
+
Any,
|
| 19 |
+
Dict,
|
| 20 |
+
Type,
|
| 21 |
+
Union,
|
| 22 |
+
Generic,
|
| 23 |
+
Mapping,
|
| 24 |
+
TypeVar,
|
| 25 |
+
Iterable,
|
| 26 |
+
Iterator,
|
| 27 |
+
Optional,
|
| 28 |
+
Generator,
|
| 29 |
+
AsyncIterator,
|
| 30 |
+
cast,
|
| 31 |
+
overload,
|
| 32 |
+
)
|
| 33 |
+
from typing_extensions import Literal, override, get_origin
|
| 34 |
+
|
| 35 |
+
import anyio
|
| 36 |
+
import httpx
|
| 37 |
+
import distro
|
| 38 |
+
import pydantic
|
| 39 |
+
from httpx import URL, Limits
|
| 40 |
+
from pydantic import PrivateAttr
|
| 41 |
+
|
| 42 |
+
from . import _exceptions
|
| 43 |
+
from ._qs import Querystring
|
| 44 |
+
from ._files import to_httpx_files, async_to_httpx_files
|
| 45 |
+
from ._types import (
|
| 46 |
+
NOT_GIVEN,
|
| 47 |
+
Body,
|
| 48 |
+
Omit,
|
| 49 |
+
Query,
|
| 50 |
+
Headers,
|
| 51 |
+
Timeout,
|
| 52 |
+
NotGiven,
|
| 53 |
+
ResponseT,
|
| 54 |
+
Transport,
|
| 55 |
+
AnyMapping,
|
| 56 |
+
PostParser,
|
| 57 |
+
ProxiesTypes,
|
| 58 |
+
RequestFiles,
|
| 59 |
+
HttpxSendArgs,
|
| 60 |
+
AsyncTransport,
|
| 61 |
+
RequestOptions,
|
| 62 |
+
HttpxRequestFiles,
|
| 63 |
+
ModelBuilderProtocol,
|
| 64 |
+
)
|
| 65 |
+
from ._utils import SensitiveHeadersFilter, is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
|
| 66 |
+
from ._compat import model_copy, model_dump
|
| 67 |
+
from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type
|
| 68 |
+
from ._response import (
|
| 69 |
+
APIResponse,
|
| 70 |
+
BaseAPIResponse,
|
| 71 |
+
AsyncAPIResponse,
|
| 72 |
+
extract_response_type,
|
| 73 |
+
)
|
| 74 |
+
from ._constants import (
|
| 75 |
+
DEFAULT_TIMEOUT,
|
| 76 |
+
MAX_RETRY_DELAY,
|
| 77 |
+
DEFAULT_MAX_RETRIES,
|
| 78 |
+
INITIAL_RETRY_DELAY,
|
| 79 |
+
RAW_RESPONSE_HEADER,
|
| 80 |
+
OVERRIDE_CAST_TO_HEADER,
|
| 81 |
+
DEFAULT_CONNECTION_LIMITS,
|
| 82 |
+
)
|
| 83 |
+
from ._streaming import Stream, SSEDecoder, AsyncStream, SSEBytesDecoder
|
| 84 |
+
from ._exceptions import (
|
| 85 |
+
APIStatusError,
|
| 86 |
+
APITimeoutError,
|
| 87 |
+
APIConnectionError,
|
| 88 |
+
APIResponseValidationError,
|
| 89 |
+
)
|
| 90 |
+
from ._legacy_response import LegacyAPIResponse
|
| 91 |
+
|
| 92 |
+
# Module logger; SensitiveHeadersFilter redacts secrets (e.g. auth headers)
# from log records before they are emitted.
log: logging.Logger = logging.getLogger(__name__)
log.addFilter(SensitiveHeadersFilter())

# TODO: make base page type vars covariant
SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]")
AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]")


_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)

_StreamT = TypeVar("_StreamT", bound=Stream[Any])
_AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any])

if TYPE_CHECKING:
    from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
else:
    try:
        # Private httpx API — may disappear in a future httpx release, hence
        # the runtime fallback below.
        from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
    except ImportError:
        # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366
        HTTPX_DEFAULT_TIMEOUT = Timeout(5.0)
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class PageInfo:
    """Stores the necessary information to build the request to retrieve the next page.

    Either `url` or `params` must be set.
    """

    # Absolute/relative URL of the next page, or NOT_GIVEN.
    url: URL | NotGiven
    # Query params that select the next page, or NOT_GIVEN.
    params: Query | NotGiven

    @overload
    def __init__(
        self,
        *,
        url: URL,
    ) -> None: ...

    @overload
    def __init__(
        self,
        *,
        params: Query,
    ) -> None: ...

    def __init__(
        self,
        *,
        url: URL | NotGiven = NOT_GIVEN,
        params: Query | NotGiven = NOT_GIVEN,
    ) -> None:
        self.url = url
        self.params = params

    @override
    def __repr__(self) -> str:
        # `url` is NOT_GIVEN (falsy) when the params form was used.
        if self.url:
            return f"{self.__class__.__name__}(url={self.url})"
        return f"{self.__class__.__name__}(params={self.params})"
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class BasePage(GenericModel, Generic[_T]):
    """
    Defines the core interface for pagination.

    Type Args:
        ModelT: The pydantic model that represents an item in the response.

    Methods:
        has_next_page(): Check if there is another page available
        next_page_info(): Get the necessary information to make a request for the next page
    """

    # Options of the request that produced this page; reused (with updated
    # url/params) to build the next-page request.
    _options: FinalRequestOptions = PrivateAttr()
    # Model class used to parse each item on the page.
    _model: Type[_T] = PrivateAttr()

    def has_next_page(self) -> bool:
        """Return True when a further page can be requested."""
        items = self._get_page_items()
        if not items:
            # An empty page can never be followed by another one.
            return False
        return self.next_page_info() is not None

    def next_page_info(self) -> Optional[PageInfo]: ...

    def _get_page_items(self) -> Iterable[_T]:  # type: ignore[empty-body]
        ...

    def _params_from_url(self, url: URL) -> httpx.QueryParams:
        # TODO: do we have to preprocess params here?
        # Original request params merged with (and overridden by) the URL's own params.
        return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params)

    def _info_to_options(self, info: PageInfo) -> FinalRequestOptions:
        """Translate a `PageInfo` into the request options for the next page."""
        options = model_copy(self._options)
        options._strip_raw_response_header()

        if not isinstance(info.params, NotGiven):
            # Next-page params override the original request params.
            options.params = {**options.params, **info.params}
            return options

        if not isinstance(info.url, NotGiven):
            # Fold the original params into the given URL, then store them
            # back on `options` so they are re-applied when the request is built.
            params = self._params_from_url(info.url)
            url = info.url.copy_with(params=params)
            options.params = dict(url.params)
            options.url = str(url)
            return options

        raise ValueError("Unexpected PageInfo state")
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class BaseSyncPage(BasePage[_T], Generic[_T]):
    # Client used to fetch subsequent pages.
    _client: SyncAPIClient = pydantic.PrivateAttr()

    def _set_private_attributes(
        self,
        client: SyncAPIClient,
        model: Type[_T],
        options: FinalRequestOptions,
    ) -> None:
        """Attach the client, item model and request options to this page."""
        self._model = model
        self._client = client
        self._options = options

    # Pydantic uses a custom `__iter__` method to support casting BaseModels
    # to dictionaries. e.g. dict(model).
    # As we want to support `for item in page`, this is inherently incompatible
    # with the default pydantic behaviour. It is not possible to support both
    # use cases at once. Fortunately, this is not a big deal as all other pydantic
    # methods should continue to work as expected as there is an alternative method
    # to cast a model to a dictionary, model.dict(), which is used internally
    # by pydantic.
    def __iter__(self) -> Iterator[_T]:  # type: ignore
        """Iterate over every item across all pages, fetching pages lazily."""
        for current_page in self.iter_pages():
            yield from current_page._get_page_items()

    def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]:
        """Yield this page followed by each subsequent page until exhausted."""
        current = self
        yield current
        while current.has_next_page():
            current = current.get_next_page()
            yield current

    def get_next_page(self: SyncPageT) -> SyncPageT:
        """Fetch and return the next page.

        Raises:
            RuntimeError: if there is no next page to fetch.
        """
        info = self.next_page_info()
        if info is None:
            raise RuntimeError(
                "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`."
            )

        next_options = self._info_to_options(info)
        return self._client._request_api_list(self._model, page=self.__class__, options=next_options)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class AsyncPaginator(Generic[_T, AsyncPageT]):
    """Awaitable wrapper around an async list request.

    Awaiting it returns the first page; iterating it with `async for`
    yields every item across all pages.
    """

    def __init__(
        self,
        client: AsyncAPIClient,
        options: FinalRequestOptions,
        page_cls: Type[AsyncPageT],
        model: Type[_T],
    ) -> None:
        self._model = model
        self._client = client
        self._options = options
        self._page_cls = page_cls

    def __await__(self) -> Generator[Any, None, AsyncPageT]:
        # Delegate to the coroutine so `await paginator` returns the first page.
        return self._get_page().__await__()

    async def _get_page(self) -> AsyncPageT:
        def _parser(resp: AsyncPageT) -> AsyncPageT:
            # Attach the client/model/options to the parsed page so it can
            # fetch subsequent pages on its own.
            resp._set_private_attributes(
                model=self._model,
                options=self._options,
                client=self._client,
            )
            return resp

        # NOTE: mutates the shared options object so the client's response
        # parsing runs `_parser` on the returned page.
        self._options.post_parser = _parser

        return await self._client.request(self._page_cls, self._options)

    async def __aiter__(self) -> AsyncIterator[_T]:
        # https://github.com/microsoft/pyright/issues/3464
        page = cast(
            AsyncPageT,
            await self,  # type: ignore
        )
        async for item in page:
            yield item
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
class BaseAsyncPage(BasePage[_T], Generic[_T]):
    # Client used to fetch subsequent pages.
    _client: AsyncAPIClient = pydantic.PrivateAttr()

    def _set_private_attributes(
        self,
        model: Type[_T],
        client: AsyncAPIClient,
        options: FinalRequestOptions,
    ) -> None:
        """Attach the client, item model and request options to this page."""
        self._model = model
        self._client = client
        self._options = options

    async def __aiter__(self) -> AsyncIterator[_T]:
        """Asynchronously iterate over every item across all pages."""
        async for current_page in self.iter_pages():
            for entry in current_page._get_page_items():
                yield entry

    async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]:
        """Yield this page followed by each subsequent page until exhausted."""
        current = self
        yield current
        while current.has_next_page():
            current = await current.get_next_page()
            yield current

    async def get_next_page(self: AsyncPageT) -> AsyncPageT:
        """Fetch and return the next page.

        Raises:
            RuntimeError: if there is no next page to fetch.
        """
        info = self.next_page_info()
        if info is None:
            raise RuntimeError(
                "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`."
            )

        next_options = self._info_to_options(info)
        return await self._client._request_api_list(self._model, page=self.__class__, options=next_options)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# Concrete httpx client type (sync or async) used by a given client subclass.
_HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient])
# Default streaming wrapper type (sync `Stream` or `AsyncStream`).
_DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]])
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]):
    """Shared machinery for the sync and async API clients.

    Responsible for building requests (URL merging, headers, multipart
    handling), parsing/validating responses, and retry/backoff policy.
    Subclasses provide the concrete httpx client and status-error mapping.
    """

    # Underlying httpx client (sync or async, per subclass).
    _client: _HttpxClientT
    # Package version, used in the User-Agent and platform headers.
    _version: str
    _base_url: URL
    max_retries: int
    timeout: Union[float, Timeout, None]
    _limits: httpx.Limits
    _proxies: ProxiesTypes | None
    _transport: Transport | AsyncTransport | None
    # When True, responses are strictly validated against the pydantic model
    # instead of being leniently constructed.
    _strict_response_validation: bool
    # Header name used for idempotency keys, if the API supports them.
    _idempotency_header: str | None
    _default_stream_cls: type[_DefaultStreamT] | None = None

    def __init__(
        self,
        *,
        version: str,
        base_url: str | URL,
        _strict_response_validation: bool,
        max_retries: int = DEFAULT_MAX_RETRIES,
        timeout: float | Timeout | None = DEFAULT_TIMEOUT,
        limits: httpx.Limits,
        transport: Transport | AsyncTransport | None,
        proxies: ProxiesTypes | None,
        custom_headers: Mapping[str, str] | None = None,
        custom_query: Mapping[str, object] | None = None,
    ) -> None:
        self._version = version
        self._base_url = self._enforce_trailing_slash(URL(base_url))
        self.max_retries = max_retries
        self.timeout = timeout
        self._limits = limits
        self._proxies = proxies
        self._transport = transport
        self._custom_headers = custom_headers or {}
        self._custom_query = custom_query or {}
        self._strict_response_validation = _strict_response_validation
        self._idempotency_header = None
        # Cached platform info; resolved lazily by `platform_headers()`.
        self._platform: Platform | None = None

        if max_retries is None:  # pyright: ignore[reportUnnecessaryComparison]
            raise TypeError(
                "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `openai.DEFAULT_MAX_RETRIES`"
            )

    def _enforce_trailing_slash(self, url: URL) -> URL:
        """Return `url` guaranteed to end in `/` so relative paths merge correctly."""
        if url.raw_path.endswith(b"/"):
            return url
        return url.copy_with(raw_path=url.raw_path + b"/")

    def _make_status_error_from_response(
        self,
        response: httpx.Response,
    ) -> APIStatusError:
        """Build an `APIStatusError` from a non-success response."""
        if response.is_closed and not response.is_stream_consumed:
            # We can't read the response body as it has been closed
            # before it was read. This can happen if an event hook
            # raises a status error.
            body = None
            err_msg = f"Error code: {response.status_code}"
        else:
            err_text = response.text.strip()
            body = err_text

            try:
                # Prefer the decoded JSON body in the message when possible.
                body = json.loads(err_text)
                err_msg = f"Error code: {response.status_code} - {body}"
            except Exception:
                err_msg = err_text or f"Error code: {response.status_code}"

        return self._make_status_error(err_msg, body=body, response=response)

    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> _exceptions.APIStatusError:
        """Map a response to a concrete status-error class; subclasses implement."""
        raise NotImplementedError()

    def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0) -> httpx.Headers:
        """Merge default, auth, platform and per-request headers into one set."""
        custom_headers = options.headers or {}
        headers_dict = _merge_mappings(self.default_headers, custom_headers)
        self._validate_headers(headers_dict, custom_headers)

        # headers are case-insensitive while dictionaries are not.
        headers = httpx.Headers(headers_dict)

        idempotency_header = self._idempotency_header
        # Only non-GET requests get an auto-generated idempotency key.
        if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
            headers[idempotency_header] = options.idempotency_key or self._idempotency_key()

        # Don't set the retry count header if it was already set or removed by the caller. We check
        # `custom_headers`, which can contain `Omit()`, instead of `headers` to account for the removal case.
        if "x-stainless-retry-count" not in (header.lower() for header in custom_headers):
            headers["x-stainless-retry-count"] = str(retries_taken)

        return headers

    def _prepare_url(self, url: str) -> URL:
        """
        Merge a URL argument together with any 'base_url' on the client,
        to create the URL used for the outgoing request.
        """
        # Copied from httpx's `_merge_url` method.
        merge_url = URL(url)
        if merge_url.is_relative_url:
            merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/")
            return self.base_url.copy_with(raw_path=merge_raw_path)

        return merge_url

    def _make_sse_decoder(self) -> SSEDecoder | SSEBytesDecoder:
        """Return the decoder used for server-sent-event streaming responses."""
        return SSEDecoder()

    def _build_request(
        self,
        options: FinalRequestOptions,
        *,
        retries_taken: int = 0,
    ) -> httpx.Request:
        """Turn `FinalRequestOptions` into a concrete `httpx.Request`.

        Handles extra-body merging, multipart/form-data special-casing and
        underscore-in-hostname SNI workarounds.
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug("Request options: %s", model_dump(options, exclude_unset=True))

        kwargs: dict[str, Any] = {}

        json_data = options.json_data
        if options.extra_json is not None:
            if json_data is None:
                json_data = cast(Body, options.extra_json)
            elif is_mapping(json_data):
                json_data = _merge_mappings(json_data, options.extra_json)
            else:
                raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")

        headers = self._build_headers(options, retries_taken=retries_taken)
        params = _merge_mappings(self.default_query, options.params)
        content_type = headers.get("Content-Type")
        files = options.files

        # If the given Content-Type header is multipart/form-data then it
        # has to be removed so that httpx can generate the header with
        # additional information for us as it has to be in this form
        # for the server to be able to correctly parse the request:
        # multipart/form-data; boundary=---abc--
        if content_type is not None and content_type.startswith("multipart/form-data"):
            if "boundary" not in content_type:
                # only remove the header if the boundary hasn't been explicitly set
                # as the caller doesn't want httpx to come up with their own boundary
                headers.pop("Content-Type")

            # As we are now sending multipart/form-data instead of application/json
            # we need to tell httpx to use it, https://www.python-httpx.org/advanced/clients/#multipart-file-encoding
            if json_data:
                if not is_dict(json_data):
                    raise TypeError(
                        f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead."
                    )
                kwargs["data"] = self._serialize_multipartform(json_data)

            # httpx determines whether or not to send a "multipart/form-data"
            # request based on the truthiness of the "files" argument.
            # This gets around that issue by generating a dict value that
            # evaluates to true.
            #
            # https://github.com/encode/httpx/discussions/2399#discussioncomment-3814186
            if not files:
                files = cast(HttpxRequestFiles, ForceMultipartDict())

        prepared_url = self._prepare_url(options.url)
        if "_" in prepared_url.host:
            # work around https://github.com/encode/httpx/discussions/2880
            kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}

        # TODO: report this error to httpx
        return self._client.build_request(  # pyright: ignore[reportUnknownMemberType]
            headers=headers,
            timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
            method=options.method,
            url=prepared_url,
            # the `Query` type that we use is incompatible with qs'
            # `Params` type as it needs to be typed as `Mapping[str, object]`
            # so that passing a `TypedDict` doesn't cause an error.
            # https://github.com/microsoft/pyright/issues/3526#event-6715453066
            params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
            json=json_data,
            files=files,
            **kwargs,
        )

    def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
        """Flatten nested JSON data into form fields suitable for multipart encoding."""
        items = self.qs.stringify_items(
            # TODO: type ignore is required as stringify_items is well typed but we can't be
            # well typed without heavy validation.
            data,  # type: ignore
            array_format="brackets",
        )
        serialized: dict[str, object] = {}
        for key, value in items:
            existing = serialized.get(key)

            if not existing:
                serialized[key] = value
                continue

            # If a value has already been set for this key then that
            # means we're sending data like `array[]=[1, 2, 3]` and we
            # need to tell httpx that we want to send multiple values with
            # the same key which is done by using a list or a tuple.
            #
            # Note: 2d arrays should never result in the same key at both
            # levels so it's safe to assume that if the value is a list,
            # it was because we changed it to be a list.
            if is_list(existing):
                existing.append(value)
            else:
                serialized[key] = [existing, value]

        return serialized

    def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]:
        """Honour the internal cast-to override header, stripping it from the options."""
        if not is_given(options.headers):
            return cast_to

        # make a copy of the headers so we don't mutate user-input
        headers = dict(options.headers)

        # we internally support defining a temporary header to override the
        # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
        # see _response.py for implementation details
        override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
        if is_given(override_cast_to):
            options.headers = headers
            return cast(Type[ResponseT], override_cast_to)

        return cast_to

    def _should_stream_response_body(self, request: httpx.Request) -> bool:
        """True when the caller requested the raw streamed response body."""
        return request.headers.get(RAW_RESPONSE_HEADER) == "stream"  # type: ignore[no-any-return]

    def _process_response_data(
        self,
        *,
        data: object,
        cast_to: type[ResponseT],
        response: httpx.Response,
    ) -> ResponseT:
        """Convert the decoded response payload into the requested type.

        Raises:
            APIResponseValidationError: if pydantic validation fails.
        """
        if data is None:
            return cast(ResponseT, None)

        if cast_to is object:
            return cast(ResponseT, data)

        try:
            if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol):
                return cast(ResponseT, cast_to.build(response=response, data=data))

            if self._strict_response_validation:
                return cast(ResponseT, validate_type(type_=cast_to, value=data))

            return cast(ResponseT, construct_type(type_=cast_to, value=data))
        except pydantic.ValidationError as err:
            raise APIResponseValidationError(response=response, body=data) from err

    @property
    def qs(self) -> Querystring:
        # Query-string serializer; subclasses may override the format.
        return Querystring()

    @property
    def custom_auth(self) -> httpx.Auth | None:
        # Optional httpx auth flow; None by default.
        return None

    @property
    def auth_headers(self) -> dict[str, str]:
        # Authentication headers; subclasses supply e.g. the Authorization header.
        return {}

    @property
    def default_headers(self) -> dict[str, str | Omit]:
        return {
            "Accept": "application/json",
            "Content-Type": "application/json",
            "User-Agent": self.user_agent,
            **self.platform_headers(),
            **self.auth_headers,
            # Custom headers last so callers can override any default.
            **self._custom_headers,
        }

    @property
    def default_query(self) -> dict[str, object]:
        return {
            **self._custom_query,
        }

    def _validate_headers(
        self,
        headers: Headers,  # noqa: ARG002
        custom_headers: Headers,  # noqa: ARG002
    ) -> None:
        """Validate the given default headers and custom headers.

        Does nothing by default.
        """
        return

    @property
    def user_agent(self) -> str:
        return f"{self.__class__.__name__}/Python {self._version}"

    @property
    def base_url(self) -> URL:
        return self._base_url

    @base_url.setter
    def base_url(self, url: URL | str) -> None:
        self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url))

    def platform_headers(self) -> Dict[str, str]:
        # the actual implementation is in a separate `lru_cache` decorated
        # function because adding `lru_cache` to methods will leak memory
        # https://github.com/python/cpython/issues/88476
        return platform_headers(self._version, platform=self._platform)

    def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None:
        """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified.

        About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
        See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax
        """
        if response_headers is None:
            return None

        # First, try the non-standard `retry-after-ms` header for milliseconds,
        # which is more precise than integer-seconds `retry-after`
        try:
            retry_ms_header = response_headers.get("retry-after-ms", None)
            # float(None) raises TypeError, caught below when the header is absent.
            return float(retry_ms_header) / 1000
        except (TypeError, ValueError):
            pass

        # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats).
        retry_header = response_headers.get("retry-after")
        try:
            # note: the spec indicates that this should only ever be an integer
            # but if someone sends a float there's no reason for us to not respect it
            return float(retry_header)
        except (TypeError, ValueError):
            pass

        # Last, try parsing `retry-after` as a date.
        retry_date_tuple = email.utils.parsedate_tz(retry_header)
        if retry_date_tuple is None:
            return None

        retry_date = email.utils.mktime_tz(retry_date_tuple)
        # Convert an absolute date to a relative delay; may be negative if the
        # date is already in the past (caller clamps/ignores as needed).
        return float(retry_date - time.time())

    def _calculate_retry_timeout(
        self,
        remaining_retries: int,
        options: FinalRequestOptions,
        response_headers: Optional[httpx.Headers] = None,
    ) -> float:
        """Compute how long to sleep before the next retry attempt, in seconds."""
        max_retries = options.get_max_retries(self.max_retries)

        # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says.
        retry_after = self._parse_retry_after_header(response_headers)
        if retry_after is not None and 0 < retry_after <= 60:
            return retry_after

        # Also cap retry count to 1000 to avoid any potential overflows with `pow`
        nb_retries = min(max_retries - remaining_retries, 1000)

        # Apply exponential backoff, but not more than the max.
        sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY)

        # Apply some jitter: scale the delay by a random factor in (0.75, 1.0].
        jitter = 1 - 0.25 * random()
        timeout = sleep_seconds * jitter
        return timeout if timeout >= 0 else 0

    def _should_retry(self, response: httpx.Response) -> bool:
        """Decide whether a failed response should be retried."""
        # Note: this is not a standard header
        should_retry_header = response.headers.get("x-should-retry")

        # If the server explicitly says whether or not to retry, obey.
        if should_retry_header == "true":
            log.debug("Retrying as header `x-should-retry` is set to `true`")
            return True
        if should_retry_header == "false":
            log.debug("Not retrying as header `x-should-retry` is set to `false`")
            return False

        # Retry on request timeouts.
        if response.status_code == 408:
            log.debug("Retrying due to status code %i", response.status_code)
            return True

        # Retry on lock timeouts.
        if response.status_code == 409:
            log.debug("Retrying due to status code %i", response.status_code)
            return True

        # Retry on rate limits.
        if response.status_code == 429:
            log.debug("Retrying due to status code %i", response.status_code)
            return True

        # Retry internal errors.
        if response.status_code >= 500:
            log.debug("Retrying due to status code %i", response.status_code)
            return True

        log.debug("Not retrying")
        return False

    def _idempotency_key(self) -> str:
        """Generate a fresh idempotency key for a retryable request."""
        return f"stainless-python-retry-{uuid.uuid4()}"
|
| 748 |
+
|
| 749 |
+
|
| 750 |
+
class _DefaultHttpxClient(httpx.Client):
|
| 751 |
+
def __init__(self, **kwargs: Any) -> None:
|
| 752 |
+
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
|
| 753 |
+
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
|
| 754 |
+
kwargs.setdefault("follow_redirects", True)
|
| 755 |
+
super().__init__(**kwargs)
|
| 756 |
+
|
| 757 |
+
|
| 758 |
+
if TYPE_CHECKING:
|
| 759 |
+
DefaultHttpxClient = httpx.Client
|
| 760 |
+
"""An alias to `httpx.Client` that provides the same defaults that this SDK
|
| 761 |
+
uses internally.
|
| 762 |
+
|
| 763 |
+
This is useful because overriding the `http_client` with your own instance of
|
| 764 |
+
`httpx.Client` will result in httpx's defaults being used, not ours.
|
| 765 |
+
"""
|
| 766 |
+
else:
|
| 767 |
+
DefaultHttpxClient = _DefaultHttpxClient
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
class SyncHttpxClientWrapper(DefaultHttpxClient):
|
| 771 |
+
def __del__(self) -> None:
|
| 772 |
+
if self.is_closed:
|
| 773 |
+
return
|
| 774 |
+
|
| 775 |
+
try:
|
| 776 |
+
self.close()
|
| 777 |
+
except Exception:
|
| 778 |
+
pass
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]):
|
| 782 |
+
_client: httpx.Client
|
| 783 |
+
_default_stream_cls: type[Stream[Any]] | None = None
|
| 784 |
+
|
| 785 |
+
def __init__(
|
| 786 |
+
self,
|
| 787 |
+
*,
|
| 788 |
+
version: str,
|
| 789 |
+
base_url: str | URL,
|
| 790 |
+
max_retries: int = DEFAULT_MAX_RETRIES,
|
| 791 |
+
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
|
| 792 |
+
transport: Transport | None = None,
|
| 793 |
+
proxies: ProxiesTypes | None = None,
|
| 794 |
+
limits: Limits | None = None,
|
| 795 |
+
http_client: httpx.Client | None = None,
|
| 796 |
+
custom_headers: Mapping[str, str] | None = None,
|
| 797 |
+
custom_query: Mapping[str, object] | None = None,
|
| 798 |
+
_strict_response_validation: bool,
|
| 799 |
+
) -> None:
|
| 800 |
+
kwargs: dict[str, Any] = {}
|
| 801 |
+
if limits is not None:
|
| 802 |
+
warnings.warn(
|
| 803 |
+
"The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead",
|
| 804 |
+
category=DeprecationWarning,
|
| 805 |
+
stacklevel=3,
|
| 806 |
+
)
|
| 807 |
+
if http_client is not None:
|
| 808 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`")
|
| 809 |
+
else:
|
| 810 |
+
limits = DEFAULT_CONNECTION_LIMITS
|
| 811 |
+
|
| 812 |
+
if transport is not None:
|
| 813 |
+
kwargs["transport"] = transport
|
| 814 |
+
warnings.warn(
|
| 815 |
+
"The `transport` argument is deprecated. The `http_client` argument should be passed instead",
|
| 816 |
+
category=DeprecationWarning,
|
| 817 |
+
stacklevel=3,
|
| 818 |
+
)
|
| 819 |
+
if http_client is not None:
|
| 820 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `transport`")
|
| 821 |
+
|
| 822 |
+
if proxies is not None:
|
| 823 |
+
kwargs["proxies"] = proxies
|
| 824 |
+
warnings.warn(
|
| 825 |
+
"The `proxies` argument is deprecated. The `http_client` argument should be passed instead",
|
| 826 |
+
category=DeprecationWarning,
|
| 827 |
+
stacklevel=3,
|
| 828 |
+
)
|
| 829 |
+
if http_client is not None:
|
| 830 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `proxies`")
|
| 831 |
+
|
| 832 |
+
if not is_given(timeout):
|
| 833 |
+
# if the user passed in a custom http client with a non-default
|
| 834 |
+
# timeout set then we use that timeout.
|
| 835 |
+
#
|
| 836 |
+
# note: there is an edge case here where the user passes in a client
|
| 837 |
+
# where they've explicitly set the timeout to match the default timeout
|
| 838 |
+
# as this check is structural, meaning that we'll think they didn't
|
| 839 |
+
# pass in a timeout and will ignore it
|
| 840 |
+
if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
|
| 841 |
+
timeout = http_client.timeout
|
| 842 |
+
else:
|
| 843 |
+
timeout = DEFAULT_TIMEOUT
|
| 844 |
+
|
| 845 |
+
if http_client is not None and not isinstance(http_client, httpx.Client): # pyright: ignore[reportUnnecessaryIsInstance]
|
| 846 |
+
raise TypeError(
|
| 847 |
+
f"Invalid `http_client` argument; Expected an instance of `httpx.Client` but got {type(http_client)}"
|
| 848 |
+
)
|
| 849 |
+
|
| 850 |
+
super().__init__(
|
| 851 |
+
version=version,
|
| 852 |
+
limits=limits,
|
| 853 |
+
# cast to a valid type because mypy doesn't understand our type narrowing
|
| 854 |
+
timeout=cast(Timeout, timeout),
|
| 855 |
+
proxies=proxies,
|
| 856 |
+
base_url=base_url,
|
| 857 |
+
transport=transport,
|
| 858 |
+
max_retries=max_retries,
|
| 859 |
+
custom_query=custom_query,
|
| 860 |
+
custom_headers=custom_headers,
|
| 861 |
+
_strict_response_validation=_strict_response_validation,
|
| 862 |
+
)
|
| 863 |
+
self._client = http_client or SyncHttpxClientWrapper(
|
| 864 |
+
base_url=base_url,
|
| 865 |
+
# cast to a valid type because mypy doesn't understand our type narrowing
|
| 866 |
+
timeout=cast(Timeout, timeout),
|
| 867 |
+
limits=limits,
|
| 868 |
+
follow_redirects=True,
|
| 869 |
+
**kwargs, # type: ignore
|
| 870 |
+
)
|
| 871 |
+
|
| 872 |
+
def is_closed(self) -> bool:
|
| 873 |
+
return self._client.is_closed
|
| 874 |
+
|
| 875 |
+
def close(self) -> None:
|
| 876 |
+
"""Close the underlying HTTPX client.
|
| 877 |
+
|
| 878 |
+
The client will *not* be usable after this.
|
| 879 |
+
"""
|
| 880 |
+
# If an error is thrown while constructing a client, self._client
|
| 881 |
+
# may not be present
|
| 882 |
+
if hasattr(self, "_client"):
|
| 883 |
+
self._client.close()
|
| 884 |
+
|
| 885 |
+
def __enter__(self: _T) -> _T:
|
| 886 |
+
return self
|
| 887 |
+
|
| 888 |
+
def __exit__(
|
| 889 |
+
self,
|
| 890 |
+
exc_type: type[BaseException] | None,
|
| 891 |
+
exc: BaseException | None,
|
| 892 |
+
exc_tb: TracebackType | None,
|
| 893 |
+
) -> None:
|
| 894 |
+
self.close()
|
| 895 |
+
|
| 896 |
+
def _prepare_options(
|
| 897 |
+
self,
|
| 898 |
+
options: FinalRequestOptions, # noqa: ARG002
|
| 899 |
+
) -> FinalRequestOptions:
|
| 900 |
+
"""Hook for mutating the given options"""
|
| 901 |
+
return options
|
| 902 |
+
|
| 903 |
+
def _prepare_request(
|
| 904 |
+
self,
|
| 905 |
+
request: httpx.Request, # noqa: ARG002
|
| 906 |
+
) -> None:
|
| 907 |
+
"""This method is used as a callback for mutating the `Request` object
|
| 908 |
+
after it has been constructed.
|
| 909 |
+
This is useful for cases where you want to add certain headers based off of
|
| 910 |
+
the request properties, e.g. `url`, `method` etc.
|
| 911 |
+
"""
|
| 912 |
+
return None
|
| 913 |
+
|
| 914 |
+
@overload
|
| 915 |
+
def request(
|
| 916 |
+
self,
|
| 917 |
+
cast_to: Type[ResponseT],
|
| 918 |
+
options: FinalRequestOptions,
|
| 919 |
+
remaining_retries: Optional[int] = None,
|
| 920 |
+
*,
|
| 921 |
+
stream: Literal[True],
|
| 922 |
+
stream_cls: Type[_StreamT],
|
| 923 |
+
) -> _StreamT: ...
|
| 924 |
+
|
| 925 |
+
@overload
|
| 926 |
+
def request(
|
| 927 |
+
self,
|
| 928 |
+
cast_to: Type[ResponseT],
|
| 929 |
+
options: FinalRequestOptions,
|
| 930 |
+
remaining_retries: Optional[int] = None,
|
| 931 |
+
*,
|
| 932 |
+
stream: Literal[False] = False,
|
| 933 |
+
) -> ResponseT: ...
|
| 934 |
+
|
| 935 |
+
@overload
|
| 936 |
+
def request(
|
| 937 |
+
self,
|
| 938 |
+
cast_to: Type[ResponseT],
|
| 939 |
+
options: FinalRequestOptions,
|
| 940 |
+
remaining_retries: Optional[int] = None,
|
| 941 |
+
*,
|
| 942 |
+
stream: bool = False,
|
| 943 |
+
stream_cls: Type[_StreamT] | None = None,
|
| 944 |
+
) -> ResponseT | _StreamT: ...
|
| 945 |
+
|
| 946 |
+
def request(
|
| 947 |
+
self,
|
| 948 |
+
cast_to: Type[ResponseT],
|
| 949 |
+
options: FinalRequestOptions,
|
| 950 |
+
remaining_retries: Optional[int] = None,
|
| 951 |
+
*,
|
| 952 |
+
stream: bool = False,
|
| 953 |
+
stream_cls: type[_StreamT] | None = None,
|
| 954 |
+
) -> ResponseT | _StreamT:
|
| 955 |
+
if remaining_retries is not None:
|
| 956 |
+
retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
|
| 957 |
+
else:
|
| 958 |
+
retries_taken = 0
|
| 959 |
+
|
| 960 |
+
return self._request(
|
| 961 |
+
cast_to=cast_to,
|
| 962 |
+
options=options,
|
| 963 |
+
stream=stream,
|
| 964 |
+
stream_cls=stream_cls,
|
| 965 |
+
retries_taken=retries_taken,
|
| 966 |
+
)
|
| 967 |
+
|
| 968 |
+
def _request(
|
| 969 |
+
self,
|
| 970 |
+
*,
|
| 971 |
+
cast_to: Type[ResponseT],
|
| 972 |
+
options: FinalRequestOptions,
|
| 973 |
+
retries_taken: int,
|
| 974 |
+
stream: bool,
|
| 975 |
+
stream_cls: type[_StreamT] | None,
|
| 976 |
+
) -> ResponseT | _StreamT:
|
| 977 |
+
# create a copy of the options we were given so that if the
|
| 978 |
+
# options are mutated later & we then retry, the retries are
|
| 979 |
+
# given the original options
|
| 980 |
+
input_options = model_copy(options)
|
| 981 |
+
|
| 982 |
+
cast_to = self._maybe_override_cast_to(cast_to, options)
|
| 983 |
+
options = self._prepare_options(options)
|
| 984 |
+
|
| 985 |
+
remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
|
| 986 |
+
request = self._build_request(options, retries_taken=retries_taken)
|
| 987 |
+
self._prepare_request(request)
|
| 988 |
+
|
| 989 |
+
kwargs: HttpxSendArgs = {}
|
| 990 |
+
if self.custom_auth is not None:
|
| 991 |
+
kwargs["auth"] = self.custom_auth
|
| 992 |
+
|
| 993 |
+
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
|
| 994 |
+
|
| 995 |
+
try:
|
| 996 |
+
response = self._client.send(
|
| 997 |
+
request,
|
| 998 |
+
stream=stream or self._should_stream_response_body(request=request),
|
| 999 |
+
**kwargs,
|
| 1000 |
+
)
|
| 1001 |
+
except httpx.TimeoutException as err:
|
| 1002 |
+
log.debug("Encountered httpx.TimeoutException", exc_info=True)
|
| 1003 |
+
|
| 1004 |
+
if remaining_retries > 0:
|
| 1005 |
+
return self._retry_request(
|
| 1006 |
+
input_options,
|
| 1007 |
+
cast_to,
|
| 1008 |
+
retries_taken=retries_taken,
|
| 1009 |
+
stream=stream,
|
| 1010 |
+
stream_cls=stream_cls,
|
| 1011 |
+
response_headers=None,
|
| 1012 |
+
)
|
| 1013 |
+
|
| 1014 |
+
log.debug("Raising timeout error")
|
| 1015 |
+
raise APITimeoutError(request=request) from err
|
| 1016 |
+
except Exception as err:
|
| 1017 |
+
log.debug("Encountered Exception", exc_info=True)
|
| 1018 |
+
|
| 1019 |
+
if remaining_retries > 0:
|
| 1020 |
+
return self._retry_request(
|
| 1021 |
+
input_options,
|
| 1022 |
+
cast_to,
|
| 1023 |
+
retries_taken=retries_taken,
|
| 1024 |
+
stream=stream,
|
| 1025 |
+
stream_cls=stream_cls,
|
| 1026 |
+
response_headers=None,
|
| 1027 |
+
)
|
| 1028 |
+
|
| 1029 |
+
log.debug("Raising connection error")
|
| 1030 |
+
raise APIConnectionError(request=request) from err
|
| 1031 |
+
|
| 1032 |
+
log.debug(
|
| 1033 |
+
'HTTP Response: %s %s "%i %s" %s',
|
| 1034 |
+
request.method,
|
| 1035 |
+
request.url,
|
| 1036 |
+
response.status_code,
|
| 1037 |
+
response.reason_phrase,
|
| 1038 |
+
response.headers,
|
| 1039 |
+
)
|
| 1040 |
+
log.debug("request_id: %s", response.headers.get("x-request-id"))
|
| 1041 |
+
|
| 1042 |
+
try:
|
| 1043 |
+
response.raise_for_status()
|
| 1044 |
+
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
|
| 1045 |
+
log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
|
| 1046 |
+
|
| 1047 |
+
if remaining_retries > 0 and self._should_retry(err.response):
|
| 1048 |
+
err.response.close()
|
| 1049 |
+
return self._retry_request(
|
| 1050 |
+
input_options,
|
| 1051 |
+
cast_to,
|
| 1052 |
+
retries_taken=retries_taken,
|
| 1053 |
+
response_headers=err.response.headers,
|
| 1054 |
+
stream=stream,
|
| 1055 |
+
stream_cls=stream_cls,
|
| 1056 |
+
)
|
| 1057 |
+
|
| 1058 |
+
# If the response is streamed then we need to explicitly read the response
|
| 1059 |
+
# to completion before attempting to access the response text.
|
| 1060 |
+
if not err.response.is_closed:
|
| 1061 |
+
err.response.read()
|
| 1062 |
+
|
| 1063 |
+
log.debug("Re-raising status error")
|
| 1064 |
+
raise self._make_status_error_from_response(err.response) from None
|
| 1065 |
+
|
| 1066 |
+
return self._process_response(
|
| 1067 |
+
cast_to=cast_to,
|
| 1068 |
+
options=options,
|
| 1069 |
+
response=response,
|
| 1070 |
+
stream=stream,
|
| 1071 |
+
stream_cls=stream_cls,
|
| 1072 |
+
retries_taken=retries_taken,
|
| 1073 |
+
)
|
| 1074 |
+
|
| 1075 |
+
def _retry_request(
|
| 1076 |
+
self,
|
| 1077 |
+
options: FinalRequestOptions,
|
| 1078 |
+
cast_to: Type[ResponseT],
|
| 1079 |
+
*,
|
| 1080 |
+
retries_taken: int,
|
| 1081 |
+
response_headers: httpx.Headers | None,
|
| 1082 |
+
stream: bool,
|
| 1083 |
+
stream_cls: type[_StreamT] | None,
|
| 1084 |
+
) -> ResponseT | _StreamT:
|
| 1085 |
+
remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
|
| 1086 |
+
if remaining_retries == 1:
|
| 1087 |
+
log.debug("1 retry left")
|
| 1088 |
+
else:
|
| 1089 |
+
log.debug("%i retries left", remaining_retries)
|
| 1090 |
+
|
| 1091 |
+
timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
|
| 1092 |
+
log.info("Retrying request to %s in %f seconds", options.url, timeout)
|
| 1093 |
+
|
| 1094 |
+
# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
|
| 1095 |
+
# different thread if necessary.
|
| 1096 |
+
time.sleep(timeout)
|
| 1097 |
+
|
| 1098 |
+
return self._request(
|
| 1099 |
+
options=options,
|
| 1100 |
+
cast_to=cast_to,
|
| 1101 |
+
retries_taken=retries_taken + 1,
|
| 1102 |
+
stream=stream,
|
| 1103 |
+
stream_cls=stream_cls,
|
| 1104 |
+
)
|
| 1105 |
+
|
| 1106 |
+
def _process_response(
|
| 1107 |
+
self,
|
| 1108 |
+
*,
|
| 1109 |
+
cast_to: Type[ResponseT],
|
| 1110 |
+
options: FinalRequestOptions,
|
| 1111 |
+
response: httpx.Response,
|
| 1112 |
+
stream: bool,
|
| 1113 |
+
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
|
| 1114 |
+
retries_taken: int = 0,
|
| 1115 |
+
) -> ResponseT:
|
| 1116 |
+
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
|
| 1117 |
+
return cast(
|
| 1118 |
+
ResponseT,
|
| 1119 |
+
LegacyAPIResponse(
|
| 1120 |
+
raw=response,
|
| 1121 |
+
client=self,
|
| 1122 |
+
cast_to=cast_to,
|
| 1123 |
+
stream=stream,
|
| 1124 |
+
stream_cls=stream_cls,
|
| 1125 |
+
options=options,
|
| 1126 |
+
retries_taken=retries_taken,
|
| 1127 |
+
),
|
| 1128 |
+
)
|
| 1129 |
+
|
| 1130 |
+
origin = get_origin(cast_to) or cast_to
|
| 1131 |
+
|
| 1132 |
+
if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
|
| 1133 |
+
if not issubclass(origin, APIResponse):
|
| 1134 |
+
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
|
| 1135 |
+
|
| 1136 |
+
response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
|
| 1137 |
+
return cast(
|
| 1138 |
+
ResponseT,
|
| 1139 |
+
response_cls(
|
| 1140 |
+
raw=response,
|
| 1141 |
+
client=self,
|
| 1142 |
+
cast_to=extract_response_type(response_cls),
|
| 1143 |
+
stream=stream,
|
| 1144 |
+
stream_cls=stream_cls,
|
| 1145 |
+
options=options,
|
| 1146 |
+
retries_taken=retries_taken,
|
| 1147 |
+
),
|
| 1148 |
+
)
|
| 1149 |
+
|
| 1150 |
+
if cast_to == httpx.Response:
|
| 1151 |
+
return cast(ResponseT, response)
|
| 1152 |
+
|
| 1153 |
+
api_response = APIResponse(
|
| 1154 |
+
raw=response,
|
| 1155 |
+
client=self,
|
| 1156 |
+
cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast]
|
| 1157 |
+
stream=stream,
|
| 1158 |
+
stream_cls=stream_cls,
|
| 1159 |
+
options=options,
|
| 1160 |
+
retries_taken=retries_taken,
|
| 1161 |
+
)
|
| 1162 |
+
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
|
| 1163 |
+
return cast(ResponseT, api_response)
|
| 1164 |
+
|
| 1165 |
+
return api_response.parse()
|
| 1166 |
+
|
| 1167 |
+
def _request_api_list(
|
| 1168 |
+
self,
|
| 1169 |
+
model: Type[object],
|
| 1170 |
+
page: Type[SyncPageT],
|
| 1171 |
+
options: FinalRequestOptions,
|
| 1172 |
+
) -> SyncPageT:
|
| 1173 |
+
def _parser(resp: SyncPageT) -> SyncPageT:
|
| 1174 |
+
resp._set_private_attributes(
|
| 1175 |
+
client=self,
|
| 1176 |
+
model=model,
|
| 1177 |
+
options=options,
|
| 1178 |
+
)
|
| 1179 |
+
return resp
|
| 1180 |
+
|
| 1181 |
+
options.post_parser = _parser
|
| 1182 |
+
|
| 1183 |
+
return self.request(page, options, stream=False)
|
| 1184 |
+
|
| 1185 |
+
@overload
|
| 1186 |
+
def get(
|
| 1187 |
+
self,
|
| 1188 |
+
path: str,
|
| 1189 |
+
*,
|
| 1190 |
+
cast_to: Type[ResponseT],
|
| 1191 |
+
options: RequestOptions = {},
|
| 1192 |
+
stream: Literal[False] = False,
|
| 1193 |
+
) -> ResponseT: ...
|
| 1194 |
+
|
| 1195 |
+
@overload
|
| 1196 |
+
def get(
|
| 1197 |
+
self,
|
| 1198 |
+
path: str,
|
| 1199 |
+
*,
|
| 1200 |
+
cast_to: Type[ResponseT],
|
| 1201 |
+
options: RequestOptions = {},
|
| 1202 |
+
stream: Literal[True],
|
| 1203 |
+
stream_cls: type[_StreamT],
|
| 1204 |
+
) -> _StreamT: ...
|
| 1205 |
+
|
| 1206 |
+
@overload
|
| 1207 |
+
def get(
|
| 1208 |
+
self,
|
| 1209 |
+
path: str,
|
| 1210 |
+
*,
|
| 1211 |
+
cast_to: Type[ResponseT],
|
| 1212 |
+
options: RequestOptions = {},
|
| 1213 |
+
stream: bool,
|
| 1214 |
+
stream_cls: type[_StreamT] | None = None,
|
| 1215 |
+
) -> ResponseT | _StreamT: ...
|
| 1216 |
+
|
| 1217 |
+
def get(
|
| 1218 |
+
self,
|
| 1219 |
+
path: str,
|
| 1220 |
+
*,
|
| 1221 |
+
cast_to: Type[ResponseT],
|
| 1222 |
+
options: RequestOptions = {},
|
| 1223 |
+
stream: bool = False,
|
| 1224 |
+
stream_cls: type[_StreamT] | None = None,
|
| 1225 |
+
) -> ResponseT | _StreamT:
|
| 1226 |
+
opts = FinalRequestOptions.construct(method="get", url=path, **options)
|
| 1227 |
+
# cast is required because mypy complains about returning Any even though
|
| 1228 |
+
# it understands the type variables
|
| 1229 |
+
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
|
| 1230 |
+
|
| 1231 |
+
@overload
|
| 1232 |
+
def post(
|
| 1233 |
+
self,
|
| 1234 |
+
path: str,
|
| 1235 |
+
*,
|
| 1236 |
+
cast_to: Type[ResponseT],
|
| 1237 |
+
body: Body | None = None,
|
| 1238 |
+
options: RequestOptions = {},
|
| 1239 |
+
files: RequestFiles | None = None,
|
| 1240 |
+
stream: Literal[False] = False,
|
| 1241 |
+
) -> ResponseT: ...
|
| 1242 |
+
|
| 1243 |
+
@overload
|
| 1244 |
+
def post(
|
| 1245 |
+
self,
|
| 1246 |
+
path: str,
|
| 1247 |
+
*,
|
| 1248 |
+
cast_to: Type[ResponseT],
|
| 1249 |
+
body: Body | None = None,
|
| 1250 |
+
options: RequestOptions = {},
|
| 1251 |
+
files: RequestFiles | None = None,
|
| 1252 |
+
stream: Literal[True],
|
| 1253 |
+
stream_cls: type[_StreamT],
|
| 1254 |
+
) -> _StreamT: ...
|
| 1255 |
+
|
| 1256 |
+
@overload
|
| 1257 |
+
def post(
|
| 1258 |
+
self,
|
| 1259 |
+
path: str,
|
| 1260 |
+
*,
|
| 1261 |
+
cast_to: Type[ResponseT],
|
| 1262 |
+
body: Body | None = None,
|
| 1263 |
+
options: RequestOptions = {},
|
| 1264 |
+
files: RequestFiles | None = None,
|
| 1265 |
+
stream: bool,
|
| 1266 |
+
stream_cls: type[_StreamT] | None = None,
|
| 1267 |
+
) -> ResponseT | _StreamT: ...
|
| 1268 |
+
|
| 1269 |
+
def post(
|
| 1270 |
+
self,
|
| 1271 |
+
path: str,
|
| 1272 |
+
*,
|
| 1273 |
+
cast_to: Type[ResponseT],
|
| 1274 |
+
body: Body | None = None,
|
| 1275 |
+
options: RequestOptions = {},
|
| 1276 |
+
files: RequestFiles | None = None,
|
| 1277 |
+
stream: bool = False,
|
| 1278 |
+
stream_cls: type[_StreamT] | None = None,
|
| 1279 |
+
) -> ResponseT | _StreamT:
|
| 1280 |
+
opts = FinalRequestOptions.construct(
|
| 1281 |
+
method="post", url=path, json_data=body, files=to_httpx_files(files), **options
|
| 1282 |
+
)
|
| 1283 |
+
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
|
| 1284 |
+
|
| 1285 |
+
def patch(
|
| 1286 |
+
self,
|
| 1287 |
+
path: str,
|
| 1288 |
+
*,
|
| 1289 |
+
cast_to: Type[ResponseT],
|
| 1290 |
+
body: Body | None = None,
|
| 1291 |
+
options: RequestOptions = {},
|
| 1292 |
+
) -> ResponseT:
|
| 1293 |
+
opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
|
| 1294 |
+
return self.request(cast_to, opts)
|
| 1295 |
+
|
| 1296 |
+
def put(
|
| 1297 |
+
self,
|
| 1298 |
+
path: str,
|
| 1299 |
+
*,
|
| 1300 |
+
cast_to: Type[ResponseT],
|
| 1301 |
+
body: Body | None = None,
|
| 1302 |
+
files: RequestFiles | None = None,
|
| 1303 |
+
options: RequestOptions = {},
|
| 1304 |
+
) -> ResponseT:
|
| 1305 |
+
opts = FinalRequestOptions.construct(
|
| 1306 |
+
method="put", url=path, json_data=body, files=to_httpx_files(files), **options
|
| 1307 |
+
)
|
| 1308 |
+
return self.request(cast_to, opts)
|
| 1309 |
+
|
| 1310 |
+
def delete(
|
| 1311 |
+
self,
|
| 1312 |
+
path: str,
|
| 1313 |
+
*,
|
| 1314 |
+
cast_to: Type[ResponseT],
|
| 1315 |
+
body: Body | None = None,
|
| 1316 |
+
options: RequestOptions = {},
|
| 1317 |
+
) -> ResponseT:
|
| 1318 |
+
opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
|
| 1319 |
+
return self.request(cast_to, opts)
|
| 1320 |
+
|
| 1321 |
+
def get_api_list(
|
| 1322 |
+
self,
|
| 1323 |
+
path: str,
|
| 1324 |
+
*,
|
| 1325 |
+
model: Type[object],
|
| 1326 |
+
page: Type[SyncPageT],
|
| 1327 |
+
body: Body | None = None,
|
| 1328 |
+
options: RequestOptions = {},
|
| 1329 |
+
method: str = "get",
|
| 1330 |
+
) -> SyncPageT:
|
| 1331 |
+
opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
|
| 1332 |
+
return self._request_api_list(model, page, opts)
|
| 1333 |
+
|
| 1334 |
+
|
| 1335 |
+
class _DefaultAsyncHttpxClient(httpx.AsyncClient):
|
| 1336 |
+
def __init__(self, **kwargs: Any) -> None:
|
| 1337 |
+
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
|
| 1338 |
+
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
|
| 1339 |
+
kwargs.setdefault("follow_redirects", True)
|
| 1340 |
+
super().__init__(**kwargs)
|
| 1341 |
+
|
| 1342 |
+
|
| 1343 |
+
if TYPE_CHECKING:
|
| 1344 |
+
DefaultAsyncHttpxClient = httpx.AsyncClient
|
| 1345 |
+
"""An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
|
| 1346 |
+
uses internally.
|
| 1347 |
+
|
| 1348 |
+
This is useful because overriding the `http_client` with your own instance of
|
| 1349 |
+
`httpx.AsyncClient` will result in httpx's defaults being used, not ours.
|
| 1350 |
+
"""
|
| 1351 |
+
else:
|
| 1352 |
+
DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
|
| 1353 |
+
|
| 1354 |
+
|
| 1355 |
+
class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
|
| 1356 |
+
def __del__(self) -> None:
|
| 1357 |
+
if self.is_closed:
|
| 1358 |
+
return
|
| 1359 |
+
|
| 1360 |
+
try:
|
| 1361 |
+
# TODO(someday): support non asyncio runtimes here
|
| 1362 |
+
asyncio.get_running_loop().create_task(self.aclose())
|
| 1363 |
+
except Exception:
|
| 1364 |
+
pass
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
|
| 1368 |
+
_client: httpx.AsyncClient
|
| 1369 |
+
_default_stream_cls: type[AsyncStream[Any]] | None = None
|
| 1370 |
+
|
| 1371 |
+
def __init__(
|
| 1372 |
+
self,
|
| 1373 |
+
*,
|
| 1374 |
+
version: str,
|
| 1375 |
+
base_url: str | URL,
|
| 1376 |
+
_strict_response_validation: bool,
|
| 1377 |
+
max_retries: int = DEFAULT_MAX_RETRIES,
|
| 1378 |
+
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
|
| 1379 |
+
transport: AsyncTransport | None = None,
|
| 1380 |
+
proxies: ProxiesTypes | None = None,
|
| 1381 |
+
limits: Limits | None = None,
|
| 1382 |
+
http_client: httpx.AsyncClient | None = None,
|
| 1383 |
+
custom_headers: Mapping[str, str] | None = None,
|
| 1384 |
+
custom_query: Mapping[str, object] | None = None,
|
| 1385 |
+
) -> None:
|
| 1386 |
+
kwargs: dict[str, Any] = {}
|
| 1387 |
+
if limits is not None:
|
| 1388 |
+
warnings.warn(
|
| 1389 |
+
"The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead",
|
| 1390 |
+
category=DeprecationWarning,
|
| 1391 |
+
stacklevel=3,
|
| 1392 |
+
)
|
| 1393 |
+
if http_client is not None:
|
| 1394 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`")
|
| 1395 |
+
else:
|
| 1396 |
+
limits = DEFAULT_CONNECTION_LIMITS
|
| 1397 |
+
|
| 1398 |
+
if transport is not None:
|
| 1399 |
+
kwargs["transport"] = transport
|
| 1400 |
+
warnings.warn(
|
| 1401 |
+
"The `transport` argument is deprecated. The `http_client` argument should be passed instead",
|
| 1402 |
+
category=DeprecationWarning,
|
| 1403 |
+
stacklevel=3,
|
| 1404 |
+
)
|
| 1405 |
+
if http_client is not None:
|
| 1406 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `transport`")
|
| 1407 |
+
|
| 1408 |
+
if proxies is not None:
|
| 1409 |
+
kwargs["proxies"] = proxies
|
| 1410 |
+
warnings.warn(
|
| 1411 |
+
"The `proxies` argument is deprecated. The `http_client` argument should be passed instead",
|
| 1412 |
+
category=DeprecationWarning,
|
| 1413 |
+
stacklevel=3,
|
| 1414 |
+
)
|
| 1415 |
+
if http_client is not None:
|
| 1416 |
+
raise ValueError("The `http_client` argument is mutually exclusive with `proxies`")
|
| 1417 |
+
|
| 1418 |
+
if not is_given(timeout):
|
| 1419 |
+
# if the user passed in a custom http client with a non-default
|
| 1420 |
+
# timeout set then we use that timeout.
|
| 1421 |
+
#
|
| 1422 |
+
# note: there is an edge case here where the user passes in a client
|
| 1423 |
+
# where they've explicitly set the timeout to match the default timeout
|
| 1424 |
+
# as this check is structural, meaning that we'll think they didn't
|
| 1425 |
+
# pass in a timeout and will ignore it
|
| 1426 |
+
if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
|
| 1427 |
+
timeout = http_client.timeout
|
| 1428 |
+
else:
|
| 1429 |
+
timeout = DEFAULT_TIMEOUT
|
| 1430 |
+
|
| 1431 |
+
if http_client is not None and not isinstance(http_client, httpx.AsyncClient): # pyright: ignore[reportUnnecessaryIsInstance]
|
| 1432 |
+
raise TypeError(
|
| 1433 |
+
f"Invalid `http_client` argument; Expected an instance of `httpx.AsyncClient` but got {type(http_client)}"
|
| 1434 |
+
)
|
| 1435 |
+
|
| 1436 |
+
super().__init__(
|
| 1437 |
+
version=version,
|
| 1438 |
+
base_url=base_url,
|
| 1439 |
+
limits=limits,
|
| 1440 |
+
# cast to a valid type because mypy doesn't understand our type narrowing
|
| 1441 |
+
timeout=cast(Timeout, timeout),
|
| 1442 |
+
proxies=proxies,
|
| 1443 |
+
transport=transport,
|
| 1444 |
+
max_retries=max_retries,
|
| 1445 |
+
custom_query=custom_query,
|
| 1446 |
+
custom_headers=custom_headers,
|
| 1447 |
+
_strict_response_validation=_strict_response_validation,
|
| 1448 |
+
)
|
| 1449 |
+
self._client = http_client or AsyncHttpxClientWrapper(
|
| 1450 |
+
base_url=base_url,
|
| 1451 |
+
# cast to a valid type because mypy doesn't understand our type narrowing
|
| 1452 |
+
timeout=cast(Timeout, timeout),
|
| 1453 |
+
limits=limits,
|
| 1454 |
+
follow_redirects=True,
|
| 1455 |
+
**kwargs, # type: ignore
|
| 1456 |
+
)
|
| 1457 |
+
|
| 1458 |
+
def is_closed(self) -> bool:
|
| 1459 |
+
return self._client.is_closed
|
| 1460 |
+
|
| 1461 |
+
async def close(self) -> None:
|
| 1462 |
+
"""Close the underlying HTTPX client.
|
| 1463 |
+
|
| 1464 |
+
The client will *not* be usable after this.
|
| 1465 |
+
"""
|
| 1466 |
+
await self._client.aclose()
|
| 1467 |
+
|
| 1468 |
+
async def __aenter__(self: _T) -> _T:
|
| 1469 |
+
return self
|
| 1470 |
+
|
| 1471 |
+
async def __aexit__(
|
| 1472 |
+
self,
|
| 1473 |
+
exc_type: type[BaseException] | None,
|
| 1474 |
+
exc: BaseException | None,
|
| 1475 |
+
exc_tb: TracebackType | None,
|
| 1476 |
+
) -> None:
|
| 1477 |
+
await self.close()
|
| 1478 |
+
|
| 1479 |
+
async def _prepare_options(
|
| 1480 |
+
self,
|
| 1481 |
+
options: FinalRequestOptions, # noqa: ARG002
|
| 1482 |
+
) -> FinalRequestOptions:
|
| 1483 |
+
"""Hook for mutating the given options"""
|
| 1484 |
+
return options
|
| 1485 |
+
|
| 1486 |
+
async def _prepare_request(
|
| 1487 |
+
self,
|
| 1488 |
+
request: httpx.Request, # noqa: ARG002
|
| 1489 |
+
) -> None:
|
| 1490 |
+
"""This method is used as a callback for mutating the `Request` object
|
| 1491 |
+
after it has been constructed.
|
| 1492 |
+
This is useful for cases where you want to add certain headers based off of
|
| 1493 |
+
the request properties, e.g. `url`, `method` etc.
|
| 1494 |
+
"""
|
| 1495 |
+
return None
|
| 1496 |
+
|
| 1497 |
+
@overload
|
| 1498 |
+
async def request(
|
| 1499 |
+
self,
|
| 1500 |
+
cast_to: Type[ResponseT],
|
| 1501 |
+
options: FinalRequestOptions,
|
| 1502 |
+
*,
|
| 1503 |
+
stream: Literal[False] = False,
|
| 1504 |
+
remaining_retries: Optional[int] = None,
|
| 1505 |
+
) -> ResponseT: ...
|
| 1506 |
+
|
| 1507 |
+
@overload
|
| 1508 |
+
async def request(
|
| 1509 |
+
self,
|
| 1510 |
+
cast_to: Type[ResponseT],
|
| 1511 |
+
options: FinalRequestOptions,
|
| 1512 |
+
*,
|
| 1513 |
+
stream: Literal[True],
|
| 1514 |
+
stream_cls: type[_AsyncStreamT],
|
| 1515 |
+
remaining_retries: Optional[int] = None,
|
| 1516 |
+
) -> _AsyncStreamT: ...
|
| 1517 |
+
|
| 1518 |
+
@overload
|
| 1519 |
+
async def request(
|
| 1520 |
+
self,
|
| 1521 |
+
cast_to: Type[ResponseT],
|
| 1522 |
+
options: FinalRequestOptions,
|
| 1523 |
+
*,
|
| 1524 |
+
stream: bool,
|
| 1525 |
+
stream_cls: type[_AsyncStreamT] | None = None,
|
| 1526 |
+
remaining_retries: Optional[int] = None,
|
| 1527 |
+
) -> ResponseT | _AsyncStreamT: ...
|
| 1528 |
+
|
| 1529 |
+
async def request(
|
| 1530 |
+
self,
|
| 1531 |
+
cast_to: Type[ResponseT],
|
| 1532 |
+
options: FinalRequestOptions,
|
| 1533 |
+
*,
|
| 1534 |
+
stream: bool = False,
|
| 1535 |
+
stream_cls: type[_AsyncStreamT] | None = None,
|
| 1536 |
+
remaining_retries: Optional[int] = None,
|
| 1537 |
+
) -> ResponseT | _AsyncStreamT:
|
| 1538 |
+
if remaining_retries is not None:
|
| 1539 |
+
retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
|
| 1540 |
+
else:
|
| 1541 |
+
retries_taken = 0
|
| 1542 |
+
|
| 1543 |
+
return await self._request(
|
| 1544 |
+
cast_to=cast_to,
|
| 1545 |
+
options=options,
|
| 1546 |
+
stream=stream,
|
| 1547 |
+
stream_cls=stream_cls,
|
| 1548 |
+
retries_taken=retries_taken,
|
| 1549 |
+
)
|
| 1550 |
+
|
| 1551 |
+
async def _request(
    self,
    cast_to: Type[ResponseT],
    options: FinalRequestOptions,
    *,
    stream: bool,
    stream_cls: type[_AsyncStreamT] | None,
    retries_taken: int,
) -> ResponseT | _AsyncStreamT:
    """Perform one HTTP attempt for ``options``, retrying on failure.

    This is the core send loop: it builds the httpx request, sends it, and
    on timeout / connection error / retryable status codes re-enters itself
    via ``_retry_request`` (which increments ``retries_taken``). On success
    the raw response is handed to ``_process_response``.
    """
    if self._platform is None:
        # `get_platform` can make blocking IO calls so we
        # execute it earlier while we are in an async context
        self._platform = await asyncify(get_platform)()

    # create a copy of the options we were given so that if the
    # options are mutated later & we then retry, the retries are
    # given the original options
    input_options = model_copy(options)

    cast_to = self._maybe_override_cast_to(cast_to, options)
    options = await self._prepare_options(options)

    remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
    request = self._build_request(options, retries_taken=retries_taken)
    await self._prepare_request(request)

    kwargs: HttpxSendArgs = {}
    if self.custom_auth is not None:
        # Let a user-supplied httpx auth object sign the request.
        kwargs["auth"] = self.custom_auth

    try:
        response = await self._client.send(
            request,
            stream=stream or self._should_stream_response_body(request=request),
            **kwargs,
        )
    except httpx.TimeoutException as err:
        log.debug("Encountered httpx.TimeoutException", exc_info=True)

        # Timeouts are always considered retryable while budget remains.
        if remaining_retries > 0:
            return await self._retry_request(
                input_options,
                cast_to,
                retries_taken=retries_taken,
                stream=stream,
                stream_cls=stream_cls,
                response_headers=None,
            )

        log.debug("Raising timeout error")
        raise APITimeoutError(request=request) from err
    except Exception as err:
        log.debug("Encountered Exception", exc_info=True)

        # Any other transport-level failure is treated as a connection error
        # and retried while budget remains.
        if remaining_retries > 0:
            return await self._retry_request(
                input_options,
                cast_to,
                retries_taken=retries_taken,
                stream=stream,
                stream_cls=stream_cls,
                response_headers=None,
            )

        log.debug("Raising connection error")
        raise APIConnectionError(request=request) from err

    log.debug(
        'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
    )

    try:
        response.raise_for_status()
    except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
        log.debug("Encountered httpx.HTTPStatusError", exc_info=True)

        # Only retry status errors the server marks (or we classify) as
        # retryable; the response headers feed the backoff calculation
        # (e.g. Retry-After).
        if remaining_retries > 0 and self._should_retry(err.response):
            await err.response.aclose()
            return await self._retry_request(
                input_options,
                cast_to,
                retries_taken=retries_taken,
                response_headers=err.response.headers,
                stream=stream,
                stream_cls=stream_cls,
            )

        # If the response is streamed then we need to explicitly read the response
        # to completion before attempting to access the response text.
        if not err.response.is_closed:
            await err.response.aread()

        log.debug("Re-raising status error")
        raise self._make_status_error_from_response(err.response) from None

    return await self._process_response(
        cast_to=cast_to,
        options=options,
        response=response,
        stream=stream,
        stream_cls=stream_cls,
        retries_taken=retries_taken,
    )
|
| 1654 |
+
|
| 1655 |
+
async def _retry_request(
    self,
    options: FinalRequestOptions,
    cast_to: Type[ResponseT],
    *,
    retries_taken: int,
    response_headers: httpx.Headers | None,
    stream: bool,
    stream_cls: type[_AsyncStreamT] | None,
) -> ResponseT | _AsyncStreamT:
    """Sleep for the computed backoff interval, then re-issue the request.

    ``response_headers`` (from the failed attempt, if any) may influence the
    backoff via ``_calculate_retry_timeout`` (e.g. server retry hints).
    """
    retries_left = options.get_max_retries(self.max_retries) - retries_taken
    if retries_left == 1:
        log.debug("1 retry left")
    else:
        log.debug("%i retries left", retries_left)

    sleep_seconds = self._calculate_retry_timeout(retries_left, options, response_headers)
    log.info("Retrying request to %s in %f seconds", options.url, sleep_seconds)

    # Non-blocking sleep so other tasks continue to run during backoff.
    await anyio.sleep(sleep_seconds)

    return await self._request(
        options=options,
        cast_to=cast_to,
        retries_taken=retries_taken + 1,
        stream=stream,
        stream_cls=stream_cls,
    )
|
| 1683 |
+
|
| 1684 |
+
async def _process_response(
    self,
    *,
    cast_to: Type[ResponseT],
    options: FinalRequestOptions,
    response: httpx.Response,
    stream: bool,
    stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
    retries_taken: int = 0,
) -> ResponseT:
    """Wrap/parse a successful httpx response according to ``cast_to``.

    Dispatch order:
      1. legacy raw-response mode (header value exactly "true") -> LegacyAPIResponse
      2. ``cast_to`` is itself an API response class -> instantiate it
      3. ``cast_to`` is ``httpx.Response`` -> return the raw response
      4. otherwise -> wrap in ``AsyncAPIResponse`` and parse (unless raw
         mode was requested via any truthy header value).
    """
    if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
        return cast(
            ResponseT,
            LegacyAPIResponse(
                raw=response,
                client=self,
                cast_to=cast_to,
                stream=stream,
                stream_cls=stream_cls,
                options=options,
                retries_taken=retries_taken,
            ),
        )

    # Unwrap generic aliases (e.g. APIResponse[Foo] -> APIResponse).
    origin = get_origin(cast_to) or cast_to

    if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
        # The async client can only produce async response wrappers.
        if not issubclass(origin, AsyncAPIResponse):
            raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")

        response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
        return cast(
            "ResponseT",
            response_cls(
                raw=response,
                client=self,
                # The inner model type is recovered from the generic parameter.
                cast_to=extract_response_type(response_cls),
                stream=stream,
                stream_cls=stream_cls,
                options=options,
                retries_taken=retries_taken,
            ),
        )

    if cast_to == httpx.Response:
        return cast(ResponseT, response)

    api_response = AsyncAPIResponse(
        raw=response,
        client=self,
        cast_to=cast("type[ResponseT]", cast_to),  # pyright: ignore[reportUnnecessaryCast]
        stream=stream,
        stream_cls=stream_cls,
        options=options,
        retries_taken=retries_taken,
    )
    # Any truthy raw-response header value skips parsing and hands the
    # wrapper back to the caller.
    if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
        return cast(ResponseT, api_response)

    return await api_response.parse()
|
| 1744 |
+
|
| 1745 |
+
def _request_api_list(
    self,
    model: Type[_T],
    page: Type[AsyncPageT],
    options: FinalRequestOptions,
) -> AsyncPaginator[_T, AsyncPageT]:
    """Build an awaitable paginator that yields ``model`` items in ``page`` pages."""
    return AsyncPaginator(
        client=self,
        options=options,
        page_cls=page,
        model=model,
    )
|
| 1752 |
+
|
| 1753 |
+
@overload
async def get(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    options: RequestOptions = {},
    stream: Literal[False] = False,
) -> ResponseT: ...

@overload
async def get(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    options: RequestOptions = {},
    stream: Literal[True],
    stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...

@overload
async def get(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    options: RequestOptions = {},
    stream: bool,
    stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...

async def get(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    options: RequestOptions = {},
    stream: bool = False,
    stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
    """Issue a ``GET`` request against ``path``, parsing the body into ``cast_to``."""
    # The shared `{}` default is safe: `options` is only unpacked, never mutated.
    request_options = FinalRequestOptions.construct(method="get", url=path, **options)
    return await self.request(cast_to, request_options, stream=stream, stream_cls=stream_cls)
|
| 1796 |
+
|
| 1797 |
+
@overload
async def post(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    files: RequestFiles | None = None,
    options: RequestOptions = {},
    stream: Literal[False] = False,
) -> ResponseT: ...

@overload
async def post(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    files: RequestFiles | None = None,
    options: RequestOptions = {},
    stream: Literal[True],
    stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...

@overload
async def post(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    files: RequestFiles | None = None,
    options: RequestOptions = {},
    stream: bool,
    stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...

async def post(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    files: RequestFiles | None = None,
    options: RequestOptions = {},
    stream: bool = False,
    stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
    """Issue a ``POST`` request with a JSON ``body`` and optional file uploads."""
    # File objects may require async reads, so convert them before building options.
    httpx_files = await async_to_httpx_files(files)
    request_options = FinalRequestOptions.construct(
        method="post", url=path, json_data=body, files=httpx_files, **options
    )
    return await self.request(cast_to, request_options, stream=stream, stream_cls=stream_cls)
|
| 1850 |
+
|
| 1851 |
+
async def patch(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    options: RequestOptions = {},
) -> ResponseT:
    """Issue a ``PATCH`` request with a JSON ``body`` against ``path``."""
    request_options = FinalRequestOptions.construct(
        method="patch",
        url=path,
        json_data=body,
        **options,
    )
    return await self.request(cast_to, request_options)
|
| 1861 |
+
|
| 1862 |
+
async def put(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    files: RequestFiles | None = None,
    options: RequestOptions = {},
) -> ResponseT:
    """Issue a ``PUT`` request with a JSON ``body`` and optional file uploads."""
    # Convert user-supplied files into httpx's multipart representation first.
    httpx_files = await async_to_httpx_files(files)
    request_options = FinalRequestOptions.construct(
        method="put", url=path, json_data=body, files=httpx_files, **options
    )
    return await self.request(cast_to, request_options)
|
| 1875 |
+
|
| 1876 |
+
async def delete(
    self,
    path: str,
    *,
    cast_to: Type[ResponseT],
    body: Body | None = None,
    options: RequestOptions = {},
) -> ResponseT:
    """Issue a ``DELETE`` request against ``path`` (a JSON ``body`` is allowed)."""
    request_options = FinalRequestOptions.construct(
        method="delete",
        url=path,
        json_data=body,
        **options,
    )
    return await self.request(cast_to, request_options)
|
| 1886 |
+
|
| 1887 |
+
def get_api_list(
    self,
    path: str,
    *,
    model: Type[_T],
    page: Type[AsyncPageT],
    body: Body | None = None,
    options: RequestOptions = {},
    method: str = "get",
) -> AsyncPaginator[_T, AsyncPageT]:
    """Return a paginator over a list endpoint.

    No request is sent here; the returned ``AsyncPaginator`` fetches pages
    lazily when awaited or iterated.
    """
    request_options = FinalRequestOptions.construct(
        method=method,
        url=path,
        json_data=body,
        **options,
    )
    return self._request_api_list(model, page, request_options)
|
| 1899 |
+
|
| 1900 |
+
|
| 1901 |
+
def make_request_options(
    *,
    query: Query | None = None,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    idempotency_key: str | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    post_parser: PostParser | NotGiven = NOT_GIVEN,
) -> RequestOptions:
    """Create a dict of type RequestOptions without keys of NotGiven values."""
    opts: RequestOptions = {}

    if extra_headers is not None:
        opts["headers"] = extra_headers
    if extra_body is not None:
        opts["extra_json"] = cast(AnyMapping, extra_body)

    # `query` is assigned as-is; `extra_query` is then merged on top of it
    # (or of an empty mapping when no base query was given).
    if query is not None:
        opts["params"] = query
    if extra_query is not None:
        opts["params"] = {**opts.get("params", {}), **extra_query}

    if not isinstance(timeout, NotGiven):
        opts["timeout"] = timeout
    if idempotency_key is not None:
        opts["idempotency_key"] = idempotency_key

    if is_given(post_parser):
        # internal
        opts["post_parser"] = post_parser  # type: ignore

    return opts
|
| 1936 |
+
|
| 1937 |
+
|
| 1938 |
+
class ForceMultipartDict(Dict[str, None]):
    """A dict that is truthy even when empty.

    NOTE(review): presumably passed as the ``files`` argument to force
    multipart encoding when there are no actual files — confirm against
    callers elsewhere in the package.
    """

    def __bool__(self) -> bool:
        # Always truthy, regardless of contents.
        return True
|
| 1941 |
+
|
| 1942 |
+
|
| 1943 |
+
class OtherPlatform:
|
| 1944 |
+
def __init__(self, name: str) -> None:
|
| 1945 |
+
self.name = name
|
| 1946 |
+
|
| 1947 |
+
@override
|
| 1948 |
+
def __str__(self) -> str:
|
| 1949 |
+
return f"Other:{self.name}"
|
| 1950 |
+
|
| 1951 |
+
|
| 1952 |
+
# Operating-system value reported in the `X-Stainless-OS` header (see
# `platform_headers`): one of the known literal names, or an `OtherPlatform`
# wrapper for anything `get_platform` cannot classify.
Platform = Union[
    OtherPlatform,
    Literal[
        "MacOS",
        "Linux",
        "Windows",
        "FreeBSD",
        "OpenBSD",
        "iOS",
        "Android",
        "Unknown",
    ],
]
|
| 1965 |
+
|
| 1966 |
+
|
| 1967 |
+
def get_platform() -> Platform:
    """Classify the host operating system for the `X-Stainless-OS` header.

    Never raises: any failure to interrogate the runtime yields ``"Unknown"``.
    """
    try:
        system = platform.system().lower()
        platform_name = platform.platform().lower()
    except Exception:
        return "unknown".capitalize()  # "Unknown"

    # Order matters: iOS devices report a Darwin `system`, so check the
    # platform string for Apple mobile markers before the macOS branch.
    if any(marker in platform_name for marker in ("iphone", "ipad")):
        # Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7
        # system is Darwin and platform_name is a string like:
        # - Darwin-21.6.0-iPhone12,1-64bit
        # - Darwin-21.6.0-iPad7,11-64bit
        return "iOS"

    if system == "darwin":
        return "MacOS"

    if system == "windows":
        return "Windows"

    # Android reports a Linux `system`, so detect it before the Linux branch.
    if "android" in platform_name:
        # Tested using Pydroid 3
        return "Android"

    if system == "linux":
        # https://distro.readthedocs.io/en/latest/#distro.id
        distro_id = distro.id()
        if distro_id == "freebsd":
            return "FreeBSD"
        if distro_id == "openbsd":
            return "OpenBSD"
        return "Linux"

    if platform_name:
        return OtherPlatform(platform_name)

    return "Unknown"
|
| 2007 |
+
|
| 2008 |
+
|
| 2009 |
+
@lru_cache(maxsize=None)
def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]:
    """Build the `X-Stainless-*` telemetry headers sent with requests.

    The unbounded cache is safe here: the key space is tiny (one package
    version and at most a handful of platform values per process).
    """
    return {
        "X-Stainless-Lang": "python",
        "X-Stainless-Package-Version": version,
        # Fall back to runtime detection when the caller did not pre-compute
        # the platform (the async client resolves it eagerly via `asyncify`).
        "X-Stainless-OS": str(platform or get_platform()),
        "X-Stainless-Arch": str(get_architecture()),
        "X-Stainless-Runtime": get_python_runtime(),
        "X-Stainless-Runtime-Version": get_python_version(),
    }
|
| 2019 |
+
|
| 2020 |
+
|
| 2021 |
+
class OtherArch:
|
| 2022 |
+
def __init__(self, name: str) -> None:
|
| 2023 |
+
self.name = name
|
| 2024 |
+
|
| 2025 |
+
@override
|
| 2026 |
+
def __str__(self) -> str:
|
| 2027 |
+
return f"other:{self.name}"
|
| 2028 |
+
|
| 2029 |
+
|
| 2030 |
+
# Architecture value reported in the `X-Stainless-Arch` header: a known
# literal, or an `OtherArch` wrapper for unrecognised machine strings.
Arch = Union[OtherArch, Literal["x32", "x64", "arm", "arm64", "unknown"]]
|
| 2031 |
+
|
| 2032 |
+
|
| 2033 |
+
def get_python_runtime() -> str:
    """Name of the Python implementation (e.g. ``"CPython"``), or ``"unknown"``."""
    try:
        return platform.python_implementation()
    except Exception:
        # Telemetry collection must never raise.
        return "unknown"
|
| 2038 |
+
|
| 2039 |
+
|
| 2040 |
+
def get_python_version() -> str:
    """Interpreter version string (e.g. ``"3.11.4"``), or ``"unknown"``."""
    try:
        return platform.python_version()
    except Exception:
        # Telemetry collection must never raise.
        return "unknown"
|
| 2045 |
+
|
| 2046 |
+
|
| 2047 |
+
def get_architecture() -> Arch:
    """Classify the machine architecture for the `X-Stainless-Arch` header.

    Never raises: failures to interrogate the runtime yield ``"unknown"``.
    """
    try:
        machine_name = platform.machine().lower()
    except Exception:
        return "unknown"

    if machine_name in {"arm64", "aarch64"}:
        return "arm64"

    # TODO: untested
    if machine_name == "arm":
        return "arm"

    if machine_name == "x86_64":
        return "x64"

    # TODO: untested — pointer size marks a 32-bit interpreter on an
    # unrecognised machine string.
    if sys.maxsize <= 2**32:
        return "x32"

    return OtherArch(machine_name) if machine_name else "unknown"
|
| 2071 |
+
|
| 2072 |
+
|
| 2073 |
+
def _merge_mappings(
    obj1: Mapping[_T_co, Union[_T, Omit]],
    obj2: Mapping[_T_co, Union[_T, Omit]],
) -> Dict[_T_co, _T]:
    """Merge two mappings of the same type, removing any values that are instances of `Omit`.

    In cases with duplicate keys the second mapping takes precedence.
    """
    combined: Dict[_T_co, Union[_T, Omit]] = dict(obj1)
    combined.update(obj2)
    # An `Omit` sentinel in either mapping deletes that key from the result.
    return {key: value for key, value in combined.items() if not isinstance(value, Omit)}
|
.venv/lib/python3.11/site-packages/openai/_client.py
ADDED
|
@@ -0,0 +1,565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
from typing import Any, Union, Mapping
|
| 7 |
+
from typing_extensions import Self, override
|
| 8 |
+
|
| 9 |
+
import httpx
|
| 10 |
+
|
| 11 |
+
from . import _exceptions
|
| 12 |
+
from ._qs import Querystring
|
| 13 |
+
from ._types import (
|
| 14 |
+
NOT_GIVEN,
|
| 15 |
+
Omit,
|
| 16 |
+
Timeout,
|
| 17 |
+
NotGiven,
|
| 18 |
+
Transport,
|
| 19 |
+
ProxiesTypes,
|
| 20 |
+
RequestOptions,
|
| 21 |
+
)
|
| 22 |
+
from ._utils import (
|
| 23 |
+
is_given,
|
| 24 |
+
is_mapping,
|
| 25 |
+
get_async_library,
|
| 26 |
+
)
|
| 27 |
+
from ._version import __version__
|
| 28 |
+
from .resources import files, images, models, batches, embeddings, completions, moderations
|
| 29 |
+
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
|
| 30 |
+
from ._exceptions import OpenAIError, APIStatusError
|
| 31 |
+
from ._base_client import (
|
| 32 |
+
DEFAULT_MAX_RETRIES,
|
| 33 |
+
SyncAPIClient,
|
| 34 |
+
AsyncAPIClient,
|
| 35 |
+
)
|
| 36 |
+
from .resources.beta import beta
|
| 37 |
+
from .resources.chat import chat
|
| 38 |
+
from .resources.audio import audio
|
| 39 |
+
from .resources.uploads import uploads
|
| 40 |
+
from .resources.fine_tuning import fine_tuning
|
| 41 |
+
|
| 42 |
+
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class OpenAI(SyncAPIClient):
    """Synchronous client for the OpenAI REST API.

    One attribute per API resource group is attached in ``__init__``;
    credentials and endpoint configuration come from the constructor first
    and environment variables second.
    """

    completions: completions.Completions
    chat: chat.Chat
    embeddings: embeddings.Embeddings
    files: files.Files
    images: images.Images
    audio: audio.Audio
    moderations: moderations.Moderations
    models: models.Models
    fine_tuning: fine_tuning.FineTuning
    beta: beta.Beta
    batches: batches.Batches
    uploads: uploads.Uploads
    with_raw_response: OpenAIWithRawResponse
    with_streaming_response: OpenAIWithStreamedResponse

    # client options
    api_key: str
    organization: str | None
    project: str | None

    websocket_base_url: str | httpx.URL | None
    """Base URL for WebSocket connections.

    If not specified, the default base URL will be used, with 'wss://' replacing the
    'http://' or 'https://' scheme. For example: 'http://example.com' becomes
    'wss://example.com'
    """

    def __init__(
        self,
        *,
        api_key: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        websocket_base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client.
        # We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
        # See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
        http_client: httpx.Client | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a new synchronous openai client instance.

        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
        - `api_key` from `OPENAI_API_KEY`
        - `organization` from `OPENAI_ORG_ID`
        - `project` from `OPENAI_PROJECT_ID`

        Raises:
            OpenAIError: if no API key is provided and `OPENAI_API_KEY` is unset.
        """
        if api_key is None:
            api_key = os.environ.get("OPENAI_API_KEY")
        if api_key is None:
            raise OpenAIError(
                "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
            )
        self.api_key = api_key

        if organization is None:
            organization = os.environ.get("OPENAI_ORG_ID")
        self.organization = organization

        if project is None:
            project = os.environ.get("OPENAI_PROJECT_ID")
        self.project = project

        self.websocket_base_url = websocket_base_url

        if base_url is None:
            base_url = os.environ.get("OPENAI_BASE_URL")
        if base_url is None:
            # Fix: this was previously an f-string with no placeholders (F541).
            base_url = "https://api.openai.com/v1"

        super().__init__(
            version=__version__,
            base_url=base_url,
            max_retries=max_retries,
            timeout=timeout,
            http_client=http_client,
            custom_headers=default_headers,
            custom_query=default_query,
            _strict_response_validation=_strict_response_validation,
        )

        self._default_stream_cls = Stream

        self.completions = completions.Completions(self)
        self.chat = chat.Chat(self)
        self.embeddings = embeddings.Embeddings(self)
        self.files = files.Files(self)
        self.images = images.Images(self)
        self.audio = audio.Audio(self)
        self.moderations = moderations.Moderations(self)
        self.models = models.Models(self)
        self.fine_tuning = fine_tuning.FineTuning(self)
        self.beta = beta.Beta(self)
        self.batches = batches.Batches(self)
        self.uploads = uploads.Uploads(self)
        self.with_raw_response = OpenAIWithRawResponse(self)
        self.with_streaming_response = OpenAIWithStreamedResponse(self)

    @property
    @override
    def qs(self) -> Querystring:
        """Query-string serializer; arrays are encoded with bracket notation."""
        return Querystring(array_format="brackets")

    @property
    @override
    def auth_headers(self) -> dict[str, str]:
        """Bearer-token authorization header derived from `api_key`."""
        api_key = self.api_key
        return {"Authorization": f"Bearer {api_key}"}

    @property
    @override
    def default_headers(self) -> dict[str, str | Omit]:
        """Default headers; `Omit()` drops a header when its option is unset."""
        return {
            **super().default_headers,
            "X-Stainless-Async": "false",
            "OpenAI-Organization": self.organization if self.organization is not None else Omit(),
            "OpenAI-Project": self.project if self.project is not None else Omit(),
            **self._custom_headers,
        }

    def copy(
        self,
        *,
        api_key: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        websocket_base_url: str | httpx.URL | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.Client | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.

        `default_*` arguments merge on top of the current values;
        `set_default_*` arguments replace them wholesale (mutually exclusive).
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client
        return self.__class__(
            api_key=api_key or self.api_key,
            organization=organization or self.organization,
            project=project or self.project,
            websocket_base_url=websocket_base_url or self.websocket_base_url,
            base_url=base_url or self.base_url,
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Map an HTTP status code to the matching typed exception."""
        # The API nests error details under an "error" key when the body is a mapping.
        data = body.get("error", body) if is_mapping(body) else body
        if response.status_code == 400:
            return _exceptions.BadRequestError(err_msg, response=response, body=data)

        if response.status_code == 401:
            return _exceptions.AuthenticationError(err_msg, response=response, body=data)

        if response.status_code == 403:
            return _exceptions.PermissionDeniedError(err_msg, response=response, body=data)

        if response.status_code == 404:
            return _exceptions.NotFoundError(err_msg, response=response, body=data)

        if response.status_code == 409:
            return _exceptions.ConflictError(err_msg, response=response, body=data)

        if response.status_code == 422:
            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data)

        if response.status_code == 429:
            return _exceptions.RateLimitError(err_msg, response=response, body=data)

        if response.status_code >= 500:
            return _exceptions.InternalServerError(err_msg, response=response, body=data)
        return APIStatusError(err_msg, response=response, body=data)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class AsyncOpenAI(AsyncAPIClient):
    """Asynchronous OpenAI API client.

    Mirrors the synchronous `OpenAI` client but issues requests through an
    `httpx.AsyncClient` (see the `http_client` constructor parameter).
    """

    # API resource namespaces; instances are attached in `__init__`.
    completions: completions.AsyncCompletions
    chat: chat.AsyncChat
    embeddings: embeddings.AsyncEmbeddings
    files: files.AsyncFiles
    images: images.AsyncImages
    audio: audio.AsyncAudio
    moderations: moderations.AsyncModerations
    models: models.AsyncModels
    fine_tuning: fine_tuning.AsyncFineTuning
    beta: beta.AsyncBeta
    batches: batches.AsyncBatches
    uploads: uploads.AsyncUploads
    with_raw_response: AsyncOpenAIWithRawResponse
    with_streaming_response: AsyncOpenAIWithStreamedResponse

    # client options
    api_key: str
    organization: str | None
    project: str | None

    websocket_base_url: str | httpx.URL | None
    """Base URL for WebSocket connections.

    If not specified, the default base URL will be used, with 'wss://' replacing the
    'http://' or 'https://' scheme. For example: 'http://example.com' becomes
    'wss://example.com'
    """

    def __init__(
        self,
        *,
        api_key: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        base_url: str | httpx.URL | None = None,
        websocket_base_url: str | httpx.URL | None = None,
        timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
        max_retries: int = DEFAULT_MAX_RETRIES,
        default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        # Configure a custom httpx client.
        # We provide a `DefaultAsyncHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
        # See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details.
        http_client: httpx.AsyncClient | None = None,
        # Enable or disable schema validation for data returned by the API.
        # When enabled an error APIResponseValidationError is raised
        # if the API responds with invalid data for the expected schema.
        #
        # This parameter may be removed or changed in the future.
        # If you rely on this feature, please open a GitHub issue
        # outlining your use-case to help us decide if it should be
        # part of our public interface in the future.
        _strict_response_validation: bool = False,
    ) -> None:
        """Construct a new async openai client instance.

        This automatically infers the following arguments from their corresponding environment variables if they are not provided:
        - `api_key` from `OPENAI_API_KEY`
        - `organization` from `OPENAI_ORG_ID`
        - `project` from `OPENAI_PROJECT_ID`

        Raises:
            OpenAIError: if no API key is supplied either directly or via the
                environment.
        """
        if api_key is None:
            api_key = os.environ.get("OPENAI_API_KEY")
        if api_key is None:
            # An API key is mandatory; fail fast with a descriptive error.
            raise OpenAIError(
                "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable"
            )
        self.api_key = api_key

        if organization is None:
            organization = os.environ.get("OPENAI_ORG_ID")
        self.organization = organization

        if project is None:
            project = os.environ.get("OPENAI_PROJECT_ID")
        self.project = project

        self.websocket_base_url = websocket_base_url

        # Resolution order: explicit argument > OPENAI_BASE_URL env var > production default.
        if base_url is None:
            base_url = os.environ.get("OPENAI_BASE_URL")
        if base_url is None:
            base_url = f"https://api.openai.com/v1"

        super().__init__(
            version=__version__,
            base_url=base_url,
            max_retries=max_retries,
            timeout=timeout,
            http_client=http_client,
            custom_headers=default_headers,
            custom_query=default_query,
            _strict_response_validation=_strict_response_validation,
        )

        self._default_stream_cls = AsyncStream

        # Attach one instance of every API resource namespace.
        self.completions = completions.AsyncCompletions(self)
        self.chat = chat.AsyncChat(self)
        self.embeddings = embeddings.AsyncEmbeddings(self)
        self.files = files.AsyncFiles(self)
        self.images = images.AsyncImages(self)
        self.audio = audio.AsyncAudio(self)
        self.moderations = moderations.AsyncModerations(self)
        self.models = models.AsyncModels(self)
        self.fine_tuning = fine_tuning.AsyncFineTuning(self)
        self.beta = beta.AsyncBeta(self)
        self.batches = batches.AsyncBatches(self)
        self.uploads = uploads.AsyncUploads(self)
        self.with_raw_response = AsyncOpenAIWithRawResponse(self)
        self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)

    @property
    @override
    def qs(self) -> Querystring:
        # Arrays in query strings use bracket notation, e.g. `key[]=value`.
        return Querystring(array_format="brackets")

    @property
    @override
    def auth_headers(self) -> dict[str, str]:
        """Headers carrying the bearer-token credential for every request."""
        api_key = self.api_key
        return {"Authorization": f"Bearer {api_key}"}

    @property
    @override
    def default_headers(self) -> dict[str, str | Omit]:
        # Later entries win, so per-client custom headers override the defaults.
        return {
            **super().default_headers,
            "X-Stainless-Async": f"async:{get_async_library()}",
            "OpenAI-Organization": self.organization if self.organization is not None else Omit(),
            "OpenAI-Project": self.project if self.project is not None else Omit(),
            **self._custom_headers,
        }

    def copy(
        self,
        *,
        api_key: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        websocket_base_url: str | httpx.URL | None = None,
        base_url: str | httpx.URL | None = None,
        timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
        http_client: httpx.AsyncClient | None = None,
        max_retries: int | NotGiven = NOT_GIVEN,
        default_headers: Mapping[str, str] | None = None,
        set_default_headers: Mapping[str, str] | None = None,
        default_query: Mapping[str, object] | None = None,
        set_default_query: Mapping[str, object] | None = None,
        _extra_kwargs: Mapping[str, Any] = {},
    ) -> Self:
        """
        Create a new client instance re-using the same options given to the current client with optional overriding.

        `default_headers`/`default_query` merge on top of the current values,
        whereas `set_default_headers`/`set_default_query` replace them
        entirely; supplying both forms for the same option raises ValueError.
        """
        if default_headers is not None and set_default_headers is not None:
            raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")

        if default_query is not None and set_default_query is not None:
            raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")

        headers = self._custom_headers
        if default_headers is not None:
            headers = {**headers, **default_headers}
        elif set_default_headers is not None:
            headers = set_default_headers

        params = self._custom_query
        if default_query is not None:
            params = {**params, **default_query}
        elif set_default_query is not None:
            params = set_default_query

        http_client = http_client or self._client
        return self.__class__(
            api_key=api_key or self.api_key,
            organization=organization or self.organization,
            project=project or self.project,
            websocket_base_url=websocket_base_url or self.websocket_base_url,
            base_url=base_url or self.base_url,
            # NOT_GIVEN must be distinguished from an explicit None/0 timeout.
            timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
            http_client=http_client,
            max_retries=max_retries if is_given(max_retries) else self.max_retries,
            default_headers=headers,
            default_query=params,
            **_extra_kwargs,
        )

    # Alias for `copy` for nicer inline usage, e.g.
    # client.with_options(timeout=10).foo.create(...)
    with_options = copy

    @override
    def _make_status_error(
        self,
        err_msg: str,
        *,
        body: object,
        response: httpx.Response,
    ) -> APIStatusError:
        """Translate an HTTP error response into the matching typed exception."""
        # When the body is a mapping, the API nests details under an `error` key.
        data = body.get("error", body) if is_mapping(body) else body
        if response.status_code == 400:
            return _exceptions.BadRequestError(err_msg, response=response, body=data)

        if response.status_code == 401:
            return _exceptions.AuthenticationError(err_msg, response=response, body=data)

        if response.status_code == 403:
            return _exceptions.PermissionDeniedError(err_msg, response=response, body=data)

        if response.status_code == 404:
            return _exceptions.NotFoundError(err_msg, response=response, body=data)

        if response.status_code == 409:
            return _exceptions.ConflictError(err_msg, response=response, body=data)

        if response.status_code == 422:
            return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data)

        if response.status_code == 429:
            return _exceptions.RateLimitError(err_msg, response=response, body=data)

        if response.status_code >= 500:
            return _exceptions.InternalServerError(err_msg, response=response, body=data)
        # Any other 4xx status falls back to the generic error type.
        return APIStatusError(err_msg, response=response, body=data)
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
class OpenAIWithRawResponse:
    """Namespace mirroring every `OpenAI` resource with its *WithRawResponse* wrapper.

    Accessed via ``client.with_raw_response``; each attribute wraps the
    corresponding resource of the given *client*.
    """

    def __init__(self, client: OpenAI) -> None:
        self.completions = completions.CompletionsWithRawResponse(client.completions)
        self.chat = chat.ChatWithRawResponse(client.chat)
        self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings)
        self.files = files.FilesWithRawResponse(client.files)
        self.images = images.ImagesWithRawResponse(client.images)
        self.audio = audio.AudioWithRawResponse(client.audio)
        self.moderations = moderations.ModerationsWithRawResponse(client.moderations)
        self.models = models.ModelsWithRawResponse(client.models)
        self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning)
        self.beta = beta.BetaWithRawResponse(client.beta)
        self.batches = batches.BatchesWithRawResponse(client.batches)
        self.uploads = uploads.UploadsWithRawResponse(client.uploads)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
class AsyncOpenAIWithRawResponse:
    """Namespace mirroring every `AsyncOpenAI` resource with its *WithRawResponse* wrapper.

    Accessed via ``client.with_raw_response``; each attribute wraps the
    corresponding resource of the given *client*.
    """

    def __init__(self, client: AsyncOpenAI) -> None:
        self.completions = completions.AsyncCompletionsWithRawResponse(client.completions)
        self.chat = chat.AsyncChatWithRawResponse(client.chat)
        self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings)
        self.files = files.AsyncFilesWithRawResponse(client.files)
        self.images = images.AsyncImagesWithRawResponse(client.images)
        self.audio = audio.AsyncAudioWithRawResponse(client.audio)
        self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations)
        self.models = models.AsyncModelsWithRawResponse(client.models)
        self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning)
        self.beta = beta.AsyncBetaWithRawResponse(client.beta)
        self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
        self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
class OpenAIWithStreamedResponse:
    """Namespace mirroring every `OpenAI` resource with its *WithStreamingResponse* wrapper.

    Accessed via ``client.with_streaming_response``; each attribute wraps the
    corresponding resource of the given *client*.
    """

    def __init__(self, client: OpenAI) -> None:
        self.completions = completions.CompletionsWithStreamingResponse(client.completions)
        self.chat = chat.ChatWithStreamingResponse(client.chat)
        self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings)
        self.files = files.FilesWithStreamingResponse(client.files)
        self.images = images.ImagesWithStreamingResponse(client.images)
        self.audio = audio.AudioWithStreamingResponse(client.audio)
        self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations)
        self.models = models.ModelsWithStreamingResponse(client.models)
        self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning)
        self.beta = beta.BetaWithStreamingResponse(client.beta)
        self.batches = batches.BatchesWithStreamingResponse(client.batches)
        self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
class AsyncOpenAIWithStreamedResponse:
    """Namespace mirroring every `AsyncOpenAI` resource with its *WithStreamingResponse* wrapper.

    Accessed via ``client.with_streaming_response``; each attribute wraps the
    corresponding resource of the given *client*.
    """

    def __init__(self, client: AsyncOpenAI) -> None:
        self.completions = completions.AsyncCompletionsWithStreamingResponse(client.completions)
        self.chat = chat.AsyncChatWithStreamingResponse(client.chat)
        self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings)
        self.files = files.AsyncFilesWithStreamingResponse(client.files)
        self.images = images.AsyncImagesWithStreamingResponse(client.images)
        self.audio = audio.AsyncAudioWithStreamingResponse(client.audio)
        self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations)
        self.models = models.AsyncModelsWithStreamingResponse(client.models)
        self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
        self.beta = beta.AsyncBetaWithStreamingResponse(client.beta)
        self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
        self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
Client = OpenAI
|
| 564 |
+
|
| 565 |
+
AsyncClient = AsyncOpenAI
|
.venv/lib/python3.11/site-packages/openai/_compat.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
|
| 4 |
+
from datetime import date, datetime
|
| 5 |
+
from typing_extensions import Self, Literal
|
| 6 |
+
|
| 7 |
+
import pydantic
|
| 8 |
+
from pydantic.fields import FieldInfo
|
| 9 |
+
|
| 10 |
+
from ._types import IncEx, StrBytesIntFloat
|
| 11 |
+
|
| 12 |
+
_T = TypeVar("_T")
|
| 13 |
+
_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel)
|
| 14 |
+
|
| 15 |
+
# --------------- Pydantic v2 compatibility ---------------
|
| 16 |
+
|
| 17 |
+
# Pyright incorrectly reports some of our functions as overriding a method when they don't
|
| 18 |
+
# pyright: reportIncompatibleMethodOverride=false
|
| 19 |
+
|
| 20 |
+
PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
|
| 21 |
+
|
| 22 |
+
# v1 re-exports
|
| 23 |
+
if TYPE_CHECKING:
|
| 24 |
+
|
| 25 |
+
def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001
|
| 26 |
+
...
|
| 27 |
+
|
| 28 |
+
def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001
|
| 29 |
+
...
|
| 30 |
+
|
| 31 |
+
def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001
|
| 32 |
+
...
|
| 33 |
+
|
| 34 |
+
def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001
|
| 35 |
+
...
|
| 36 |
+
|
| 37 |
+
def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001
|
| 38 |
+
...
|
| 39 |
+
|
| 40 |
+
def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001
|
| 41 |
+
...
|
| 42 |
+
|
| 43 |
+
def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001
|
| 44 |
+
...
|
| 45 |
+
|
| 46 |
+
else:
|
| 47 |
+
if PYDANTIC_V2:
|
| 48 |
+
from pydantic.v1.typing import (
|
| 49 |
+
get_args as get_args,
|
| 50 |
+
is_union as is_union,
|
| 51 |
+
get_origin as get_origin,
|
| 52 |
+
is_typeddict as is_typeddict,
|
| 53 |
+
is_literal_type as is_literal_type,
|
| 54 |
+
)
|
| 55 |
+
from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
|
| 56 |
+
else:
|
| 57 |
+
from pydantic.typing import (
|
| 58 |
+
get_args as get_args,
|
| 59 |
+
is_union as is_union,
|
| 60 |
+
get_origin as get_origin,
|
| 61 |
+
is_typeddict as is_typeddict,
|
| 62 |
+
is_literal_type as is_literal_type,
|
| 63 |
+
)
|
| 64 |
+
from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# refactored config
|
| 68 |
+
if TYPE_CHECKING:
|
| 69 |
+
from pydantic import ConfigDict as ConfigDict
|
| 70 |
+
else:
|
| 71 |
+
if PYDANTIC_V2:
|
| 72 |
+
from pydantic import ConfigDict
|
| 73 |
+
else:
|
| 74 |
+
# TODO: provide an error message here?
|
| 75 |
+
ConfigDict = None
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# renamed methods / properties
|
| 79 |
+
def parse_obj(model: type[_ModelT], value: object) -> _ModelT:
    """Validate *value* into an instance of *model* on either pydantic major version."""
    if not PYDANTIC_V2:
        # `parse_obj` is the deprecated v1 spelling of `model_validate`.
        return cast(_ModelT, model.parse_obj(value))  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
    return model.model_validate(value)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def field_is_required(field: FieldInfo) -> bool:
    """Return True when *field* has no default and must be supplied by the caller."""
    if not PYDANTIC_V2:
        # v1 exposes this as a plain attribute rather than a method.
        return field.required  # type: ignore
    return field.is_required()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def field_get_default(field: FieldInfo) -> Any:
    """Return the default value for *field*, or ``None`` when it has no default.

    Pydantic v2 signals "no default" with the ``PydanticUndefined`` sentinel,
    which is normalised to ``None`` so callers never see the sentinel object.
    """
    value = field.get_default()
    if PYDANTIC_V2:
        from pydantic_core import PydanticUndefined

        # `PydanticUndefined` is a singleton, so identity is the correct test;
        # `==` would invoke the default value's own `__eq__`, which for
        # arbitrary user types could raise or return a misleading truthy value.
        if value is PydanticUndefined:
            return None
        return value
    return value
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def field_outer_type(field: FieldInfo) -> Any:
    """Return the field's declared (outer) type for either pydantic major version."""
    if not PYDANTIC_V2:
        return field.outer_type_  # type: ignore
    return field.annotation
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_model_config(model: type[pydantic.BaseModel]) -> Any:
    """Fetch the model's configuration (`model_config` in v2, `__config__` in v1)."""
    if not PYDANTIC_V2:
        return model.__config__  # type: ignore
    return model.model_config
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
    """Return the model's field mapping (`model_fields` in v2, `__fields__` in v1)."""
    if not PYDANTIC_V2:
        return model.__fields__  # type: ignore
    return model.model_fields
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
    """Copy a model instance, optionally deep-copying nested data."""
    if not PYDANTIC_V2:
        return model.copy(deep=deep)  # type: ignore
    return model.model_copy(deep=deep)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
    """Serialise *model* to a JSON string, optionally pretty-printed."""
    if not PYDANTIC_V2:
        return model.json(indent=indent)  # type: ignore
    return model.model_dump_json(indent=indent)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def model_dump(
    model: pydantic.BaseModel,
    *,
    exclude: IncEx | None = None,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
    warnings: bool = True,
    mode: Literal["json", "python"] = "python",
) -> dict[str, Any]:
    """Dump *model* to a plain dict, bridging pydantic v1 and v2.

    On v2 (or any model exposing `model_dump`) the modern API is used; on v1
    we fall back to the deprecated `.dict()` method, which supports neither
    `mode` nor `warnings`.
    """
    # The `hasattr` check also catches v2-style models on a v1 install.
    if PYDANTIC_V2 or hasattr(model, "model_dump"):
        return model.model_dump(
            mode=mode,
            exclude=exclude,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            # warnings are not supported in Pydantic v1
            warnings=warnings if PYDANTIC_V2 else True,
        )
    return cast(
        "dict[str, Any]",
        model.dict(  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
            exclude=exclude,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
        ),
    )
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def model_parse(model: type[_ModelT], data: Any) -> _ModelT:
    """Parse an already-decoded Python object into *model*."""
    if not PYDANTIC_V2:
        return model.parse_obj(data)  # pyright: ignore[reportDeprecated]
    return model.model_validate(data)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def model_parse_json(model: type[_ModelT], data: str | bytes) -> _ModelT:
    """Parse a raw JSON string/bytes payload into *model*."""
    if not PYDANTIC_V2:
        return model.parse_raw(data)  # pyright: ignore[reportDeprecated]
    return model.model_validate_json(data)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def model_json_schema(model: type[_ModelT]) -> dict[str, Any]:
    """Return the JSON schema describing *model*."""
    if not PYDANTIC_V2:
        return model.schema()  # pyright: ignore[reportDeprecated]
    return model.model_json_schema()
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
# generic models
#
# Provides a single `GenericModel` base usable on both pydantic majors:
# at type-check time it is just `pydantic.BaseModel`; at runtime v1 needs
# the dedicated `pydantic.generics.GenericModel` mixin.
if TYPE_CHECKING:

    class GenericModel(pydantic.BaseModel): ...

else:
    if PYDANTIC_V2:
        # there no longer needs to be a distinction in v2 but
        # we still have to create our own subclass to avoid
        # inconsistent MRO ordering errors
        class GenericModel(pydantic.BaseModel): ...

    else:
        import pydantic.generics

        class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ...
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
# cached properties
#
# At runtime both `cached_property` and `typed_cached_property` resolve to
# `functools.cached_property`; the TYPE_CHECKING branch only shapes what
# editors and type checkers see.
if TYPE_CHECKING:
    cached_property = property

    # we define a separate type (copied from typeshed)
    # that represents that `cached_property` is `set`able
    # at runtime, which differs from `@property`.
    #
    # this is a separate type as editors likely special case
    # `@property` and we don't want to cause issues just to have
    # more helpful internal types.

    class typed_cached_property(Generic[_T]):
        func: Callable[[Any], _T]
        attrname: str | None

        def __init__(self, func: Callable[[Any], _T]) -> None: ...

        @overload
        def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ...

        @overload
        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ...

        def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self:
            raise NotImplementedError()

        def __set_name__(self, owner: type[Any], name: str) -> None: ...

        # __set__ is not defined at runtime, but @cached_property is designed to be settable
        def __set__(self, instance: object, value: _T) -> None: ...
else:
    from functools import cached_property as cached_property

    typed_cached_property = cached_property
|
.venv/lib/python3.11/site-packages/openai/_constants.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
import httpx
|
| 4 |
+
|
| 5 |
+
# Internal headers used to pass options through the request pipeline.
RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response"
OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to"

# default timeout is 10 minutes
DEFAULT_TIMEOUT = httpx.Timeout(timeout=600, connect=5.0)
DEFAULT_MAX_RETRIES = 2
DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=1000, max_keepalive_connections=100)

# NOTE(review): presumably the min/max retry backoff delays in seconds —
# confirm against the retry logic in `_base_client`.
INITIAL_RETRY_DELAY = 0.5
MAX_RETRY_DELAY = 8.0
|
.venv/lib/python3.11/site-packages/openai/_exceptions.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import TYPE_CHECKING, Any, Optional, cast
|
| 6 |
+
from typing_extensions import Literal
|
| 7 |
+
|
| 8 |
+
import httpx
|
| 9 |
+
|
| 10 |
+
from ._utils import is_dict
|
| 11 |
+
from ._models import construct_type
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from .types.chat import ChatCompletion
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"BadRequestError",
|
| 18 |
+
"AuthenticationError",
|
| 19 |
+
"PermissionDeniedError",
|
| 20 |
+
"NotFoundError",
|
| 21 |
+
"ConflictError",
|
| 22 |
+
"UnprocessableEntityError",
|
| 23 |
+
"RateLimitError",
|
| 24 |
+
"InternalServerError",
|
| 25 |
+
"LengthFinishReasonError",
|
| 26 |
+
"ContentFilterFinishReasonError",
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class OpenAIError(Exception):
    """Base class for every exception raised by this library."""

    pass
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class APIError(OpenAIError):
    """Base class for errors tied to a specific HTTP request."""

    message: str
    request: httpx.Request

    body: object | None
    """The API response body.

    If the API responded with a valid JSON structure then this property will be the
    decoded result.

    If it isn't a valid JSON structure then this will be the raw response.

    If there was no response associated with this error then it will be `None`.
    """

    # Standard OpenAI error fields, extracted from a dict body when available.
    code: Optional[str] = None
    param: Optional[str] = None
    type: Optional[str]

    def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None:
        super().__init__(message)
        self.request = request
        self.message = message
        self.body = body

        # Best-effort extraction of `code`/`param`/`type` from a dict body;
        # any non-dict body leaves all three as None.
        if is_dict(body):
            self.code = cast(Any, construct_type(type_=Optional[str], value=body.get("code")))
            self.param = cast(Any, construct_type(type_=Optional[str], value=body.get("param")))
            self.type = cast(Any, construct_type(type_=str, value=body.get("type")))
        else:
            self.code = None
            self.param = None
            self.type = None
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class APIResponseValidationError(APIError):
    """Raised when the API responds with data that fails schema validation."""

    response: httpx.Response
    status_code: int

    def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None:
        super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body)
        self.response = response
        self.status_code = response.status_code
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class APIStatusError(APIError):
    """Raised when an API response has a status code of 4xx or 5xx."""

    response: httpx.Response
    status_code: int
    request_id: str | None  # value of the `x-request-id` response header, if present

    def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
        super().__init__(message, response.request, body=body)
        self.response = response
        self.status_code = response.status_code
        self.request_id = response.headers.get("x-request-id")
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class APIConnectionError(APIError):
    """Raised when a request never produces a response (network-level failure)."""

    def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
        # No response exists for connection failures, hence `body=None`.
        super().__init__(message, request, body=None)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class APITimeoutError(APIConnectionError):
    """Raised when a request times out; a specialisation of `APIConnectionError`."""

    def __init__(self, request: httpx.Request) -> None:
        super().__init__(message="Request timed out.", request=request)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class BadRequestError(APIStatusError):
    """Raised for HTTP 400 responses (malformed or invalid request)."""

    status_code: Literal[400] = 400  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class AuthenticationError(APIStatusError):
    """Raised for HTTP 401 responses (missing or invalid credentials)."""

    status_code: Literal[401] = 401  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class PermissionDeniedError(APIStatusError):
    """Raised for HTTP 403 responses (authenticated but not authorized)."""

    status_code: Literal[403] = 403  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class NotFoundError(APIStatusError):
    """Raised for HTTP 404 responses (resource does not exist)."""

    status_code: Literal[404] = 404  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class ConflictError(APIStatusError):
    """Raised for HTTP 409 responses (request conflicts with current state)."""

    status_code: Literal[409] = 409  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class UnprocessableEntityError(APIStatusError):
    """Raised for HTTP 422 responses (well-formed request with semantic errors)."""

    status_code: Literal[422] = 422  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class RateLimitError(APIStatusError):
    """Raised for HTTP 429 responses (rate limit exceeded)."""

    status_code: Literal[429] = 429  # pyright: ignore[reportIncompatibleVariableOverride]
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class InternalServerError(APIStatusError):
    """Raised for any 5xx server-side error response.

    No fixed ``status_code`` override: this covers the whole 5xx range.
    """

    pass
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class LengthFinishReasonError(OpenAIError):
    """Raised when a completion could not be parsed because generation stopped at the length limit."""

    completion: ChatCompletion
    """The completion that caused this error.

    Note: this will *not* be a complete `ChatCompletion` object when streaming as `usage`
    will not be included.
    """

    def __init__(self, *, completion: ChatCompletion) -> None:
        base = "Could not parse response content as the length limit was reached"
        # Append the token-usage details when the completion carries them.
        suffix = f" - {completion.usage}" if completion.usage else ""
        super().__init__(base + suffix)
        self.completion = completion
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class ContentFilterFinishReasonError(OpenAIError):
    """Raised when a completion could not be parsed because the content filter rejected the request."""

    def __init__(self) -> None:
        # The message contains no placeholders, so the stray ``f`` prefix
        # (ruff F541) has been dropped; the text is unchanged.
        super().__init__(
            "Could not parse response content as the request was rejected by the content filter",
        )
|
.venv/lib/python3.11/site-packages/openai/_files.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import io
|
| 4 |
+
import os
|
| 5 |
+
import pathlib
|
| 6 |
+
from typing import overload
|
| 7 |
+
from typing_extensions import TypeGuard
|
| 8 |
+
|
| 9 |
+
import anyio
|
| 10 |
+
|
| 11 |
+
from ._types import (
|
| 12 |
+
FileTypes,
|
| 13 |
+
FileContent,
|
| 14 |
+
RequestFiles,
|
| 15 |
+
HttpxFileTypes,
|
| 16 |
+
Base64FileInput,
|
| 17 |
+
HttpxFileContent,
|
| 18 |
+
HttpxRequestFiles,
|
| 19 |
+
)
|
| 20 |
+
from ._utils import is_tuple_t, is_mapping_t, is_sequence_t
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]:
    """Return True when *obj* is a file-like object or a filesystem path."""
    return isinstance(obj, (io.IOBase, os.PathLike))
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def is_file_content(obj: object) -> TypeGuard[FileContent]:
    """Return True when *obj* is acceptable raw content for a file upload."""
    return isinstance(obj, (bytes, tuple, io.IOBase, os.PathLike))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
    """Raise ``RuntimeError`` when *obj* is not valid file content.

    ``key`` names the offending entry when the file came from a mapping.
    """
    if is_file_content(obj):
        return

    if key is None:
        prefix = f"Expected file input `{obj!r}`"
    else:
        prefix = f"Expected entry at `{key}`"
    raise RuntimeError(
        f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/main#file-uploads"
    ) from None
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@overload
def to_httpx_files(files: None) -> None: ...


@overload
def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...


def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
    """Convert our ``RequestFiles`` value into the shape ``httpx`` expects.

    ``None`` passes straight through; each file entry in a mapping or a
    sequence of pairs is normalised via ``_transform_file``.
    """
    if files is None:
        return None

    if is_mapping_t(files):
        return {name: _transform_file(entry) for name, entry in files.items()}
    if is_sequence_t(files):
        return [(name, _transform_file(entry)) for name, entry in files]
    raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _transform_file(file: FileTypes) -> HttpxFileTypes:
    """Normalise a single user-supplied file entry for httpx.

    Bare paths are eagerly read into a ``(filename, bytes)`` pair; tuples
    have their content element read; other accepted content passes through.
    """
    if is_file_content(file):
        if isinstance(file, os.PathLike):
            path = pathlib.Path(file)
            return (path.name, path.read_bytes())

        return file

    if is_tuple_t(file):
        return (file[0], _read_file_content(file[1]), *file[2:])

    # No placeholders in the message, so it is a plain literal (the stray
    # ``f`` prefix, ruff F541, has been dropped; the text is unchanged).
    raise TypeError("Expected file types input to be a FileContent type or to be a tuple")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def _read_file_content(file: FileContent) -> HttpxFileContent:
|
| 78 |
+
if isinstance(file, os.PathLike):
|
| 79 |
+
return pathlib.Path(file).read_bytes()
|
| 80 |
+
return file
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@overload
async def async_to_httpx_files(files: None) -> None: ...


@overload
async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ...


async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None:
    """Async variant of ``to_httpx_files``; reads path contents without blocking.

    ``None`` passes straight through; each file entry in a mapping or a
    sequence of pairs is normalised via ``_async_transform_file``.
    """
    if files is None:
        return None

    if is_mapping_t(files):
        files = {key: await _async_transform_file(file) for key, file in files.items()}
    elif is_sequence_t(files):
        files = [(key, await _async_transform_file(file)) for key, file in files]
    else:
        # Bug fix: this literal was missing its ``f`` prefix, so callers saw
        # the raw text "{type(files)}" instead of the actual type (the sync
        # twin `to_httpx_files` already uses an f-string here).
        raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence")

    return files
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
    """Async twin of ``_transform_file``: normalise one file entry for httpx.

    Bare paths are read via ``anyio`` into a ``(filename, bytes)`` pair;
    tuples have their content element read; other accepted content passes
    through unchanged.
    """
    if is_file_content(file):
        if isinstance(file, os.PathLike):
            path = anyio.Path(file)
            return (path.name, await path.read_bytes())

        return file

    if is_tuple_t(file):
        return (file[0], await _async_read_file_content(file[1]), *file[2:])

    # No placeholders in the message, so it is a plain literal (the stray
    # ``f`` prefix, ruff F541, has been dropped; the text is unchanged).
    raise TypeError("Expected file types input to be a FileContent type or to be a tuple")
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
|
| 120 |
+
if isinstance(file, os.PathLike):
|
| 121 |
+
return await anyio.Path(file).read_bytes()
|
| 122 |
+
|
| 123 |
+
return file
|
.venv/lib/python3.11/site-packages/openai/_legacy_response.py
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import inspect
|
| 5 |
+
import logging
|
| 6 |
+
import datetime
|
| 7 |
+
import functools
|
| 8 |
+
from typing import (
|
| 9 |
+
TYPE_CHECKING,
|
| 10 |
+
Any,
|
| 11 |
+
Union,
|
| 12 |
+
Generic,
|
| 13 |
+
TypeVar,
|
| 14 |
+
Callable,
|
| 15 |
+
Iterator,
|
| 16 |
+
AsyncIterator,
|
| 17 |
+
cast,
|
| 18 |
+
overload,
|
| 19 |
+
)
|
| 20 |
+
from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin
|
| 21 |
+
|
| 22 |
+
import anyio
|
| 23 |
+
import httpx
|
| 24 |
+
import pydantic
|
| 25 |
+
|
| 26 |
+
from ._types import NoneType
|
| 27 |
+
from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type
|
| 28 |
+
from ._models import BaseModel, is_basemodel, add_request_id
|
| 29 |
+
from ._constants import RAW_RESPONSE_HEADER
|
| 30 |
+
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
|
| 31 |
+
from ._exceptions import APIResponseValidationError
|
| 32 |
+
|
| 33 |
+
if TYPE_CHECKING:
|
| 34 |
+
from ._models import FinalRequestOptions
|
| 35 |
+
from ._base_client import BaseClient
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
P = ParamSpec("P")
|
| 39 |
+
R = TypeVar("R")
|
| 40 |
+
_T = TypeVar("_T")
|
| 41 |
+
|
| 42 |
+
log: logging.Logger = logging.getLogger(__name__)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class LegacyAPIResponse(Generic[R]):
|
| 46 |
+
"""This is a legacy class as it will be replaced by `APIResponse`
|
| 47 |
+
and `AsyncAPIResponse` in the `_response.py` file in the next major
|
| 48 |
+
release.
|
| 49 |
+
|
| 50 |
+
For the sync client this will mostly be the same with the exception
|
| 51 |
+
of `content` & `text` will be methods instead of properties. In the
|
| 52 |
+
async client, all methods will be async.
|
| 53 |
+
|
| 54 |
+
A migration script will be provided & the migration in general should
|
| 55 |
+
be smooth.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
_cast_to: type[R]
|
| 59 |
+
_client: BaseClient[Any, Any]
|
| 60 |
+
_parsed_by_type: dict[type[Any], Any]
|
| 61 |
+
_stream: bool
|
| 62 |
+
_stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
|
| 63 |
+
_options: FinalRequestOptions
|
| 64 |
+
|
| 65 |
+
http_response: httpx.Response
|
| 66 |
+
|
| 67 |
+
retries_taken: int
|
| 68 |
+
"""The number of retries made. If no retries happened this will be `0`"""
|
| 69 |
+
|
| 70 |
+
def __init__(
    self,
    *,
    raw: httpx.Response,
    cast_to: type[R],
    client: BaseClient[Any, Any],
    stream: bool,
    stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
    options: FinalRequestOptions,
    retries_taken: int = 0,
) -> None:
    """Capture everything needed to lazily parse the already-received response.

    Args:
        raw: the received ``httpx.Response``.
        cast_to: the type the body should be parsed into by ``.parse()``.
        client: the owning client, used for response post-processing.
        stream: whether this is a streaming response.
        stream_cls: concrete stream wrapper class when streaming, else ``None``.
        options: the final request options this response answers.
        retries_taken: number of retries performed before success.
    """
    self._cast_to = cast_to
    self._client = client
    # parse results are memoised per requested target type (see ``parse``)
    self._parsed_by_type = {}
    self._stream = stream
    self._stream_cls = stream_cls
    self._options = options
    self.http_response = raw
    self.retries_taken = retries_taken
|
| 89 |
+
|
| 90 |
+
@property
def request_id(self) -> str | None:
    """The server-assigned ``x-request-id`` header value, if present."""
    return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]
|
| 93 |
+
|
| 94 |
+
@overload
def parse(self, *, to: type[_T]) -> _T: ...

@overload
def parse(self) -> R: ...

def parse(self, *, to: type[_T] | None = None) -> R | _T:
    """Returns the rich python representation of this response's data.

    NOTE: For the async client: this will become a coroutine in the next major version.

    For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

    You can customise the type that the response is parsed into through
    the `to` argument, e.g.

    ```py
    from openai import BaseModel


    class MyModel(BaseModel):
        foo: str


    obj = response.parse(to=MyModel)
    print(obj.foo)
    ```

    We support parsing:
    - `BaseModel`
    - `dict`
    - `list`
    - `Union`
    - `str`
    - `int`
    - `float`
    - `httpx.Response`
    """
    # Results are memoised per target type so repeated parse() calls are cheap.
    cache_key = to if to is not None else self._cast_to
    cached = self._parsed_by_type.get(cache_key)
    if cached is not None:
        return cached  # type: ignore[no-any-return]

    parsed = self._parse(to=to)
    # Caller-supplied post-processing hook, applied before caching.
    if is_given(self._options.post_parser):
        parsed = self._options.post_parser(parsed)

    # Stamp the request id onto model results for easier debugging.
    if isinstance(parsed, BaseModel):
        add_request_id(parsed, self.request_id)

    self._parsed_by_type[cache_key] = parsed
    return cast(R, parsed)
|
| 146 |
+
|
| 147 |
+
@property
def headers(self) -> httpx.Headers:
    """The HTTP headers of the underlying response."""
    return self.http_response.headers

@property
def http_request(self) -> httpx.Request:
    """The ``httpx.Request`` that produced this response."""
    return self.http_response.request

@property
def status_code(self) -> int:
    """The HTTP status code of the response."""
    return self.http_response.status_code

@property
def url(self) -> httpx.URL:
    """The URL the request was sent to."""
    return self.http_response.url

@property
def method(self) -> str:
    """The HTTP method of the originating request."""
    return self.http_request.method

@property
def content(self) -> bytes:
    """Return the binary response content.

    NOTE: this will be removed in favour of `.read()` in the
    next major version.
    """
    return self.http_response.content

@property
def text(self) -> str:
    """Return the decoded response content.

    NOTE: this will be turned into a method in the next major version.
    """
    return self.http_response.text

@property
def http_version(self) -> str:
    """The HTTP protocol version used, e.g. ``"HTTP/1.1"``."""
    return self.http_response.http_version

@property
def is_closed(self) -> bool:
    """Whether the underlying response stream has been closed."""
    return self.http_response.is_closed

@property
def elapsed(self) -> datetime.timedelta:
    """The time taken for the complete request/response cycle to complete."""
    return self.http_response.elapsed
|
| 196 |
+
|
| 197 |
+
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
    """Convert the raw HTTP response into ``to`` (or the stored ``_cast_to``).

    Handles, in order: stream wrapping, ``None``/primitive targets, binary
    content wrappers, ``httpx.Response`` pass-through, model/collection
    parsing via the client, and non-JSON fallbacks.
    """
    cast_to = to if to is not None else self._cast_to

    # unwrap `TypeAlias('Name', T)` -> `T`
    if is_type_alias_type(cast_to):
        cast_to = cast_to.__value__  # type: ignore[unreachable]

    # unwrap `Annotated[T, ...]` -> `T`
    if cast_to and is_annotated_type(cast_to):
        cast_to = extract_type_arg(cast_to, 0)

    # for generics like list[int], dispatch on the bare origin (list)
    origin = get_origin(cast_to) or cast_to

    if self._stream:
        # a custom `to` must itself be a stream class wrapping the chunk type
        if to:
            if not is_stream_class_type(to):
                raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")

            return cast(
                _T,
                to(
                    cast_to=extract_stream_chunk_type(
                        to,
                        failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
                    ),
                    response=self.http_response,
                    client=cast(Any, self._client),
                ),
            )

        if self._stream_cls:
            return cast(
                R,
                self._stream_cls(
                    cast_to=extract_stream_chunk_type(self._stream_cls),
                    response=self.http_response,
                    client=cast(Any, self._client),
                ),
            )

        # fall back to the client-wide default stream class, if configured
        stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
        if stream_cls is None:
            raise MissingStreamClassError()

        return cast(
            R,
            stream_cls(
                cast_to=cast_to,
                response=self.http_response,
                client=cast(Any, self._client),
            ),
        )

    if cast_to is NoneType:
        return cast(R, None)

    # primitive targets are decoded straight from the response text
    response = self.http_response
    if cast_to == str:
        return cast(R, response.text)

    if cast_to == int:
        return cast(R, int(response.text))

    if cast_to == float:
        return cast(R, float(response.text))

    if cast_to == bool:
        return cast(R, response.text.lower() == "true")

    if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
        return cast(R, cast_to(response))  # type: ignore

    if origin == LegacyAPIResponse:
        raise RuntimeError("Unexpected state - cast_to is `APIResponse`")

    if inspect.isclass(
        origin  # pyright: ignore[reportUnknownArgumentType]
    ) and issubclass(origin, httpx.Response):
        # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
        # and pass that class to our request functions. We cannot change the variance to be either
        # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
        # the response class ourselves but that is something that should be supported directly in httpx
        # as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
        if cast_to != httpx.Response:
            # NOTE(review): the ``f`` prefix below is redundant (no placeholders) — ruff F541
            raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
        return cast(R, response)

    if (
        inspect.isclass(
            origin  # pyright: ignore[reportUnknownArgumentType]
        )
        and not issubclass(origin, BaseModel)
        and issubclass(origin, pydantic.BaseModel)
    ):
        raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")

    if (
        cast_to is not object
        and not origin is list
        and not origin is dict
        and not origin is Union
        and not issubclass(origin, BaseModel)
    ):
        raise RuntimeError(
            f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
        )

    # split is required to handle cases where additional information is included
    # in the response, e.g. application/json; charset=utf-8
    content_type, *_ = response.headers.get("content-type", "*").split(";")
    if content_type != "application/json":
        # best effort: some servers mislabel JSON, so still try when a model was requested
        if is_basemodel(cast_to):
            try:
                data = response.json()
            except Exception as exc:
                log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
            else:
                return self._client._process_response_data(
                    data=data,
                    cast_to=cast_to,  # type: ignore
                    response=response,
                )

        if self._client._strict_response_validation:
            raise APIResponseValidationError(
                response=response,
                message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
                body=response.text,
            )

        # If the API responds with content that isn't JSON then we just return
        # the (decoded) text without performing any parsing so that you can still
        # handle the response however you need to.
        return response.text  # type: ignore

    data = response.json()

    return self._client._process_response_data(
        data=data,
        cast_to=cast_to,  # type: ignore
        response=response,
    )
|
| 339 |
+
|
| 340 |
+
@override
def __repr__(self) -> str:
    # Deliberately says "APIResponse": this legacy class mirrors the new API surface.
    return f"<APIResponse [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
class MissingStreamClassError(TypeError):
    """Raised when ``stream=True`` was requested but no ``stream_cls`` was supplied."""

    def __init__(self) -> None:
        message = (
            "The `stream` argument was set to `True` but the `stream_cls` argument was not given. "
            "See `openai._streaming` for reference"
        )
        super().__init__(message)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
        # copy the caller's extra headers (if any) and flag the raw-response mode
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "true"
        kwargs["extra_headers"] = headers

        return cast(LegacyAPIResponse[R], func(*args, **kwargs))

    return wrapped
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[LegacyAPIResponse[R]]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]:
        # copy the caller's extra headers (if any) and flag the raw-response mode
        headers: dict[str, str] = dict(cast(Any, kwargs.get("extra_headers")) or {})
        headers[RAW_RESPONSE_HEADER] = "true"
        kwargs["extra_headers"] = headers

        return cast(LegacyAPIResponse[R], await func(*args, **kwargs))

    return wrapped
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
class HttpxBinaryResponseContent:
    """Thin wrapper around a binary ``httpx.Response``.

    Exposes both sync and async accessors for the response body; every
    method delegates directly to the wrapped ``httpx.Response``.
    """

    # The wrapped HTTP response.
    response: httpx.Response

    def __init__(self, response: httpx.Response) -> None:
        self.response = response

    @property
    def content(self) -> bytes:
        """The full response body as bytes."""
        return self.response.content

    @property
    def text(self) -> str:
        """The response body decoded as text."""
        return self.response.text

    @property
    def encoding(self) -> str | None:
        """The encoding used to decode the body, if known."""
        return self.response.encoding

    @property
    def charset_encoding(self) -> str | None:
        """The charset taken from the ``Content-Type`` header, if any."""
        return self.response.charset_encoding

    def json(self, **kwargs: Any) -> Any:
        """Parse the body as JSON; ``kwargs`` are forwarded to ``httpx``."""
        return self.response.json(**kwargs)

    def read(self) -> bytes:
        """Read and return the entire response body."""
        return self.response.read()

    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
        """Iterate over the body as byte chunks."""
        return self.response.iter_bytes(chunk_size)

    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
        """Iterate over the body as decoded text chunks."""
        return self.response.iter_text(chunk_size)

    def iter_lines(self) -> Iterator[str]:
        """Iterate over the body line by line."""
        return self.response.iter_lines()

    def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]:
        """Iterate over the raw (undecoded) byte stream."""
        return self.response.iter_raw(chunk_size)

    def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `client.with_streaming_response.foo().stream_to_file('my_filename.txt')`
        """
        with open(file, mode="wb") as f:
            for data in self.response.iter_bytes():
                f.write(data)

    @deprecated(
        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
    )
    def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Deprecated: writes the (already buffered) body to *file* in chunks."""
        with open(file, mode="wb") as f:
            for data in self.response.iter_bytes(chunk_size):
                f.write(data)

    def close(self) -> None:
        """Close the underlying response."""
        return self.response.close()

    async def aread(self) -> bytes:
        """Asynchronously read and return the entire response body."""
        return await self.response.aread()

    async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        """Asynchronously iterate over the body as byte chunks."""
        return self.response.aiter_bytes(chunk_size)

    async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
        """Asynchronously iterate over the body as decoded text chunks."""
        return self.response.aiter_text(chunk_size)

    async def aiter_lines(self) -> AsyncIterator[str]:
        """Asynchronously iterate over the body line by line."""
        return self.response.aiter_lines()

    async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        """Asynchronously iterate over the raw (undecoded) byte stream."""
        return self.response.aiter_raw(chunk_size)

    @deprecated(
        "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead"
    )
    async def astream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Deprecated: asynchronously writes the body to *file* in chunks."""
        path = anyio.Path(file)
        async with await path.open(mode="wb") as f:
            async for data in self.response.aiter_bytes(chunk_size):
                await f.write(data)

    async def aclose(self) -> None:
        """Asynchronously close the underlying response."""
        return await self.response.aclose()
|
.venv/lib/python3.11/site-packages/openai/_models.py
ADDED
|
@@ -0,0 +1,835 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import inspect
|
| 5 |
+
from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast
|
| 6 |
+
from datetime import date, datetime
|
| 7 |
+
from typing_extensions import (
|
| 8 |
+
Unpack,
|
| 9 |
+
Literal,
|
| 10 |
+
ClassVar,
|
| 11 |
+
Protocol,
|
| 12 |
+
Required,
|
| 13 |
+
Sequence,
|
| 14 |
+
ParamSpec,
|
| 15 |
+
TypedDict,
|
| 16 |
+
TypeGuard,
|
| 17 |
+
final,
|
| 18 |
+
override,
|
| 19 |
+
runtime_checkable,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
import pydantic
|
| 23 |
+
import pydantic.generics
|
| 24 |
+
from pydantic.fields import FieldInfo
|
| 25 |
+
|
| 26 |
+
from ._types import (
|
| 27 |
+
Body,
|
| 28 |
+
IncEx,
|
| 29 |
+
Query,
|
| 30 |
+
ModelT,
|
| 31 |
+
Headers,
|
| 32 |
+
Timeout,
|
| 33 |
+
NotGiven,
|
| 34 |
+
AnyMapping,
|
| 35 |
+
HttpxRequestFiles,
|
| 36 |
+
)
|
| 37 |
+
from ._utils import (
|
| 38 |
+
PropertyInfo,
|
| 39 |
+
is_list,
|
| 40 |
+
is_given,
|
| 41 |
+
json_safe,
|
| 42 |
+
lru_cache,
|
| 43 |
+
is_mapping,
|
| 44 |
+
parse_date,
|
| 45 |
+
coerce_boolean,
|
| 46 |
+
parse_datetime,
|
| 47 |
+
strip_not_given,
|
| 48 |
+
extract_type_arg,
|
| 49 |
+
is_annotated_type,
|
| 50 |
+
is_type_alias_type,
|
| 51 |
+
strip_annotated_type,
|
| 52 |
+
)
|
| 53 |
+
from ._compat import (
|
| 54 |
+
PYDANTIC_V2,
|
| 55 |
+
ConfigDict,
|
| 56 |
+
GenericModel as BaseGenericModel,
|
| 57 |
+
get_args,
|
| 58 |
+
is_union,
|
| 59 |
+
parse_obj,
|
| 60 |
+
get_origin,
|
| 61 |
+
is_literal_type,
|
| 62 |
+
get_model_config,
|
| 63 |
+
get_model_fields,
|
| 64 |
+
field_get_default,
|
| 65 |
+
)
|
| 66 |
+
from ._constants import RAW_RESPONSE_HEADER
|
| 67 |
+
|
| 68 |
+
if TYPE_CHECKING:
|
| 69 |
+
from pydantic_core.core_schema import ModelField, LiteralSchema, ModelFieldsSchema
|
| 70 |
+
|
| 71 |
+
__all__ = ["BaseModel", "GenericModel"]
|
| 72 |
+
|
| 73 |
+
_T = TypeVar("_T")
|
| 74 |
+
_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel")
|
| 75 |
+
|
| 76 |
+
P = ParamSpec("P")
|
| 77 |
+
|
| 78 |
+
ReprArgs = Sequence[Tuple[Optional[str], Any]]
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@runtime_checkable
class _ConfigProtocol(Protocol):
    # Structural type matching Pydantic v1's class-based `Config` object.
    # Used at runtime (via `isinstance`) to distinguish a v1 config class
    # from a v2 `ConfigDict` mapping, which has no such attribute.
    allow_population_by_field_name: bool
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class BaseModel(pydantic.BaseModel):
    """Shared base class for the SDK's request/response models.

    Works under both Pydantic v1 and v2: extra fields returned by the API are
    allowed (and preserved) rather than rejected, and v2-style methods such as
    `model_dump()` / `model_dump_json()` are shimmed on top of v1 so callers
    can rely on a single API regardless of the installed pydantic version.
    """

    if PYDANTIC_V2:
        # `defer_build` avoids eagerly building pydantic-core schemas at import
        # time; overridable via the DEFER_PYDANTIC_BUILD environment variable.
        model_config: ClassVar[ConfigDict] = ConfigDict(
            extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true"))
        )
    else:

        @property
        @override
        def model_fields_set(self) -> set[str]:
            # a forwards-compat shim for pydantic v2
            return self.__fields_set__  # type: ignore

        class Config(pydantic.BaseConfig):  # pyright: ignore[reportDeprecated]
            extra: Any = pydantic.Extra.allow  # type: ignore

    @override
    def __repr_args__(self) -> ReprArgs:
        # we don't want these attributes to be included when something like `rich.print` is used
        return [arg for arg in super().__repr_args__() if arg[0] not in {"_request_id", "__exclude_fields__"}]

    if TYPE_CHECKING:
        _request_id: Optional[str] = None
        """The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.

        This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:

        ```py
        completion = await client.chat.completions.create(...)
        completion._request_id  # req_id_xxx
        completion.usage._request_id  # raises `AttributeError`
        ```

        Note: unlike other properties that use an `_` prefix, this property
        *is* public. Unless documented otherwise, all other `_` prefix properties,
        methods and modules are *private*.
        """

    def to_dict(
        self,
        *,
        mode: Literal["json", "python"] = "python",
        use_api_names: bool = True,
        exclude_unset: bool = True,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        warnings: bool = True,
    ) -> dict[str, object]:
        """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude.

        By default, fields that were not set by the API will not be included,
        and keys will match the API response, *not* the property names from the model.

        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).

        Args:
            mode:
                If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`.
                If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)`

            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
            exclude_unset: Whether to exclude fields that have not been explicitly set.
            exclude_defaults: Whether to exclude fields that are set to their default value from the output.
            exclude_none: Whether to exclude fields that have a value of `None` from the output.
            warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2.
        """
        return self.model_dump(
            mode=mode,
            by_alias=use_api_names,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            warnings=warnings,
        )

    def to_json(
        self,
        *,
        indent: int | None = 2,
        use_api_names: bool = True,
        exclude_unset: bool = True,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        warnings: bool = True,
    ) -> str:
        """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation).

        By default, fields that were not set by the API will not be included,
        and keys will match the API response, *not* the property names from the model.

        For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property,
        the output will use the `"fooBar"` key (unless `use_api_names=False` is passed).

        Args:
            indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2`
            use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`.
            exclude_unset: Whether to exclude fields that have not been explicitly set.
            exclude_defaults: Whether to exclude fields that have the default value.
            exclude_none: Whether to exclude fields that have a value of `None`.
            warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2.
        """
        return self.model_dump_json(
            indent=indent,
            by_alias=use_api_names,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            warnings=warnings,
        )

    @override
    def __str__(self) -> str:
        # mypy complains about an invalid self arg
        return f"{self.__repr_name__()}({self.__repr_str__(', ')})"  # type: ignore[misc]

    # Override the 'construct' method in a way that supports recursive parsing without validation.
    # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836.
    @classmethod
    @override
    def construct(  # pyright: ignore[reportIncompatibleMethodOverride]
        __cls: Type[ModelT],
        _fields_set: set[str] | None = None,
        **values: object,
    ) -> ModelT:
        m = __cls.__new__(__cls)
        fields_values: dict[str, object] = {}

        config = get_model_config(__cls)
        # v1 exposes a config *class*, v2 a ConfigDict mapping — read the
        # populate-by-name flag through whichever interface is present.
        populate_by_name = (
            config.allow_population_by_field_name
            if isinstance(config, _ConfigProtocol)
            else config.get("populate_by_name")
        )

        if _fields_set is None:
            _fields_set = set()

        model_fields = get_model_fields(__cls)
        for name, field in model_fields.items():
            # prefer the API alias; fall back to the attribute name when there
            # is no alias, or when the alias is absent and population by field
            # name is enabled
            key = field.alias
            if key is None or (key not in values and populate_by_name):
                key = name

            if key in values:
                fields_values[name] = _construct_field(value=values[key], field=field, key=key)
                _fields_set.add(name)
            else:
                fields_values[name] = field_get_default(field)

        # values not declared on the model are preserved as extras (v2) or
        # written straight into __dict__ (v1)
        _extra = {}
        for key, value in values.items():
            if key not in model_fields:
                if PYDANTIC_V2:
                    _extra[key] = value
                else:
                    _fields_set.add(key)
                    fields_values[key] = value

        object.__setattr__(m, "__dict__", fields_values)

        if PYDANTIC_V2:
            # these properties are copied from Pydantic's `model_construct()` method
            object.__setattr__(m, "__pydantic_private__", None)
            object.__setattr__(m, "__pydantic_extra__", _extra)
            object.__setattr__(m, "__pydantic_fields_set__", _fields_set)
        else:
            # init_private_attributes() does not exist in v2
            m._init_private_attributes()  # type: ignore

            # copied from Pydantic v1's `construct()` method
            object.__setattr__(m, "__fields_set__", _fields_set)

        return m

    if not TYPE_CHECKING:
        # type checkers incorrectly complain about this assignment
        # because the type signatures are technically different
        # although not in practice
        model_construct = construct

    if not PYDANTIC_V2:
        # we define aliases for some of the new pydantic v2 methods so
        # that we can just document these methods without having to specify
        # a specific pydantic version as some users may not know which
        # pydantic version they are currently using

        @override
        def model_dump(
            self,
            *,
            mode: Literal["json", "python"] | str = "python",
            include: IncEx | None = None,
            exclude: IncEx | None = None,
            by_alias: bool = False,
            exclude_unset: bool = False,
            exclude_defaults: bool = False,
            exclude_none: bool = False,
            round_trip: bool = False,
            warnings: bool | Literal["none", "warn", "error"] = True,
            context: dict[str, Any] | None = None,
            serialize_as_any: bool = False,
        ) -> dict[str, Any]:
            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump

            Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.

            Args:
                mode: The mode in which `to_python` should run.
                    If mode is 'json', the dictionary will only contain JSON serializable types.
                    If mode is 'python', the dictionary may contain any Python objects.
                include: A list of fields to include in the output.
                exclude: A list of fields to exclude from the output.
                by_alias: Whether to use the field's alias in the dictionary key if defined.
                exclude_unset: Whether to exclude fields that are unset or None from the output.
                exclude_defaults: Whether to exclude fields that are set to their default value from the output.
                exclude_none: Whether to exclude fields that have a value of `None` from the output.
                round_trip: Whether to enable serialization and deserialization round-trip support.
                warnings: Whether to log warnings when invalid fields are encountered.

            Returns:
                A dictionary representation of the model.
            """
            # the v2-only parameters are accepted for signature compatibility but
            # rejected loudly rather than silently ignored
            if mode not in {"json", "python"}:
                raise ValueError("mode must be either 'json' or 'python'")
            if round_trip != False:
                raise ValueError("round_trip is only supported in Pydantic v2")
            if warnings != True:
                raise ValueError("warnings is only supported in Pydantic v2")
            if context is not None:
                raise ValueError("context is only supported in Pydantic v2")
            if serialize_as_any != False:
                raise ValueError("serialize_as_any is only supported in Pydantic v2")
            dumped = super().dict(  # pyright: ignore[reportDeprecated]
                include=include,
                exclude=exclude,
                by_alias=by_alias,
                exclude_unset=exclude_unset,
                exclude_defaults=exclude_defaults,
                exclude_none=exclude_none,
            )

            return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped

        @override
        def model_dump_json(
            self,
            *,
            indent: int | None = None,
            include: IncEx | None = None,
            exclude: IncEx | None = None,
            by_alias: bool = False,
            exclude_unset: bool = False,
            exclude_defaults: bool = False,
            exclude_none: bool = False,
            round_trip: bool = False,
            warnings: bool | Literal["none", "warn", "error"] = True,
            context: dict[str, Any] | None = None,
            serialize_as_any: bool = False,
        ) -> str:
            """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json

            Generates a JSON representation of the model using Pydantic's `to_json` method.

            Args:
                indent: Indentation to use in the JSON output. If None is passed, the output will be compact.
                include: Field(s) to include in the JSON output. Can take either a string or set of strings.
                exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings.
                by_alias: Whether to serialize using field aliases.
                exclude_unset: Whether to exclude fields that have not been explicitly set.
                exclude_defaults: Whether to exclude fields that have the default value.
                exclude_none: Whether to exclude fields that have a value of `None`.
                round_trip: Whether to use serialization/deserialization between JSON and class instance.
                warnings: Whether to show any warnings that occurred during serialization.

            Returns:
                A JSON string representation of the model.
            """
            # see model_dump() above: v2-only parameters are rejected explicitly
            if round_trip != False:
                raise ValueError("round_trip is only supported in Pydantic v2")
            if warnings != True:
                raise ValueError("warnings is only supported in Pydantic v2")
            if context is not None:
                raise ValueError("context is only supported in Pydantic v2")
            if serialize_as_any != False:
                raise ValueError("serialize_as_any is only supported in Pydantic v2")
            return super().json(  # type: ignore[reportDeprecated]
                indent=indent,
                include=include,
                exclude=exclude,
                by_alias=by_alias,
                exclude_unset=exclude_unset,
                exclude_defaults=exclude_defaults,
                exclude_none=exclude_none,
            )
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def _construct_field(value: object, field: FieldInfo, key: str) -> object:
    """Construct a single model field's value without validation.

    A `None` value falls back to the field's default; any other value is
    recursively coerced towards the field's declared type.
    """
    if value is None:
        return field_get_default(field)

    # pydantic v1 keeps the (possibly generic) outer type on a separate attribute
    field_type = field.annotation if PYDANTIC_V2 else cast(type, field.outer_type_)  # type: ignore
    if field_type is None:
        raise RuntimeError(f"Unexpected field type is None for {key}")

    return construct_type(value=value, type_=field_type)
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def is_basemodel(type_: type) -> bool:
    """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`"""
    if is_union(type_):
        # a union qualifies as soon as any one of its variants does
        return any(is_basemodel(variant) for variant in get_args(type_))
    return is_basemodel_type(type_)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]:
    """Narrow `type_` to a model class, unwrapping subscripted generics first."""
    origin = get_origin(type_) or type_
    # non-classes (e.g. Literal, plain unions) can never be model subclasses
    return inspect.isclass(origin) and issubclass(origin, (BaseModel, GenericModel))
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def build(
    base_model_cls: Callable[P, _BaseModelT],
    *args: P.args,
    **kwargs: P.kwargs,
) -> _BaseModelT:
    """Construct a BaseModel class without validation.

    This is useful for cases where you need to instantiate a `BaseModel`
    from an API response as this provides type-safe params which isn't supported
    by helpers like `construct_type()`.

    ```py
    build(MyModel, my_field_a="foo", my_field_b=123)
    ```
    """
    # positional args are declared only so that ParamSpec type-checks the call
    # site against the model's constructor; they are never actually accepted
    if args:
        raise TypeError(
            "Received positional arguments which are not supported; Keyword arguments must be used instead",
        )

    return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs))
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
    """Loose coercion to the expected type with construction of nested values.

    Note: the returned value from this function is not guaranteed to match the
    given type.
    """
    # thin typed wrapper: the cast asserts `_T` purely for the caller's benefit
    return cast(_T, construct_type(value=value, type_=type_))
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def construct_type(*, value: object, type_: object) -> object:
    """Loose coercion to the expected type with construction of nested values.

    If the given value does not match the expected type then it is returned as-is.
    """
    # we allow `object` as the input type because otherwise, passing things like
    # `Literal['value']` will be reported as a type error by type checkers
    type_ = cast("type[object]", type_)
    if is_type_alias_type(type_):
        # resolve PEP 695 `type X = ...` aliases to their underlying value
        type_ = type_.__value__  # type: ignore[unreachable]

    # unwrap `Annotated[T, ...]` -> `T`, keeping the metadata for discriminator lookup
    if is_annotated_type(type_):
        meta: tuple[Any, ...] = get_args(type_)[1:]
        type_ = extract_type_arg(type_, 0)
    else:
        meta = tuple()

    # we need to use the origin class for any types that are subscripted generics
    # e.g. Dict[str, object]
    origin = get_origin(type_) or type_
    args = get_args(type_)

    if is_union(origin):
        # first try strict validation; fall through to looser strategies on failure
        try:
            return validate_type(type_=cast("type[object]", type_), value=value)
        except Exception:
            pass

        # if the type is a discriminated union then we want to construct the right variant
        # in the union, even if the data doesn't match exactly, otherwise we'd break code
        # that relies on the constructed class types, e.g.
        #
        # class FooType:
        #   kind: Literal['foo']
        #   value: str
        #
        # class BarType:
        #   kind: Literal['bar']
        #   value: int
        #
        # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then
        # we'd end up constructing `FooType` when it should be `BarType`.
        discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta)
        if discriminator and is_mapping(value):
            variant_value = value.get(discriminator.field_alias_from or discriminator.field_name)
            if variant_value and isinstance(variant_value, str):
                variant_type = discriminator.mapping.get(variant_value)
                if variant_type:
                    return construct_type(type_=variant_type, value=value)

        # if the data is not valid, use the first variant that doesn't fail while deserializing
        for variant in args:
            try:
                return construct_type(value=value, type_=variant)
            except Exception:
                continue

        raise RuntimeError(f"Could not convert data into a valid instance of {type_}")

    if origin == dict:
        if not is_mapping(value):
            return value

        _, items_type = get_args(type_)  # Dict[_, items_type]
        return {key: construct_type(value=item, type_=items_type) for key, item in value.items()}

    if (
        not is_literal_type(type_)
        and inspect.isclass(origin)
        and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel))
    ):
        if is_list(value):
            # lists of mappings become lists of constructed models; non-mapping
            # entries are passed through untouched
            return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value]

        if is_mapping(value):
            if issubclass(type_, BaseModel):
                return type_.construct(**value)  # type: ignore[arg-type]

            return cast(Any, type_).construct(**value)

    if origin == list:
        if not is_list(value):
            return value

        inner_type = args[0]  # List[inner_type]
        return [construct_type(value=entry, type_=inner_type) for entry in value]

    if origin == float:
        if isinstance(value, int):
            # only coerce ints that round-trip exactly; otherwise keep the int
            coerced = float(value)
            if coerced != value:
                return value
            return coerced

        return value

    if type_ == datetime:
        try:
            return parse_datetime(value)  # type: ignore
        except Exception:
            return value

    if type_ == date:
        try:
            return parse_date(value)  # type: ignore
        except Exception:
            return value

    return value
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
@runtime_checkable
class CachedDiscriminatorType(Protocol):
    # Marker protocol: a union object that has already had its discriminator
    # metadata computed and cached directly on it.
    __discriminator__: DiscriminatorDetails
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
class DiscriminatorDetails:
    """Metadata describing how to pick the correct variant of a discriminated union."""

    field_name: str
    """The name of the discriminator field in the variant class, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo']
    ```

    Will result in field_name='type'
    """

    field_alias_from: str | None
    """The name of the discriminator field in the API response, e.g.

    ```py
    class Foo(BaseModel):
        type: Literal['foo'] = Field(alias='type_from_api')
    ```

    Will result in field_alias_from='type_from_api'
    """

    mapping: dict[str, type]
    """Mapping of discriminator value to variant type, e.g.

    {'foo': FooVariant, 'bar': BarVariant}
    """

    def __init__(
        self,
        *,
        mapping: dict[str, type],
        discriminator_field: str,
        discriminator_alias: str | None,
    ) -> None:
        """Store the discriminator mapping, field name, and optional API alias."""
        self.mapping = mapping
        self.field_name = discriminator_field
        self.field_alias_from = discriminator_alias
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
    """Compute (and cache on the union object) discriminator details for a union type.

    Returns `None` when no `PropertyInfo(discriminator=...)` annotation is present,
    or when no variant maps a string literal onto the discriminator field.
    """
    if isinstance(union, CachedDiscriminatorType):
        # already computed for this union object — reuse the cached details
        return union.__discriminator__

    discriminator_field_name: str | None = None

    # the discriminator field name comes from the Annotated[] metadata
    for annotation in meta_annotations:
        if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None:
            discriminator_field_name = annotation.discriminator
            break

    if not discriminator_field_name:
        return None

    mapping: dict[str, type] = {}
    discriminator_alias: str | None = None

    for variant in get_args(union):
        variant = strip_annotated_type(variant)
        if is_basemodel_type(variant):
            if PYDANTIC_V2:
                # v2: read the discriminator literal values out of the core schema
                field = _extract_field_schema_pv2(variant, discriminator_field_name)
                if not field:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field.get("serialization_alias")

                field_schema = field["schema"]

                if field_schema["type"] == "literal":
                    for entry in cast("LiteralSchema", field_schema)["expected"]:
                        if isinstance(entry, str):
                            mapping[entry] = variant
            else:
                # v1: read the discriminator literal values from the FieldInfo annotation
                field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name)  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
                if not field_info:
                    continue

                # Note: if one variant defines an alias then they all should
                discriminator_alias = field_info.alias

                if field_info.annotation and is_literal_type(field_info.annotation):
                    for entry in get_args(field_info.annotation):
                        if isinstance(entry, str):
                            mapping[entry] = variant

    if not mapping:
        return None

    details = DiscriminatorDetails(
        mapping=mapping,
        discriminator_field=discriminator_field_name,
        discriminator_alias=discriminator_alias,
    )
    # cache directly on the union object so subsequent lookups are O(1)
    cast(CachedDiscriminatorType, union).__discriminator__ = details
    return details
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
    """Pull a single field's core schema out of a Pydantic v2 model.

    Returns `None` if the model's core schema doesn't have the expected
    model -> model-fields shape, or the field isn't declared on it.
    """
    schema = model.__pydantic_core_schema__
    if schema["type"] != "model":
        return None

    fields_schema = schema["schema"]
    if fields_schema["type"] != "model-fields":
        return None

    fields_schema = cast("ModelFieldsSchema", fields_schema)

    field = fields_schema["fields"].get(field_name)
    if not field:
        return None

    return cast("ModelField", field)  # pyright: ignore[reportUnnecessaryCast]
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
def validate_type(*, type_: type[_T], value: object) -> _T:
    """Strict validation that the given value matches the expected type"""
    # pydantic model classes are parsed directly; everything else is validated
    # through a (cached) TypeAdapter / v1 shim
    is_model_class = inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel)
    if is_model_class:
        return cast(_T, parse_obj(type_, value))
    return cast(_T, _validate_non_model_type(type_=type_, value=value))
|
| 690 |
+
|
| 691 |
+
|
| 692 |
+
def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
    """Attach a pydantic configuration to the given type.

    The config is stored under the `__pydantic_config__` attribute, which
    Pydantic v2 consults when building schemas for arbitrary classes.

    Note: this is a no-op on Pydantic v1.
    """
    setattr(typ, "__pydantic_config__", config)  # noqa: B010
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def add_request_id(obj: BaseModel, request_id: str | None) -> None:
|
| 701 |
+
obj._request_id = request_id
|
| 702 |
+
|
| 703 |
+
# in Pydantic v1, using setattr like we do above causes the attribute
|
| 704 |
+
# to be included when serializing the model which we don't want in this
|
| 705 |
+
# case so we need to explicitly exclude it
|
| 706 |
+
if not PYDANTIC_V2:
|
| 707 |
+
try:
|
| 708 |
+
exclude_fields = obj.__exclude_fields__ # type: ignore
|
| 709 |
+
except AttributeError:
|
| 710 |
+
cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
|
| 711 |
+
else:
|
| 712 |
+
cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
# our use of subclasssing here causes weirdness for type checkers,
|
| 716 |
+
# so we just pretend that we don't subclass
|
| 717 |
+
if TYPE_CHECKING:
|
| 718 |
+
GenericModel = BaseModel
|
| 719 |
+
else:
|
| 720 |
+
|
| 721 |
+
class GenericModel(BaseGenericModel, BaseModel):
|
| 722 |
+
pass
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
if PYDANTIC_V2:
|
| 726 |
+
from pydantic import TypeAdapter as _TypeAdapter
|
| 727 |
+
|
| 728 |
+
_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
|
| 729 |
+
|
| 730 |
+
if TYPE_CHECKING:
|
| 731 |
+
from pydantic import TypeAdapter
|
| 732 |
+
else:
|
| 733 |
+
TypeAdapter = _CachedTypeAdapter
|
| 734 |
+
|
| 735 |
+
def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
|
| 736 |
+
return TypeAdapter(type_).validate_python(value)
|
| 737 |
+
|
| 738 |
+
elif not TYPE_CHECKING: # TODO: condition is weird
|
| 739 |
+
|
| 740 |
+
class RootModel(GenericModel, Generic[_T]):
|
| 741 |
+
"""Used as a placeholder to easily convert runtime types to a Pydantic format
|
| 742 |
+
to provide validation.
|
| 743 |
+
|
| 744 |
+
For example:
|
| 745 |
+
```py
|
| 746 |
+
validated = RootModel[int](__root__="5").__root__
|
| 747 |
+
# validated: 5
|
| 748 |
+
```
|
| 749 |
+
"""
|
| 750 |
+
|
| 751 |
+
__root__: _T
|
| 752 |
+
|
| 753 |
+
def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
|
| 754 |
+
model = _create_pydantic_model(type_).validate(value)
|
| 755 |
+
return cast(_T, model.__root__)
|
| 756 |
+
|
| 757 |
+
def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
|
| 758 |
+
return RootModel[type_] # type: ignore
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
class FinalRequestOptionsInput(TypedDict, total=False):
|
| 762 |
+
method: Required[str]
|
| 763 |
+
url: Required[str]
|
| 764 |
+
params: Query
|
| 765 |
+
headers: Headers
|
| 766 |
+
max_retries: int
|
| 767 |
+
timeout: float | Timeout | None
|
| 768 |
+
files: HttpxRequestFiles | None
|
| 769 |
+
idempotency_key: str
|
| 770 |
+
json_data: Body
|
| 771 |
+
extra_json: AnyMapping
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
@final
|
| 775 |
+
class FinalRequestOptions(pydantic.BaseModel):
|
| 776 |
+
method: str
|
| 777 |
+
url: str
|
| 778 |
+
params: Query = {}
|
| 779 |
+
headers: Union[Headers, NotGiven] = NotGiven()
|
| 780 |
+
max_retries: Union[int, NotGiven] = NotGiven()
|
| 781 |
+
timeout: Union[float, Timeout, None, NotGiven] = NotGiven()
|
| 782 |
+
files: Union[HttpxRequestFiles, None] = None
|
| 783 |
+
idempotency_key: Union[str, None] = None
|
| 784 |
+
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
|
| 785 |
+
|
| 786 |
+
# It should be noted that we cannot use `json` here as that would override
|
| 787 |
+
# a BaseModel method in an incompatible fashion.
|
| 788 |
+
json_data: Union[Body, None] = None
|
| 789 |
+
extra_json: Union[AnyMapping, None] = None
|
| 790 |
+
|
| 791 |
+
if PYDANTIC_V2:
|
| 792 |
+
model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True)
|
| 793 |
+
else:
|
| 794 |
+
|
| 795 |
+
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
|
| 796 |
+
arbitrary_types_allowed: bool = True
|
| 797 |
+
|
| 798 |
+
def get_max_retries(self, max_retries: int) -> int:
|
| 799 |
+
if isinstance(self.max_retries, NotGiven):
|
| 800 |
+
return max_retries
|
| 801 |
+
return self.max_retries
|
| 802 |
+
|
| 803 |
+
def _strip_raw_response_header(self) -> None:
|
| 804 |
+
if not is_given(self.headers):
|
| 805 |
+
return
|
| 806 |
+
|
| 807 |
+
if self.headers.get(RAW_RESPONSE_HEADER):
|
| 808 |
+
self.headers = {**self.headers}
|
| 809 |
+
self.headers.pop(RAW_RESPONSE_HEADER)
|
| 810 |
+
|
| 811 |
+
# override the `construct` method so that we can run custom transformations.
|
| 812 |
+
# this is necessary as we don't want to do any actual runtime type checking
|
| 813 |
+
# (which means we can't use validators) but we do want to ensure that `NotGiven`
|
| 814 |
+
# values are not present
|
| 815 |
+
#
|
| 816 |
+
# type ignore required because we're adding explicit types to `**values`
|
| 817 |
+
@classmethod
|
| 818 |
+
def construct( # type: ignore
|
| 819 |
+
cls,
|
| 820 |
+
_fields_set: set[str] | None = None,
|
| 821 |
+
**values: Unpack[FinalRequestOptionsInput],
|
| 822 |
+
) -> FinalRequestOptions:
|
| 823 |
+
kwargs: dict[str, Any] = {
|
| 824 |
+
# we unconditionally call `strip_not_given` on any value
|
| 825 |
+
# as it will just ignore any non-mapping types
|
| 826 |
+
key: strip_not_given(value)
|
| 827 |
+
for key, value in values.items()
|
| 828 |
+
}
|
| 829 |
+
if PYDANTIC_V2:
|
| 830 |
+
return super().model_construct(_fields_set, **kwargs)
|
| 831 |
+
return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated]
|
| 832 |
+
|
| 833 |
+
if not TYPE_CHECKING:
|
| 834 |
+
# type checkers incorrectly complain about this assignment
|
| 835 |
+
model_construct = construct
|
.venv/lib/python3.11/site-packages/openai/_module_client.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing_extensions import override
|
| 4 |
+
|
| 5 |
+
from . import resources, _load_client
|
| 6 |
+
from ._utils import LazyProxy
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ChatProxy(LazyProxy[resources.Chat]):
|
| 10 |
+
@override
|
| 11 |
+
def __load__(self) -> resources.Chat:
|
| 12 |
+
return _load_client().chat
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class BetaProxy(LazyProxy[resources.Beta]):
|
| 16 |
+
@override
|
| 17 |
+
def __load__(self) -> resources.Beta:
|
| 18 |
+
return _load_client().beta
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class FilesProxy(LazyProxy[resources.Files]):
|
| 22 |
+
@override
|
| 23 |
+
def __load__(self) -> resources.Files:
|
| 24 |
+
return _load_client().files
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class AudioProxy(LazyProxy[resources.Audio]):
|
| 28 |
+
@override
|
| 29 |
+
def __load__(self) -> resources.Audio:
|
| 30 |
+
return _load_client().audio
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class ImagesProxy(LazyProxy[resources.Images]):
|
| 34 |
+
@override
|
| 35 |
+
def __load__(self) -> resources.Images:
|
| 36 |
+
return _load_client().images
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ModelsProxy(LazyProxy[resources.Models]):
|
| 40 |
+
@override
|
| 41 |
+
def __load__(self) -> resources.Models:
|
| 42 |
+
return _load_client().models
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class BatchesProxy(LazyProxy[resources.Batches]):
|
| 46 |
+
@override
|
| 47 |
+
def __load__(self) -> resources.Batches:
|
| 48 |
+
return _load_client().batches
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class EmbeddingsProxy(LazyProxy[resources.Embeddings]):
|
| 52 |
+
@override
|
| 53 |
+
def __load__(self) -> resources.Embeddings:
|
| 54 |
+
return _load_client().embeddings
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class CompletionsProxy(LazyProxy[resources.Completions]):
|
| 58 |
+
@override
|
| 59 |
+
def __load__(self) -> resources.Completions:
|
| 60 |
+
return _load_client().completions
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class ModerationsProxy(LazyProxy[resources.Moderations]):
|
| 64 |
+
@override
|
| 65 |
+
def __load__(self) -> resources.Moderations:
|
| 66 |
+
return _load_client().moderations
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class FineTuningProxy(LazyProxy[resources.FineTuning]):
|
| 70 |
+
@override
|
| 71 |
+
def __load__(self) -> resources.FineTuning:
|
| 72 |
+
return _load_client().fine_tuning
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
chat: resources.Chat = ChatProxy().__as_proxied__()
|
| 76 |
+
beta: resources.Beta = BetaProxy().__as_proxied__()
|
| 77 |
+
files: resources.Files = FilesProxy().__as_proxied__()
|
| 78 |
+
audio: resources.Audio = AudioProxy().__as_proxied__()
|
| 79 |
+
images: resources.Images = ImagesProxy().__as_proxied__()
|
| 80 |
+
models: resources.Models = ModelsProxy().__as_proxied__()
|
| 81 |
+
batches: resources.Batches = BatchesProxy().__as_proxied__()
|
| 82 |
+
embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__()
|
| 83 |
+
completions: resources.Completions = CompletionsProxy().__as_proxied__()
|
| 84 |
+
moderations: resources.Moderations = ModerationsProxy().__as_proxied__()
|
| 85 |
+
fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__()
|
.venv/lib/python3.11/site-packages/openai/_qs.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any, List, Tuple, Union, Mapping, TypeVar
|
| 4 |
+
from urllib.parse import parse_qs, urlencode
|
| 5 |
+
from typing_extensions import Literal, get_args
|
| 6 |
+
|
| 7 |
+
from ._types import NOT_GIVEN, NotGiven, NotGivenOr
|
| 8 |
+
from ._utils import flatten
|
| 9 |
+
|
| 10 |
+
_T = TypeVar("_T")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
|
| 14 |
+
NestedFormat = Literal["dots", "brackets"]
|
| 15 |
+
|
| 16 |
+
PrimitiveData = Union[str, int, float, bool, None]
|
| 17 |
+
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
|
| 18 |
+
# https://github.com/microsoft/pyright/issues/3555
|
| 19 |
+
Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"]
|
| 20 |
+
Params = Mapping[str, Data]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Querystring:
|
| 24 |
+
array_format: ArrayFormat
|
| 25 |
+
nested_format: NestedFormat
|
| 26 |
+
|
| 27 |
+
def __init__(
|
| 28 |
+
self,
|
| 29 |
+
*,
|
| 30 |
+
array_format: ArrayFormat = "repeat",
|
| 31 |
+
nested_format: NestedFormat = "brackets",
|
| 32 |
+
) -> None:
|
| 33 |
+
self.array_format = array_format
|
| 34 |
+
self.nested_format = nested_format
|
| 35 |
+
|
| 36 |
+
def parse(self, query: str) -> Mapping[str, object]:
|
| 37 |
+
# Note: custom format syntax is not supported yet
|
| 38 |
+
return parse_qs(query)
|
| 39 |
+
|
| 40 |
+
def stringify(
|
| 41 |
+
self,
|
| 42 |
+
params: Params,
|
| 43 |
+
*,
|
| 44 |
+
array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
|
| 45 |
+
nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
|
| 46 |
+
) -> str:
|
| 47 |
+
return urlencode(
|
| 48 |
+
self.stringify_items(
|
| 49 |
+
params,
|
| 50 |
+
array_format=array_format,
|
| 51 |
+
nested_format=nested_format,
|
| 52 |
+
)
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
def stringify_items(
|
| 56 |
+
self,
|
| 57 |
+
params: Params,
|
| 58 |
+
*,
|
| 59 |
+
array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
|
| 60 |
+
nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
|
| 61 |
+
) -> list[tuple[str, str]]:
|
| 62 |
+
opts = Options(
|
| 63 |
+
qs=self,
|
| 64 |
+
array_format=array_format,
|
| 65 |
+
nested_format=nested_format,
|
| 66 |
+
)
|
| 67 |
+
return flatten([self._stringify_item(key, value, opts) for key, value in params.items()])
|
| 68 |
+
|
| 69 |
+
def _stringify_item(
|
| 70 |
+
self,
|
| 71 |
+
key: str,
|
| 72 |
+
value: Data,
|
| 73 |
+
opts: Options,
|
| 74 |
+
) -> list[tuple[str, str]]:
|
| 75 |
+
if isinstance(value, Mapping):
|
| 76 |
+
items: list[tuple[str, str]] = []
|
| 77 |
+
nested_format = opts.nested_format
|
| 78 |
+
for subkey, subvalue in value.items():
|
| 79 |
+
items.extend(
|
| 80 |
+
self._stringify_item(
|
| 81 |
+
# TODO: error if unknown format
|
| 82 |
+
f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]",
|
| 83 |
+
subvalue,
|
| 84 |
+
opts,
|
| 85 |
+
)
|
| 86 |
+
)
|
| 87 |
+
return items
|
| 88 |
+
|
| 89 |
+
if isinstance(value, (list, tuple)):
|
| 90 |
+
array_format = opts.array_format
|
| 91 |
+
if array_format == "comma":
|
| 92 |
+
return [
|
| 93 |
+
(
|
| 94 |
+
key,
|
| 95 |
+
",".join(self._primitive_value_to_str(item) for item in value if item is not None),
|
| 96 |
+
),
|
| 97 |
+
]
|
| 98 |
+
elif array_format == "repeat":
|
| 99 |
+
items = []
|
| 100 |
+
for item in value:
|
| 101 |
+
items.extend(self._stringify_item(key, item, opts))
|
| 102 |
+
return items
|
| 103 |
+
elif array_format == "indices":
|
| 104 |
+
raise NotImplementedError("The array indices format is not supported yet")
|
| 105 |
+
elif array_format == "brackets":
|
| 106 |
+
items = []
|
| 107 |
+
key = key + "[]"
|
| 108 |
+
for item in value:
|
| 109 |
+
items.extend(self._stringify_item(key, item, opts))
|
| 110 |
+
return items
|
| 111 |
+
else:
|
| 112 |
+
raise NotImplementedError(
|
| 113 |
+
f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
serialised = self._primitive_value_to_str(value)
|
| 117 |
+
if not serialised:
|
| 118 |
+
return []
|
| 119 |
+
return [(key, serialised)]
|
| 120 |
+
|
| 121 |
+
def _primitive_value_to_str(self, value: PrimitiveData) -> str:
|
| 122 |
+
# copied from httpx
|
| 123 |
+
if value is True:
|
| 124 |
+
return "true"
|
| 125 |
+
elif value is False:
|
| 126 |
+
return "false"
|
| 127 |
+
elif value is None:
|
| 128 |
+
return ""
|
| 129 |
+
return str(value)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
_qs = Querystring()
|
| 133 |
+
parse = _qs.parse
|
| 134 |
+
stringify = _qs.stringify
|
| 135 |
+
stringify_items = _qs.stringify_items
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class Options:
|
| 139 |
+
array_format: ArrayFormat
|
| 140 |
+
nested_format: NestedFormat
|
| 141 |
+
|
| 142 |
+
def __init__(
|
| 143 |
+
self,
|
| 144 |
+
qs: Querystring = _qs,
|
| 145 |
+
*,
|
| 146 |
+
array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
|
| 147 |
+
nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
|
| 148 |
+
) -> None:
|
| 149 |
+
self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format
|
| 150 |
+
self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format
|
.venv/lib/python3.11/site-packages/openai/_resource.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import time
|
| 6 |
+
from typing import TYPE_CHECKING
|
| 7 |
+
|
| 8 |
+
import anyio
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from ._client import OpenAI, AsyncOpenAI
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class SyncAPIResource:
|
| 15 |
+
_client: OpenAI
|
| 16 |
+
|
| 17 |
+
def __init__(self, client: OpenAI) -> None:
|
| 18 |
+
self._client = client
|
| 19 |
+
self._get = client.get
|
| 20 |
+
self._post = client.post
|
| 21 |
+
self._patch = client.patch
|
| 22 |
+
self._put = client.put
|
| 23 |
+
self._delete = client.delete
|
| 24 |
+
self._get_api_list = client.get_api_list
|
| 25 |
+
|
| 26 |
+
def _sleep(self, seconds: float) -> None:
|
| 27 |
+
time.sleep(seconds)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class AsyncAPIResource:
|
| 31 |
+
_client: AsyncOpenAI
|
| 32 |
+
|
| 33 |
+
def __init__(self, client: AsyncOpenAI) -> None:
|
| 34 |
+
self._client = client
|
| 35 |
+
self._get = client.get
|
| 36 |
+
self._post = client.post
|
| 37 |
+
self._patch = client.patch
|
| 38 |
+
self._put = client.put
|
| 39 |
+
self._delete = client.delete
|
| 40 |
+
self._get_api_list = client.get_api_list
|
| 41 |
+
|
| 42 |
+
async def _sleep(self, seconds: float) -> None:
|
| 43 |
+
await anyio.sleep(seconds)
|
.venv/lib/python3.11/site-packages/openai/_response.py
ADDED
|
@@ -0,0 +1,848 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import inspect
|
| 5 |
+
import logging
|
| 6 |
+
import datetime
|
| 7 |
+
import functools
|
| 8 |
+
from types import TracebackType
|
| 9 |
+
from typing import (
|
| 10 |
+
TYPE_CHECKING,
|
| 11 |
+
Any,
|
| 12 |
+
Union,
|
| 13 |
+
Generic,
|
| 14 |
+
TypeVar,
|
| 15 |
+
Callable,
|
| 16 |
+
Iterator,
|
| 17 |
+
AsyncIterator,
|
| 18 |
+
cast,
|
| 19 |
+
overload,
|
| 20 |
+
)
|
| 21 |
+
from typing_extensions import Awaitable, ParamSpec, override, get_origin
|
| 22 |
+
|
| 23 |
+
import anyio
|
| 24 |
+
import httpx
|
| 25 |
+
import pydantic
|
| 26 |
+
|
| 27 |
+
from ._types import NoneType
|
| 28 |
+
from ._utils import is_given, extract_type_arg, is_annotated_type, is_type_alias_type, extract_type_var_from_base
|
| 29 |
+
from ._models import BaseModel, is_basemodel, add_request_id
|
| 30 |
+
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
|
| 31 |
+
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
|
| 32 |
+
from ._exceptions import OpenAIError, APIResponseValidationError
|
| 33 |
+
|
| 34 |
+
if TYPE_CHECKING:
|
| 35 |
+
from ._models import FinalRequestOptions
|
| 36 |
+
from ._base_client import BaseClient
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
P = ParamSpec("P")
|
| 40 |
+
R = TypeVar("R")
|
| 41 |
+
_T = TypeVar("_T")
|
| 42 |
+
_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]")
|
| 43 |
+
_AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]")
|
| 44 |
+
|
| 45 |
+
log: logging.Logger = logging.getLogger(__name__)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class BaseAPIResponse(Generic[R]):
|
| 49 |
+
_cast_to: type[R]
|
| 50 |
+
_client: BaseClient[Any, Any]
|
| 51 |
+
_parsed_by_type: dict[type[Any], Any]
|
| 52 |
+
_is_sse_stream: bool
|
| 53 |
+
_stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None
|
| 54 |
+
_options: FinalRequestOptions
|
| 55 |
+
|
| 56 |
+
http_response: httpx.Response
|
| 57 |
+
|
| 58 |
+
retries_taken: int
|
| 59 |
+
"""The number of retries made. If no retries happened this will be `0`"""
|
| 60 |
+
|
| 61 |
+
def __init__(
|
| 62 |
+
self,
|
| 63 |
+
*,
|
| 64 |
+
raw: httpx.Response,
|
| 65 |
+
cast_to: type[R],
|
| 66 |
+
client: BaseClient[Any, Any],
|
| 67 |
+
stream: bool,
|
| 68 |
+
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
|
| 69 |
+
options: FinalRequestOptions,
|
| 70 |
+
retries_taken: int = 0,
|
| 71 |
+
) -> None:
|
| 72 |
+
self._cast_to = cast_to
|
| 73 |
+
self._client = client
|
| 74 |
+
self._parsed_by_type = {}
|
| 75 |
+
self._is_sse_stream = stream
|
| 76 |
+
self._stream_cls = stream_cls
|
| 77 |
+
self._options = options
|
| 78 |
+
self.http_response = raw
|
| 79 |
+
self.retries_taken = retries_taken
|
| 80 |
+
|
| 81 |
+
@property
|
| 82 |
+
def headers(self) -> httpx.Headers:
|
| 83 |
+
return self.http_response.headers
|
| 84 |
+
|
| 85 |
+
@property
|
| 86 |
+
def http_request(self) -> httpx.Request:
|
| 87 |
+
"""Returns the httpx Request instance associated with the current response."""
|
| 88 |
+
return self.http_response.request
|
| 89 |
+
|
| 90 |
+
@property
|
| 91 |
+
def status_code(self) -> int:
|
| 92 |
+
return self.http_response.status_code
|
| 93 |
+
|
| 94 |
+
@property
|
| 95 |
+
def url(self) -> httpx.URL:
|
| 96 |
+
"""Returns the URL for which the request was made."""
|
| 97 |
+
return self.http_response.url
|
| 98 |
+
|
| 99 |
+
@property
|
| 100 |
+
def method(self) -> str:
|
| 101 |
+
return self.http_request.method
|
| 102 |
+
|
| 103 |
+
@property
|
| 104 |
+
def http_version(self) -> str:
|
| 105 |
+
return self.http_response.http_version
|
| 106 |
+
|
| 107 |
+
@property
|
| 108 |
+
def elapsed(self) -> datetime.timedelta:
|
| 109 |
+
"""The time taken for the complete request/response cycle to complete."""
|
| 110 |
+
return self.http_response.elapsed
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
def is_closed(self) -> bool:
|
| 114 |
+
"""Whether or not the response body has been closed.
|
| 115 |
+
|
| 116 |
+
If this is False then there is response data that has not been read yet.
|
| 117 |
+
You must either fully consume the response body or call `.close()`
|
| 118 |
+
before discarding the response to prevent resource leaks.
|
| 119 |
+
"""
|
| 120 |
+
return self.http_response.is_closed
|
| 121 |
+
|
| 122 |
+
@override
|
| 123 |
+
def __repr__(self) -> str:
|
| 124 |
+
return (
|
| 125 |
+
f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>"
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
def _parse(self, *, to: type[_T] | None = None) -> R | _T:
|
| 129 |
+
cast_to = to if to is not None else self._cast_to
|
| 130 |
+
|
| 131 |
+
# unwrap `TypeAlias('Name', T)` -> `T`
|
| 132 |
+
if is_type_alias_type(cast_to):
|
| 133 |
+
cast_to = cast_to.__value__ # type: ignore[unreachable]
|
| 134 |
+
|
| 135 |
+
# unwrap `Annotated[T, ...]` -> `T`
|
| 136 |
+
if cast_to and is_annotated_type(cast_to):
|
| 137 |
+
cast_to = extract_type_arg(cast_to, 0)
|
| 138 |
+
|
| 139 |
+
origin = get_origin(cast_to) or cast_to
|
| 140 |
+
|
| 141 |
+
if self._is_sse_stream:
|
| 142 |
+
if to:
|
| 143 |
+
if not is_stream_class_type(to):
|
| 144 |
+
raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}")
|
| 145 |
+
|
| 146 |
+
return cast(
|
| 147 |
+
_T,
|
| 148 |
+
to(
|
| 149 |
+
cast_to=extract_stream_chunk_type(
|
| 150 |
+
to,
|
| 151 |
+
failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]",
|
| 152 |
+
),
|
| 153 |
+
response=self.http_response,
|
| 154 |
+
client=cast(Any, self._client),
|
| 155 |
+
),
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
if self._stream_cls:
|
| 159 |
+
return cast(
|
| 160 |
+
R,
|
| 161 |
+
self._stream_cls(
|
| 162 |
+
cast_to=extract_stream_chunk_type(self._stream_cls),
|
| 163 |
+
response=self.http_response,
|
| 164 |
+
client=cast(Any, self._client),
|
| 165 |
+
),
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls)
|
| 169 |
+
if stream_cls is None:
|
| 170 |
+
raise MissingStreamClassError()
|
| 171 |
+
|
| 172 |
+
return cast(
|
| 173 |
+
R,
|
| 174 |
+
stream_cls(
|
| 175 |
+
cast_to=cast_to,
|
| 176 |
+
response=self.http_response,
|
| 177 |
+
client=cast(Any, self._client),
|
| 178 |
+
),
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
if cast_to is NoneType:
|
| 182 |
+
return cast(R, None)
|
| 183 |
+
|
| 184 |
+
response = self.http_response
|
| 185 |
+
if cast_to == str:
|
| 186 |
+
return cast(R, response.text)
|
| 187 |
+
|
| 188 |
+
if cast_to == bytes:
|
| 189 |
+
return cast(R, response.content)
|
| 190 |
+
|
| 191 |
+
if cast_to == int:
|
| 192 |
+
return cast(R, int(response.text))
|
| 193 |
+
|
| 194 |
+
if cast_to == float:
|
| 195 |
+
return cast(R, float(response.text))
|
| 196 |
+
|
| 197 |
+
if cast_to == bool:
|
| 198 |
+
return cast(R, response.text.lower() == "true")
|
| 199 |
+
|
| 200 |
+
# handle the legacy binary response case
|
| 201 |
+
if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent":
|
| 202 |
+
return cast(R, cast_to(response)) # type: ignore
|
| 203 |
+
|
| 204 |
+
if origin == APIResponse:
|
| 205 |
+
raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
|
| 206 |
+
|
| 207 |
+
if inspect.isclass(origin) and issubclass(origin, httpx.Response):
|
| 208 |
+
# Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
|
| 209 |
+
# and pass that class to our request functions. We cannot change the variance to be either
|
| 210 |
+
# covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
|
| 211 |
+
# the response class ourselves but that is something that should be supported directly in httpx
|
| 212 |
+
# as it would be easy to incorrectly construct the Response object due to the multitude of arguments.
|
| 213 |
+
if cast_to != httpx.Response:
|
| 214 |
+
raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
|
| 215 |
+
return cast(R, response)
|
| 216 |
+
|
| 217 |
+
if (
|
| 218 |
+
inspect.isclass(
|
| 219 |
+
origin # pyright: ignore[reportUnknownArgumentType]
|
| 220 |
+
)
|
| 221 |
+
and not issubclass(origin, BaseModel)
|
| 222 |
+
and issubclass(origin, pydantic.BaseModel)
|
| 223 |
+
):
|
| 224 |
+
raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
|
| 225 |
+
|
| 226 |
+
if (
|
| 227 |
+
cast_to is not object
|
| 228 |
+
and not origin is list
|
| 229 |
+
and not origin is dict
|
| 230 |
+
and not origin is Union
|
| 231 |
+
and not issubclass(origin, BaseModel)
|
| 232 |
+
):
|
| 233 |
+
raise RuntimeError(
|
| 234 |
+
f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}."
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# split is required to handle cases where additional information is included
|
| 238 |
+
# in the response, e.g. application/json; charset=utf-8
|
| 239 |
+
content_type, *_ = response.headers.get("content-type", "*").split(";")
|
| 240 |
+
if content_type != "application/json":
|
| 241 |
+
if is_basemodel(cast_to):
|
| 242 |
+
try:
|
| 243 |
+
data = response.json()
|
| 244 |
+
except Exception as exc:
|
| 245 |
+
log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc)
|
| 246 |
+
else:
|
| 247 |
+
return self._client._process_response_data(
|
| 248 |
+
data=data,
|
| 249 |
+
cast_to=cast_to, # type: ignore
|
| 250 |
+
response=response,
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
if self._client._strict_response_validation:
|
| 254 |
+
raise APIResponseValidationError(
|
| 255 |
+
response=response,
|
| 256 |
+
message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.",
|
| 257 |
+
body=response.text,
|
| 258 |
+
)
|
| 259 |
+
|
| 260 |
+
# If the API responds with content that isn't JSON then we just return
|
| 261 |
+
# the (decoded) text without performing any parsing so that you can still
|
| 262 |
+
# handle the response however you need to.
|
| 263 |
+
return response.text # type: ignore
|
| 264 |
+
|
| 265 |
+
data = response.json()
|
| 266 |
+
|
| 267 |
+
return self._client._process_response_data(
|
| 268 |
+
data=data,
|
| 269 |
+
cast_to=cast_to, # type: ignore
|
| 270 |
+
response=response,
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class APIResponse(BaseAPIResponse[R]):
    """Synchronous API response wrapper with lazy, memoised body parsing."""

    @property
    def request_id(self) -> str | None:
        # The server echoes a unique id for each request in this header.
        return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]

    @overload
    def parse(self, *, to: type[_T]) -> _T: ...

    @overload
    def parse(self) -> R: ...

    def parse(self, *, to: type[_T] | None = None) -> R | _T:
        """Returns the rich python representation of this response's data.

        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

        You can customise the type that the response is parsed into through
        the `to` argument, e.g.

        ```py
        from openai import BaseModel


        class MyModel(BaseModel):
            foo: str


        obj = response.parse(to=MyModel)
        print(obj.foo)
        ```

        We support parsing:
          - `BaseModel`
          - `dict`
          - `list`
          - `Union`
          - `str`
          - `int`
          - `float`
          - `httpx.Response`
        """
        # Parsed values are cached per target type so repeated `.parse()`
        # calls (with or without an explicit `to=`) do not re-deserialize.
        cache_key = to if to is not None else self._cast_to
        cached = self._parsed_by_type.get(cache_key)
        if cached is not None:
            return cached  # type: ignore[no-any-return]

        # Non-streaming responses must be fully read before parsing.
        if not self._is_sse_stream:
            self.read()

        parsed = self._parse(to=to)
        # An optional caller-supplied hook can transform the parsed value.
        if is_given(self._options.post_parser):
            parsed = self._options.post_parser(parsed)

        # Attach the request id to model instances for easier debugging.
        if isinstance(parsed, BaseModel):
            add_request_id(parsed, self.request_id)

        self._parsed_by_type[cache_key] = parsed
        return cast(R, parsed)

    def read(self) -> bytes:
        """Read and return the binary response content."""
        try:
            return self.http_response.read()
        except httpx.StreamConsumed as exc:
            # The default error raised by httpx isn't very
            # helpful in our case so we re-raise it with
            # a different error message.
            raise StreamAlreadyConsumed() from exc

    def text(self) -> str:
        """Read and decode the response content into a string."""
        self.read()
        return self.http_response.text

    def json(self) -> object:
        """Read and decode the JSON response content."""
        self.read()
        return self.http_response.json()

    def close(self) -> None:
        """Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self.http_response.close()

    def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]:
        """
        A byte-iterator over the decoded response content.

        This automatically handles gzip, deflate and brotli encoded responses.
        """
        for chunk in self.http_response.iter_bytes(chunk_size):
            yield chunk

    def iter_text(self, chunk_size: int | None = None) -> Iterator[str]:
        """A str-iterator over the decoded response content
        that handles both gzip, deflate, etc but also detects the content's
        string encoding.
        """
        for chunk in self.http_response.iter_text(chunk_size):
            yield chunk

    def iter_lines(self) -> Iterator[str]:
        """Like `iter_text()` but will only yield chunks for each line"""
        for chunk in self.http_response.iter_lines():
            yield chunk
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class AsyncAPIResponse(BaseAPIResponse[R]):
    """Asynchronous API response wrapper with lazy, memoised body parsing."""

    @property
    def request_id(self) -> str | None:
        # The server echoes a unique id for each request in this header.
        return self.http_response.headers.get("x-request-id")  # type: ignore[no-any-return]

    @overload
    async def parse(self, *, to: type[_T]) -> _T: ...

    @overload
    async def parse(self) -> R: ...

    async def parse(self, *, to: type[_T] | None = None) -> R | _T:
        """Returns the rich python representation of this response's data.

        For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`.

        You can customise the type that the response is parsed into through
        the `to` argument, e.g.

        ```py
        from openai import BaseModel


        class MyModel(BaseModel):
            foo: str


        obj = response.parse(to=MyModel)
        print(obj.foo)
        ```

        We support parsing:
          - `BaseModel`
          - `dict`
          - `list`
          - `Union`
          - `str`
          - `httpx.Response`
        """
        # Parsed values are cached per target type so repeated `.parse()`
        # calls (with or without an explicit `to=`) do not re-deserialize.
        cache_key = to if to is not None else self._cast_to
        cached = self._parsed_by_type.get(cache_key)
        if cached is not None:
            return cached  # type: ignore[no-any-return]

        # Non-streaming responses must be fully read before parsing.
        if not self._is_sse_stream:
            await self.read()

        parsed = self._parse(to=to)
        # An optional caller-supplied hook can transform the parsed value.
        if is_given(self._options.post_parser):
            parsed = self._options.post_parser(parsed)

        # Attach the request id to model instances for easier debugging.
        if isinstance(parsed, BaseModel):
            add_request_id(parsed, self.request_id)

        self._parsed_by_type[cache_key] = parsed
        return cast(R, parsed)

    async def read(self) -> bytes:
        """Read and return the binary response content."""
        try:
            return await self.http_response.aread()
        except httpx.StreamConsumed as exc:
            # the default error raised by httpx isn't very
            # helpful in our case so we re-raise it with
            # a different error message
            raise StreamAlreadyConsumed() from exc

    async def text(self) -> str:
        """Read and decode the response content into a string."""
        await self.read()
        return self.http_response.text

    async def json(self) -> object:
        """Read and decode the JSON response content."""
        await self.read()
        return self.http_response.json()

    async def close(self) -> None:
        """Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self.http_response.aclose()

    async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]:
        """
        A byte-iterator over the decoded response content.

        This automatically handles gzip, deflate and brotli encoded responses.
        """
        async for chunk in self.http_response.aiter_bytes(chunk_size):
            yield chunk

    async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]:
        """A str-iterator over the decoded response content
        that handles both gzip, deflate, etc but also detects the content's
        string encoding.
        """
        async for chunk in self.http_response.aiter_text(chunk_size):
            yield chunk

    async def iter_lines(self) -> AsyncIterator[str]:
        """Like `iter_text()` but will only yield chunks for each line"""
        async for chunk in self.http_response.aiter_lines():
            yield chunk
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
class BinaryAPIResponse(APIResponse[bytes]):
    """Subclass of APIResponse providing helpers for dealing with binary data.

    Note: If you want to stream the response data instead of eagerly reading it
    all at once then you should use `.with_streaming_response` when making
    the API request, e.g. `.with_streaming_response.get_binary_response()`
    """

    def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `.with_streaming_response.get_binary_response()`
        """
        with open(file, mode="wb") as out:
            for chunk in self.iter_bytes():
                out.write(chunk)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]):
    """Subclass of APIResponse providing helpers for dealing with binary data.

    Note: If you want to stream the response data instead of eagerly reading it
    all at once then you should use `.with_streaming_response` when making
    the API request, e.g. `.with_streaming_response.get_binary_response()`
    """

    async def write_to_file(
        self,
        file: str | os.PathLike[str],
    ) -> None:
        """Write the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path

        Note: if you want to stream the data to the file instead of writing
        all at once then you should use `.with_streaming_response` when making
        the API request, e.g. `.with_streaming_response.get_binary_response()`
        """
        dest = anyio.Path(file)
        async with await dest.open(mode="wb") as out:
            async for chunk in self.iter_bytes():
                await out.write(chunk)
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
class StreamedBinaryAPIResponse(APIResponse[bytes]):
    """APIResponse subclass that streams binary content to disk chunk by chunk."""

    def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Streams the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path
        """
        with open(file, mode="wb") as out:
            for chunk in self.iter_bytes(chunk_size):
                out.write(chunk)
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]):
    """Async APIResponse subclass that streams binary content to disk chunk by chunk."""

    async def stream_to_file(
        self,
        file: str | os.PathLike[str],
        *,
        chunk_size: int | None = None,
    ) -> None:
        """Streams the output to the given file.

        Accepts a filename or any path-like object, e.g. pathlib.Path
        """
        dest = anyio.Path(file)
        async with await dest.open(mode="wb") as out:
            async for chunk in self.iter_bytes(chunk_size):
                await out.write(chunk)
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
class MissingStreamClassError(TypeError):
    """Raised when a streaming response is requested without a `stream_cls`."""

    def __init__(self) -> None:
        msg = "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference"
        super().__init__(msg)
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
class StreamAlreadyConsumed(OpenAIError):
    """
    Attempted to read or stream content, but the content has already
    been streamed.

    This can happen if you use a method like `.iter_lines()` and then attempt
    to read the entire response body afterwards, e.g.

    ```py
    response = await client.post(...)
    async for line in response.iter_lines():
        ...  # do something with `line`

    content = await response.read()
    # ^ error
    ```

    If you want this behaviour you'll need to either manually accumulate the response
    content or call `await response.read()` before iterating over the stream.
    """

    def __init__(self) -> None:
        # The wording is fixed; keep it in sync with the docstring above.
        super().__init__(
            "Attempted to read or stream some content, but the content has "
            "already been streamed. "
            "This could be due to attempting to stream the response "
            "content more than once."
            "\n\n"
            "You can fix this by manually accumulating the response content while streaming "
            "or by calling `.read()` before starting to stream."
        )
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
class ResponseContextManager(Generic[_APIResponseT]):
    """Defers the API request until the context is entered and guarantees
    that the response is closed when the context manager exits.
    """

    def __init__(self, request_func: Callable[[], _APIResponseT]) -> None:
        self._request_func = request_func
        self.__response: _APIResponseT | None = None

    def __enter__(self) -> _APIResponseT:
        # The request is only made here, not at construction time.
        response = self._request_func()
        self.__response = response
        return response

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        response = self.__response
        if response is not None:
            response.close()
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]):
    """Defers awaiting the API request until the async context is entered and
    guarantees that the response is closed when the context manager exits.
    """

    def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None:
        self._api_request = api_request
        self.__response: _AsyncAPIResponseT | None = None

    async def __aenter__(self) -> _AsyncAPIResponseT:
        # The request coroutine is only awaited here, not at construction time.
        response = await self._api_request
        self.__response = response
        return response

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        response = self.__response
        if response is not None:
            await response.close()
|
| 661 |
+
|
| 662 |
+
|
| 663 |
+
def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support streaming and returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]:
        # Tell the base client, via a marker header, to return a streamed raw response.
        headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "stream"
        kwargs["extra_headers"] = headers
        # Defer the actual request until the context manager is entered.
        return ResponseContextManager(
            cast(Callable[[], APIResponse[R]], functools.partial(func, *args, **kwargs))
        )

    return wrapped
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def async_to_streamed_response_wrapper(
    func: Callable[P, Awaitable[R]],
) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support streaming and returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]:
        # Tell the base client, via a marker header, to return a streamed raw response.
        headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "stream"
        kwargs["extra_headers"] = headers
        # Calling `func` builds the coroutine without awaiting it; the context
        # manager awaits it on `__aenter__`.
        return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], func(*args, **kwargs)))

    return wrapped
|
| 701 |
+
|
| 702 |
+
|
| 703 |
+
def to_custom_streamed_response_wrapper(
    func: Callable[P, object],
    response_cls: type[_APIResponseT],
) -> Callable[P, ResponseContextManager[_APIResponseT]]:
    """Higher order function that takes one of our bound API methods and an `APIResponse` class
    and wraps the method to support streaming and returning the given response class directly.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]:
        # Marker headers instruct the base client to stream and to wrap the
        # response in the caller-provided class instead of the default one.
        headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "stream"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers
        # Defer the actual request until the context manager is entered.
        return ResponseContextManager(
            cast(Callable[[], _APIResponseT], functools.partial(func, *args, **kwargs))
        )

    return wrapped
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def async_to_custom_streamed_response_wrapper(
    func: Callable[P, Awaitable[object]],
    response_cls: type[_AsyncAPIResponseT],
) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]:
    """Higher order function that takes one of our bound API methods and an `APIResponse` class
    and wraps the method to support streaming and returning the given response class directly.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]:
        # Marker headers instruct the base client to stream and to wrap the
        # response in the caller-provided class instead of the default one.
        headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "stream"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers
        # Calling `func` builds the coroutine without awaiting it; the context
        # manager awaits it on `__aenter__`.
        return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs)))

    return wrapped
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]:
        # Tell the base client, via a marker header, to return the raw response.
        headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "raw"
        kwargs["extra_headers"] = headers
        return cast(APIResponse[R], func(*args, **kwargs))

    return wrapped
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]:
    """Higher order function that takes one of our bound API methods and wraps it
    to support returning the raw `APIResponse` object directly.
    """

    @functools.wraps(func)
    async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]:
        # Tell the base client, via a marker header, to return the raw response.
        headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "raw"
        kwargs["extra_headers"] = headers
        return cast(AsyncAPIResponse[R], await func(*args, **kwargs))

    return wrapped
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
def to_custom_raw_response_wrapper(
    func: Callable[P, object],
    response_cls: type[_APIResponseT],
) -> Callable[P, _APIResponseT]:
    """Higher order function that takes one of our bound API methods and an `APIResponse` class
    and wraps the method to support returning the given response class directly.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT:
        # Marker headers instruct the base client to return the raw response
        # wrapped in the caller-provided class instead of the default one.
        headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "raw"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers
        return cast(_APIResponseT, func(*args, **kwargs))

    return wrapped
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
def async_to_custom_raw_response_wrapper(
    func: Callable[P, Awaitable[object]],
    response_cls: type[_AsyncAPIResponseT],
) -> Callable[P, Awaitable[_AsyncAPIResponseT]]:
    """Higher order function that takes one of our bound API methods and an `APIResponse` class
    and wraps the method to support returning the given response class directly.

    Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])`
    """

    @functools.wraps(func)
    def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]:
        # Marker headers instruct the base client to return the raw response
        # wrapped in the caller-provided class instead of the default one.
        headers: dict[str, Any] = {**(cast(Any, kwargs.get("extra_headers")) or {})}
        headers[RAW_RESPONSE_HEADER] = "raw"
        headers[OVERRIDE_CAST_TO_HEADER] = response_cls
        kwargs["extra_headers"] = headers
        # Note: `wrapped` is intentionally not `async`; it forwards the
        # coroutine returned by `func` for the caller to await.
        return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs))

    return wrapped
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type:
    """Given a type like `APIResponse[T]`, returns the generic type variable `T`.

    This also handles the case where a concrete subclass is given, e.g.
    ```py
    class MyResponse(APIResponse[bytes]):
        ...

    extract_response_type(MyResponse) -> bytes
    ```
    """
    response_bases = cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse))
    return extract_type_var_from_base(typ, generic_bases=response_bases, index=0)
|
.venv/lib/python3.11/site-packages/openai/_streaming.py
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import json
|
| 5 |
+
import inspect
|
| 6 |
+
from types import TracebackType
|
| 7 |
+
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
|
| 8 |
+
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
|
| 9 |
+
|
| 10 |
+
import httpx
|
| 11 |
+
|
| 12 |
+
from ._utils import is_mapping, extract_type_var_from_base
|
| 13 |
+
from ._exceptions import APIError
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from ._client import OpenAI, AsyncOpenAI
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
_T = TypeVar("_T")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Stream(Generic[_T]):
    """Provides the core interface to iterate over a synchronous stream response."""

    # The underlying HTTP response whose body carries the SSE stream.
    response: httpx.Response

    # Decodes raw response bytes into `ServerSentEvent` objects.
    _decoder: SSEBytesDecoder

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: OpenAI,
    ) -> None:
        self.response = response
        self._cast_to = cast_to
        self._client = client
        self._decoder = client._make_sse_decoder()
        # A single shared generator backs both `__next__` and `__iter__`.
        self._iterator = self.__stream__()

    def __next__(self) -> _T:
        return self._iterator.__next__()

    def __iter__(self) -> Iterator[_T]:
        for item in self._iterator:
            yield item

    def _iter_events(self) -> Iterator[ServerSentEvent]:
        # Decode the raw byte stream into server-sent events.
        yield from self._decoder.iter_bytes(self.response.iter_bytes())

    def __stream__(self) -> Iterator[_T]:
        """Yield each decoded SSE payload, processed into the `cast_to` type.

        Stops at the `[DONE]` sentinel and raises `APIError` for events that
        carry an `error` payload.
        """
        cast_to = cast(Any, self._cast_to)
        response = self.response
        process_data = self._client._process_response_data
        iterator = self._iter_events()

        for sse in iterator:
            # The `[DONE]` sentinel marks the end of the logical stream.
            if sse.data.startswith("[DONE]"):
                break

            if sse.event is None:
                data = sse.json()
                # An `error` key in an un-named event signals a mid-stream failure.
                if is_mapping(data) and data.get("error"):
                    message = None
                    error = data.get("error")
                    if is_mapping(error):
                        message = error.get("message")
                    if not message or not isinstance(message, str):
                        message = "An error occurred during streaming"

                    raise APIError(
                        message=message,
                        request=self.response.request,
                        body=data["error"],
                    )

                yield process_data(data=data, cast_to=cast_to, response=response)

            else:
                data = sse.json()

                # Named `error` events are also surfaced as exceptions.
                if sse.event == "error" and is_mapping(data) and data.get("error"):
                    message = None
                    error = data.get("error")
                    if is_mapping(error):
                        message = error.get("message")
                    if not message or not isinstance(message, str):
                        message = "An error occurred during streaming"

                    raise APIError(
                        message=message,
                        request=self.response.request,
                        body=data["error"],
                    )

                # Named events are wrapped so the event name survives processing.
                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)

        # Ensure the entire stream is consumed
        for _sse in iterator:
            ...

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self.response.close()
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class AsyncStream(Generic[_T]):
|
| 124 |
+
"""Provides the core interface to iterate over an asynchronous stream response."""
|
| 125 |
+
|
| 126 |
+
response: httpx.Response
|
| 127 |
+
|
| 128 |
+
_decoder: SSEDecoder | SSEBytesDecoder
|
| 129 |
+
|
| 130 |
+
def __init__(
|
| 131 |
+
self,
|
| 132 |
+
*,
|
| 133 |
+
cast_to: type[_T],
|
| 134 |
+
response: httpx.Response,
|
| 135 |
+
client: AsyncOpenAI,
|
| 136 |
+
) -> None:
|
| 137 |
+
self.response = response
|
| 138 |
+
self._cast_to = cast_to
|
| 139 |
+
self._client = client
|
| 140 |
+
self._decoder = client._make_sse_decoder()
|
| 141 |
+
self._iterator = self.__stream__()
|
| 142 |
+
|
| 143 |
+
async def __anext__(self) -> _T:
|
| 144 |
+
return await self._iterator.__anext__()
|
| 145 |
+
|
| 146 |
+
async def __aiter__(self) -> AsyncIterator[_T]:
|
| 147 |
+
async for item in self._iterator:
|
| 148 |
+
yield item
|
| 149 |
+
|
| 150 |
+
async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
|
| 151 |
+
async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
|
| 152 |
+
yield sse
|
| 153 |
+
|
| 154 |
+
async def __stream__(self) -> AsyncIterator[_T]:
|
| 155 |
+
cast_to = cast(Any, self._cast_to)
|
| 156 |
+
response = self.response
|
| 157 |
+
process_data = self._client._process_response_data
|
| 158 |
+
iterator = self._iter_events()
|
| 159 |
+
|
| 160 |
+
async for sse in iterator:
|
| 161 |
+
if sse.data.startswith("[DONE]"):
|
| 162 |
+
break
|
| 163 |
+
|
| 164 |
+
if sse.event is None:
|
| 165 |
+
data = sse.json()
|
| 166 |
+
if is_mapping(data) and data.get("error"):
|
| 167 |
+
message = None
|
| 168 |
+
error = data.get("error")
|
| 169 |
+
if is_mapping(error):
|
| 170 |
+
message = error.get("message")
|
| 171 |
+
if not message or not isinstance(message, str):
|
| 172 |
+
message = "An error occurred during streaming"
|
| 173 |
+
|
| 174 |
+
raise APIError(
|
| 175 |
+
message=message,
|
| 176 |
+
request=self.response.request,
|
| 177 |
+
body=data["error"],
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
yield process_data(data=data, cast_to=cast_to, response=response)
|
| 181 |
+
|
| 182 |
+
else:
|
| 183 |
+
data = sse.json()
|
| 184 |
+
|
| 185 |
+
if sse.event == "error" and is_mapping(data) and data.get("error"):
|
| 186 |
+
message = None
|
| 187 |
+
error = data.get("error")
|
| 188 |
+
if is_mapping(error):
|
| 189 |
+
message = error.get("message")
|
| 190 |
+
if not message or not isinstance(message, str):
|
| 191 |
+
message = "An error occurred during streaming"
|
| 192 |
+
|
| 193 |
+
raise APIError(
|
| 194 |
+
message=message,
|
| 195 |
+
request=self.response.request,
|
| 196 |
+
body=data["error"],
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
|
| 200 |
+
|
| 201 |
+
# Ensure the entire stream is consumed
|
| 202 |
+
async for _sse in iterator:
|
| 203 |
+
...
|
| 204 |
+
|
| 205 |
+
async def __aenter__(self) -> Self:
|
| 206 |
+
return self
|
| 207 |
+
|
| 208 |
+
async def __aexit__(
|
| 209 |
+
self,
|
| 210 |
+
exc_type: type[BaseException] | None,
|
| 211 |
+
exc: BaseException | None,
|
| 212 |
+
exc_tb: TracebackType | None,
|
| 213 |
+
) -> None:
|
| 214 |
+
await self.close()
|
| 215 |
+
|
| 216 |
+
async def close(self) -> None:
|
| 217 |
+
"""
|
| 218 |
+
Close the response and release the connection.
|
| 219 |
+
|
| 220 |
+
Automatically called if the response body is read to completion.
|
| 221 |
+
"""
|
| 222 |
+
await self.response.aclose()
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class ServerSentEvent:
|
| 226 |
+
def __init__(
|
| 227 |
+
self,
|
| 228 |
+
*,
|
| 229 |
+
event: str | None = None,
|
| 230 |
+
data: str | None = None,
|
| 231 |
+
id: str | None = None,
|
| 232 |
+
retry: int | None = None,
|
| 233 |
+
) -> None:
|
| 234 |
+
if data is None:
|
| 235 |
+
data = ""
|
| 236 |
+
|
| 237 |
+
self._id = id
|
| 238 |
+
self._data = data
|
| 239 |
+
self._event = event or None
|
| 240 |
+
self._retry = retry
|
| 241 |
+
|
| 242 |
+
@property
|
| 243 |
+
def event(self) -> str | None:
|
| 244 |
+
return self._event
|
| 245 |
+
|
| 246 |
+
@property
|
| 247 |
+
def id(self) -> str | None:
|
| 248 |
+
return self._id
|
| 249 |
+
|
| 250 |
+
@property
|
| 251 |
+
def retry(self) -> int | None:
|
| 252 |
+
return self._retry
|
| 253 |
+
|
| 254 |
+
@property
|
| 255 |
+
def data(self) -> str:
|
| 256 |
+
return self._data
|
| 257 |
+
|
| 258 |
+
def json(self) -> Any:
|
| 259 |
+
return json.loads(self.data)
|
| 260 |
+
|
| 261 |
+
@override
|
| 262 |
+
def __repr__(self) -> str:
|
| 263 |
+
return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})"
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class SSEDecoder:
|
| 267 |
+
_data: list[str]
|
| 268 |
+
_event: str | None
|
| 269 |
+
_retry: int | None
|
| 270 |
+
_last_event_id: str | None
|
| 271 |
+
|
| 272 |
+
def __init__(self) -> None:
|
| 273 |
+
self._event = None
|
| 274 |
+
self._data = []
|
| 275 |
+
self._last_event_id = None
|
| 276 |
+
self._retry = None
|
| 277 |
+
|
| 278 |
+
def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
|
| 279 |
+
"""Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
|
| 280 |
+
for chunk in self._iter_chunks(iterator):
|
| 281 |
+
# Split before decoding so splitlines() only uses \r and \n
|
| 282 |
+
for raw_line in chunk.splitlines():
|
| 283 |
+
line = raw_line.decode("utf-8")
|
| 284 |
+
sse = self.decode(line)
|
| 285 |
+
if sse:
|
| 286 |
+
yield sse
|
| 287 |
+
|
| 288 |
+
def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]:
|
| 289 |
+
"""Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
|
| 290 |
+
data = b""
|
| 291 |
+
for chunk in iterator:
|
| 292 |
+
for line in chunk.splitlines(keepends=True):
|
| 293 |
+
data += line
|
| 294 |
+
if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
|
| 295 |
+
yield data
|
| 296 |
+
data = b""
|
| 297 |
+
if data:
|
| 298 |
+
yield data
|
| 299 |
+
|
| 300 |
+
async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
|
| 301 |
+
"""Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
|
| 302 |
+
async for chunk in self._aiter_chunks(iterator):
|
| 303 |
+
# Split before decoding so splitlines() only uses \r and \n
|
| 304 |
+
for raw_line in chunk.splitlines():
|
| 305 |
+
line = raw_line.decode("utf-8")
|
| 306 |
+
sse = self.decode(line)
|
| 307 |
+
if sse:
|
| 308 |
+
yield sse
|
| 309 |
+
|
| 310 |
+
async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
|
| 311 |
+
"""Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
|
| 312 |
+
data = b""
|
| 313 |
+
async for chunk in iterator:
|
| 314 |
+
for line in chunk.splitlines(keepends=True):
|
| 315 |
+
data += line
|
| 316 |
+
if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
|
| 317 |
+
yield data
|
| 318 |
+
data = b""
|
| 319 |
+
if data:
|
| 320 |
+
yield data
|
| 321 |
+
|
| 322 |
+
def decode(self, line: str) -> ServerSentEvent | None:
|
| 323 |
+
# See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501
|
| 324 |
+
|
| 325 |
+
if not line:
|
| 326 |
+
if not self._event and not self._data and not self._last_event_id and self._retry is None:
|
| 327 |
+
return None
|
| 328 |
+
|
| 329 |
+
sse = ServerSentEvent(
|
| 330 |
+
event=self._event,
|
| 331 |
+
data="\n".join(self._data),
|
| 332 |
+
id=self._last_event_id,
|
| 333 |
+
retry=self._retry,
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
# NOTE: as per the SSE spec, do not reset last_event_id.
|
| 337 |
+
self._event = None
|
| 338 |
+
self._data = []
|
| 339 |
+
self._retry = None
|
| 340 |
+
|
| 341 |
+
return sse
|
| 342 |
+
|
| 343 |
+
if line.startswith(":"):
|
| 344 |
+
return None
|
| 345 |
+
|
| 346 |
+
fieldname, _, value = line.partition(":")
|
| 347 |
+
|
| 348 |
+
if value.startswith(" "):
|
| 349 |
+
value = value[1:]
|
| 350 |
+
|
| 351 |
+
if fieldname == "event":
|
| 352 |
+
self._event = value
|
| 353 |
+
elif fieldname == "data":
|
| 354 |
+
self._data.append(value)
|
| 355 |
+
elif fieldname == "id":
|
| 356 |
+
if "\0" in value:
|
| 357 |
+
pass
|
| 358 |
+
else:
|
| 359 |
+
self._last_event_id = value
|
| 360 |
+
elif fieldname == "retry":
|
| 361 |
+
try:
|
| 362 |
+
self._retry = int(value)
|
| 363 |
+
except (TypeError, ValueError):
|
| 364 |
+
pass
|
| 365 |
+
else:
|
| 366 |
+
pass # Field is ignored.
|
| 367 |
+
|
| 368 |
+
return None
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
@runtime_checkable
|
| 372 |
+
class SSEBytesDecoder(Protocol):
|
| 373 |
+
def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
|
| 374 |
+
"""Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
|
| 375 |
+
...
|
| 376 |
+
|
| 377 |
+
def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
|
| 378 |
+
"""Given an async iterator that yields raw binary data, iterate over it & yield every event encountered"""
|
| 379 |
+
...
|
| 380 |
+
|
| 381 |
+
|
| 382 |
+
def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]:
|
| 383 |
+
"""TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`"""
|
| 384 |
+
origin = get_origin(typ) or typ
|
| 385 |
+
return inspect.isclass(origin) and issubclass(origin, (Stream, AsyncStream))
|
| 386 |
+
|
| 387 |
+
|
| 388 |
+
def extract_stream_chunk_type(
|
| 389 |
+
stream_cls: type,
|
| 390 |
+
*,
|
| 391 |
+
failure_message: str | None = None,
|
| 392 |
+
) -> type:
|
| 393 |
+
"""Given a type like `Stream[T]`, returns the generic type variable `T`.
|
| 394 |
+
|
| 395 |
+
This also handles the case where a concrete subclass is given, e.g.
|
| 396 |
+
```py
|
| 397 |
+
class MyStream(Stream[bytes]):
|
| 398 |
+
...
|
| 399 |
+
|
| 400 |
+
extract_stream_chunk_type(MyStream) -> bytes
|
| 401 |
+
```
|
| 402 |
+
"""
|
| 403 |
+
from ._base_client import Stream, AsyncStream
|
| 404 |
+
|
| 405 |
+
return extract_type_var_from_base(
|
| 406 |
+
stream_cls,
|
| 407 |
+
index=0,
|
| 408 |
+
generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)),
|
| 409 |
+
failure_message=failure_message,
|
| 410 |
+
)
|
.venv/lib/python3.11/site-packages/openai/_types.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from os import PathLike
|
| 4 |
+
from typing import (
|
| 5 |
+
IO,
|
| 6 |
+
TYPE_CHECKING,
|
| 7 |
+
Any,
|
| 8 |
+
Dict,
|
| 9 |
+
List,
|
| 10 |
+
Type,
|
| 11 |
+
Tuple,
|
| 12 |
+
Union,
|
| 13 |
+
Mapping,
|
| 14 |
+
TypeVar,
|
| 15 |
+
Callable,
|
| 16 |
+
Optional,
|
| 17 |
+
Sequence,
|
| 18 |
+
)
|
| 19 |
+
from typing_extensions import Set, Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable
|
| 20 |
+
|
| 21 |
+
import httpx
|
| 22 |
+
import pydantic
|
| 23 |
+
from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport
|
| 24 |
+
|
| 25 |
+
if TYPE_CHECKING:
|
| 26 |
+
from ._models import BaseModel
|
| 27 |
+
from ._response import APIResponse, AsyncAPIResponse
|
| 28 |
+
from ._legacy_response import HttpxBinaryResponseContent
|
| 29 |
+
|
| 30 |
+
Transport = BaseTransport
|
| 31 |
+
AsyncTransport = AsyncBaseTransport
|
| 32 |
+
Query = Mapping[str, object]
|
| 33 |
+
Body = object
|
| 34 |
+
AnyMapping = Mapping[str, object]
|
| 35 |
+
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
|
| 36 |
+
_T = TypeVar("_T")
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
|
| 40 |
+
# while adding support for `PathLike` instances
|
| 41 |
+
ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]]
|
| 42 |
+
ProxiesTypes = Union[str, Proxy, ProxiesDict]
|
| 43 |
+
if TYPE_CHECKING:
|
| 44 |
+
Base64FileInput = Union[IO[bytes], PathLike[str]]
|
| 45 |
+
FileContent = Union[IO[bytes], bytes, PathLike[str]]
|
| 46 |
+
else:
|
| 47 |
+
Base64FileInput = Union[IO[bytes], PathLike]
|
| 48 |
+
FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8.
|
| 49 |
+
FileTypes = Union[
|
| 50 |
+
# file (or bytes)
|
| 51 |
+
FileContent,
|
| 52 |
+
# (filename, file (or bytes))
|
| 53 |
+
Tuple[Optional[str], FileContent],
|
| 54 |
+
# (filename, file (or bytes), content_type)
|
| 55 |
+
Tuple[Optional[str], FileContent, Optional[str]],
|
| 56 |
+
# (filename, file (or bytes), content_type, headers)
|
| 57 |
+
Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
|
| 58 |
+
]
|
| 59 |
+
RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
|
| 60 |
+
|
| 61 |
+
# duplicate of the above but without our custom file support
|
| 62 |
+
HttpxFileContent = Union[IO[bytes], bytes]
|
| 63 |
+
HttpxFileTypes = Union[
|
| 64 |
+
# file (or bytes)
|
| 65 |
+
HttpxFileContent,
|
| 66 |
+
# (filename, file (or bytes))
|
| 67 |
+
Tuple[Optional[str], HttpxFileContent],
|
| 68 |
+
# (filename, file (or bytes), content_type)
|
| 69 |
+
Tuple[Optional[str], HttpxFileContent, Optional[str]],
|
| 70 |
+
# (filename, file (or bytes), content_type, headers)
|
| 71 |
+
Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]],
|
| 72 |
+
]
|
| 73 |
+
HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]]
|
| 74 |
+
|
| 75 |
+
# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
|
| 76 |
+
# where ResponseT includes `None`. In order to support directly
|
| 77 |
+
# passing `None`, overloads would have to be defined for every
|
| 78 |
+
# method that uses `ResponseT` which would lead to an unacceptable
|
| 79 |
+
# amount of code duplication and make it unreadable. See _base_client.py
|
| 80 |
+
# for example usage.
|
| 81 |
+
#
|
| 82 |
+
# This unfortunately means that you will either have
|
| 83 |
+
# to import this type and pass it explicitly:
|
| 84 |
+
#
|
| 85 |
+
# from openai import NoneType
|
| 86 |
+
# client.get('/foo', cast_to=NoneType)
|
| 87 |
+
#
|
| 88 |
+
# or build it yourself:
|
| 89 |
+
#
|
| 90 |
+
# client.get('/foo', cast_to=type(None))
|
| 91 |
+
if TYPE_CHECKING:
|
| 92 |
+
NoneType: Type[None]
|
| 93 |
+
else:
|
| 94 |
+
NoneType = type(None)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class RequestOptions(TypedDict, total=False):
|
| 98 |
+
headers: Headers
|
| 99 |
+
max_retries: int
|
| 100 |
+
timeout: float | Timeout | None
|
| 101 |
+
params: Query
|
| 102 |
+
extra_json: AnyMapping
|
| 103 |
+
idempotency_key: str
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Sentinel class used until PEP 0661 is accepted
|
| 107 |
+
class NotGiven:
|
| 108 |
+
"""
|
| 109 |
+
A sentinel singleton class used to distinguish omitted keyword arguments
|
| 110 |
+
from those passed in with the value None (which may have different behavior).
|
| 111 |
+
|
| 112 |
+
For example:
|
| 113 |
+
|
| 114 |
+
```py
|
| 115 |
+
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
get(timeout=1) # 1s timeout
|
| 119 |
+
get(timeout=None) # No timeout
|
| 120 |
+
get() # Default timeout behavior, which may not be statically known at the method definition.
|
| 121 |
+
```
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
def __bool__(self) -> Literal[False]:
|
| 125 |
+
return False
|
| 126 |
+
|
| 127 |
+
@override
|
| 128 |
+
def __repr__(self) -> str:
|
| 129 |
+
return "NOT_GIVEN"
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
NotGivenOr = Union[_T, NotGiven]
|
| 133 |
+
NOT_GIVEN = NotGiven()
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class Omit:
|
| 137 |
+
"""In certain situations you need to be able to represent a case where a default value has
|
| 138 |
+
to be explicitly removed and `None` is not an appropriate substitute, for example:
|
| 139 |
+
|
| 140 |
+
```py
|
| 141 |
+
# as the default `Content-Type` header is `application/json` that will be sent
|
| 142 |
+
client.post("/upload/files", files={"file": b"my raw file content"})
|
| 143 |
+
|
| 144 |
+
# you can't explicitly override the header as it has to be dynamically generated
|
| 145 |
+
# to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
|
| 146 |
+
client.post(..., headers={"Content-Type": "multipart/form-data"})
|
| 147 |
+
|
| 148 |
+
# instead you can remove the default `application/json` header by passing Omit
|
| 149 |
+
client.post(..., headers={"Content-Type": Omit()})
|
| 150 |
+
```
|
| 151 |
+
"""
|
| 152 |
+
|
| 153 |
+
def __bool__(self) -> Literal[False]:
|
| 154 |
+
return False
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@runtime_checkable
|
| 158 |
+
class ModelBuilderProtocol(Protocol):
|
| 159 |
+
@classmethod
|
| 160 |
+
def build(
|
| 161 |
+
cls: type[_T],
|
| 162 |
+
*,
|
| 163 |
+
response: Response,
|
| 164 |
+
data: object,
|
| 165 |
+
) -> _T: ...
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
Headers = Mapping[str, Union[str, Omit]]
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class HeadersLikeProtocol(Protocol):
|
| 172 |
+
def get(self, __key: str) -> str | None: ...
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
HeadersLike = Union[Headers, HeadersLikeProtocol]
|
| 176 |
+
|
| 177 |
+
ResponseT = TypeVar(
|
| 178 |
+
"ResponseT",
|
| 179 |
+
bound=Union[
|
| 180 |
+
object,
|
| 181 |
+
str,
|
| 182 |
+
None,
|
| 183 |
+
"BaseModel",
|
| 184 |
+
List[Any],
|
| 185 |
+
Dict[str, Any],
|
| 186 |
+
Response,
|
| 187 |
+
ModelBuilderProtocol,
|
| 188 |
+
"APIResponse[Any]",
|
| 189 |
+
"AsyncAPIResponse[Any]",
|
| 190 |
+
"HttpxBinaryResponseContent",
|
| 191 |
+
],
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
StrBytesIntFloat = Union[str, bytes, int, float]
|
| 195 |
+
|
| 196 |
+
# Note: copied from Pydantic
|
| 197 |
+
# https://github.com/pydantic/pydantic/blob/6f31f8f68ef011f84357330186f603ff295312fd/pydantic/main.py#L79
|
| 198 |
+
IncEx: TypeAlias = Union[Set[int], Set[str], Mapping[int, Union["IncEx", bool]], Mapping[str, Union["IncEx", bool]]]
|
| 199 |
+
|
| 200 |
+
PostParser = Callable[[Any], Any]
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
@runtime_checkable
|
| 204 |
+
class InheritsGeneric(Protocol):
|
| 205 |
+
"""Represents a type that has inherited from `Generic`
|
| 206 |
+
|
| 207 |
+
The `__orig_bases__` property can be used to determine the resolved
|
| 208 |
+
type variable for a given base class.
|
| 209 |
+
"""
|
| 210 |
+
|
| 211 |
+
__orig_bases__: tuple[_GenericAlias]
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
class _GenericAlias(Protocol):
|
| 215 |
+
__origin__: type[object]
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class HttpxSendArgs(TypedDict, total=False):
|
| 219 |
+
auth: httpx.Auth
|
.venv/lib/python3.11/site-packages/openai/_version.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
__title__ = "openai"
|
| 4 |
+
__version__ = "1.61.1" # x-release-please-version
|
.venv/lib/python3.11/site-packages/openai/pagination.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Any, List, Generic, TypeVar, Optional, cast
|
| 4 |
+
from typing_extensions import Protocol, override, runtime_checkable
|
| 5 |
+
|
| 6 |
+
from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
|
| 7 |
+
|
| 8 |
+
__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"]
|
| 9 |
+
|
| 10 |
+
_T = TypeVar("_T")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@runtime_checkable
|
| 14 |
+
class CursorPageItem(Protocol):
|
| 15 |
+
id: Optional[str]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
|
| 19 |
+
"""Note: no pagination actually occurs yet, this is for forwards-compatibility."""
|
| 20 |
+
|
| 21 |
+
data: List[_T]
|
| 22 |
+
object: str
|
| 23 |
+
|
| 24 |
+
@override
|
| 25 |
+
def _get_page_items(self) -> List[_T]:
|
| 26 |
+
data = self.data
|
| 27 |
+
if not data:
|
| 28 |
+
return []
|
| 29 |
+
return data
|
| 30 |
+
|
| 31 |
+
@override
|
| 32 |
+
def next_page_info(self) -> None:
|
| 33 |
+
"""
|
| 34 |
+
This page represents a response that isn't actually paginated at the API level
|
| 35 |
+
so there will never be a next page.
|
| 36 |
+
"""
|
| 37 |
+
return None
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
|
| 41 |
+
"""Note: no pagination actually occurs yet, this is for forwards-compatibility."""
|
| 42 |
+
|
| 43 |
+
data: List[_T]
|
| 44 |
+
object: str
|
| 45 |
+
|
| 46 |
+
@override
|
| 47 |
+
def _get_page_items(self) -> List[_T]:
|
| 48 |
+
data = self.data
|
| 49 |
+
if not data:
|
| 50 |
+
return []
|
| 51 |
+
return data
|
| 52 |
+
|
| 53 |
+
@override
|
| 54 |
+
def next_page_info(self) -> None:
|
| 55 |
+
"""
|
| 56 |
+
This page represents a response that isn't actually paginated at the API level
|
| 57 |
+
so there will never be a next page.
|
| 58 |
+
"""
|
| 59 |
+
return None
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
|
| 63 |
+
data: List[_T]
|
| 64 |
+
|
| 65 |
+
@override
|
| 66 |
+
def _get_page_items(self) -> List[_T]:
|
| 67 |
+
data = self.data
|
| 68 |
+
if not data:
|
| 69 |
+
return []
|
| 70 |
+
return data
|
| 71 |
+
|
| 72 |
+
@override
|
| 73 |
+
def next_page_info(self) -> Optional[PageInfo]:
|
| 74 |
+
data = self.data
|
| 75 |
+
if not data:
|
| 76 |
+
return None
|
| 77 |
+
|
| 78 |
+
item = cast(Any, data[-1])
|
| 79 |
+
if not isinstance(item, CursorPageItem) or item.id is None:
|
| 80 |
+
# TODO emit warning log
|
| 81 |
+
return None
|
| 82 |
+
|
| 83 |
+
return PageInfo(params={"after": item.id})
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
|
| 87 |
+
data: List[_T]
|
| 88 |
+
|
| 89 |
+
@override
|
| 90 |
+
def _get_page_items(self) -> List[_T]:
|
| 91 |
+
data = self.data
|
| 92 |
+
if not data:
|
| 93 |
+
return []
|
| 94 |
+
return data
|
| 95 |
+
|
| 96 |
+
@override
|
| 97 |
+
def next_page_info(self) -> Optional[PageInfo]:
|
| 98 |
+
data = self.data
|
| 99 |
+
if not data:
|
| 100 |
+
return None
|
| 101 |
+
|
| 102 |
+
item = cast(Any, data[-1])
|
| 103 |
+
if not isinstance(item, CursorPageItem) or item.id is None:
|
| 104 |
+
# TODO emit warning log
|
| 105 |
+
return None
|
| 106 |
+
|
| 107 |
+
return PageInfo(params={"after": item.id})
|
.venv/lib/python3.11/site-packages/openai/py.typed
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/openai/types/__init__.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from .batch import Batch as Batch
|
| 6 |
+
from .image import Image as Image
|
| 7 |
+
from .model import Model as Model
|
| 8 |
+
from .shared import (
|
| 9 |
+
Metadata as Metadata,
|
| 10 |
+
ErrorObject as ErrorObject,
|
| 11 |
+
FunctionDefinition as FunctionDefinition,
|
| 12 |
+
FunctionParameters as FunctionParameters,
|
| 13 |
+
ResponseFormatText as ResponseFormatText,
|
| 14 |
+
ResponseFormatJSONObject as ResponseFormatJSONObject,
|
| 15 |
+
ResponseFormatJSONSchema as ResponseFormatJSONSchema,
|
| 16 |
+
)
|
| 17 |
+
from .upload import Upload as Upload
|
| 18 |
+
from .embedding import Embedding as Embedding
|
| 19 |
+
from .chat_model import ChatModel as ChatModel
|
| 20 |
+
from .completion import Completion as Completion
|
| 21 |
+
from .moderation import Moderation as Moderation
|
| 22 |
+
from .audio_model import AudioModel as AudioModel
|
| 23 |
+
from .batch_error import BatchError as BatchError
|
| 24 |
+
from .file_object import FileObject as FileObject
|
| 25 |
+
from .image_model import ImageModel as ImageModel
|
| 26 |
+
from .file_content import FileContent as FileContent
|
| 27 |
+
from .file_deleted import FileDeleted as FileDeleted
|
| 28 |
+
from .file_purpose import FilePurpose as FilePurpose
|
| 29 |
+
from .model_deleted import ModelDeleted as ModelDeleted
|
| 30 |
+
from .embedding_model import EmbeddingModel as EmbeddingModel
|
| 31 |
+
from .images_response import ImagesResponse as ImagesResponse
|
| 32 |
+
from .completion_usage import CompletionUsage as CompletionUsage
|
| 33 |
+
from .file_list_params import FileListParams as FileListParams
|
| 34 |
+
from .moderation_model import ModerationModel as ModerationModel
|
| 35 |
+
from .batch_list_params import BatchListParams as BatchListParams
|
| 36 |
+
from .completion_choice import CompletionChoice as CompletionChoice
|
| 37 |
+
from .image_edit_params import ImageEditParams as ImageEditParams
|
| 38 |
+
from .file_create_params import FileCreateParams as FileCreateParams
|
| 39 |
+
from .batch_create_params import BatchCreateParams as BatchCreateParams
|
| 40 |
+
from .batch_request_counts import BatchRequestCounts as BatchRequestCounts
|
| 41 |
+
from .upload_create_params import UploadCreateParams as UploadCreateParams
|
| 42 |
+
from .audio_response_format import AudioResponseFormat as AudioResponseFormat
|
| 43 |
+
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
|
| 44 |
+
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
|
| 45 |
+
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
|
| 46 |
+
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
|
| 47 |
+
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
|
| 48 |
+
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
|
| 49 |
+
from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
|
| 50 |
+
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
|
| 51 |
+
from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions
|
| 52 |
+
from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams
|
| 53 |
+
from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam
|
| 54 |
+
from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam
|
.venv/lib/python3.11/site-packages/openai/types/audio/speech_create_params.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Union
|
| 6 |
+
from typing_extensions import Literal, Required, TypedDict
|
| 7 |
+
|
| 8 |
+
from .speech_model import SpeechModel
|
| 9 |
+
|
| 10 |
+
__all__ = ["SpeechCreateParams"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class SpeechCreateParams(TypedDict, total=False):
|
| 14 |
+
input: Required[str]
|
| 15 |
+
"""The text to generate audio for. The maximum length is 4096 characters."""
|
| 16 |
+
|
| 17 |
+
model: Required[Union[str, SpeechModel]]
|
| 18 |
+
"""
|
| 19 |
+
One of the available [TTS models](https://platform.openai.com/docs/models#tts):
|
| 20 |
+
`tts-1` or `tts-1-hd`
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]]
|
| 24 |
+
"""The voice to use when generating the audio.
|
| 25 |
+
|
| 26 |
+
Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`,
|
| 27 |
+
`sage` and `shimmer`. Previews of the voices are available in the
|
| 28 |
+
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
|
| 32 |
+
"""The format to audio in.
|
| 33 |
+
|
| 34 |
+
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
speed: float
|
| 38 |
+
"""The speed of the generated audio.
|
| 39 |
+
|
| 40 |
+
Select a value from `0.25` to `4.0`. `1.0` is the default.
|
| 41 |
+
"""
|
.venv/lib/python3.11/site-packages/openai/types/audio/speech_model.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing_extensions import Literal, TypeAlias
|
| 4 |
+
|
| 5 |
+
__all__ = ["SpeechModel"]
|
| 6 |
+
|
| 7 |
+
SpeechModel: TypeAlias = Literal["tts-1", "tts-1-hd"]
|
.venv/lib/python3.11/site-packages/openai/types/audio/transcription.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from ..._models import BaseModel
|
| 5 |
+
|
| 6 |
+
__all__ = ["Transcription"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Transcription(BaseModel):
|
| 10 |
+
text: str
|
| 11 |
+
"""The transcribed text."""
|
.venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_params.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import List, Union
|
| 6 |
+
from typing_extensions import Literal, Required, TypedDict
|
| 7 |
+
|
| 8 |
+
from ..._types import FileTypes
|
| 9 |
+
from ..audio_model import AudioModel
|
| 10 |
+
from ..audio_response_format import AudioResponseFormat
|
| 11 |
+
|
| 12 |
+
__all__ = ["TranscriptionCreateParams"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TranscriptionCreateParams(TypedDict, total=False):
|
| 16 |
+
file: Required[FileTypes]
|
| 17 |
+
"""
|
| 18 |
+
The audio file object (not file name) to transcribe, in one of these formats:
|
| 19 |
+
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
model: Required[Union[str, AudioModel]]
|
| 23 |
+
"""ID of the model to use.
|
| 24 |
+
|
| 25 |
+
Only `whisper-1` (which is powered by our open source Whisper V2 model) is
|
| 26 |
+
currently available.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
language: str
|
| 30 |
+
"""The language of the input audio.
|
| 31 |
+
|
| 32 |
+
Supplying the input language in
|
| 33 |
+
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
| 34 |
+
format will improve accuracy and latency.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
prompt: str
|
| 38 |
+
"""An optional text to guide the model's style or continue a previous audio
|
| 39 |
+
segment.
|
| 40 |
+
|
| 41 |
+
The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
|
| 42 |
+
should match the audio language.
|
| 43 |
+
"""
|
| 44 |
+
|
| 45 |
+
response_format: AudioResponseFormat
|
| 46 |
+
"""
|
| 47 |
+
The format of the output, in one of these options: `json`, `text`, `srt`,
|
| 48 |
+
`verbose_json`, or `vtt`.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
temperature: float
|
| 52 |
+
"""The sampling temperature, between 0 and 1.
|
| 53 |
+
|
| 54 |
+
Higher values like 0.8 will make the output more random, while lower values like
|
| 55 |
+
0.2 will make it more focused and deterministic. If set to 0, the model will use
|
| 56 |
+
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
|
| 57 |
+
automatically increase the temperature until certain thresholds are hit.
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
timestamp_granularities: List[Literal["word", "segment"]]
|
| 61 |
+
"""The timestamp granularities to populate for this transcription.
|
| 62 |
+
|
| 63 |
+
`response_format` must be set `verbose_json` to use timestamp granularities.
|
| 64 |
+
Either or both of these options are supported: `word`, or `segment`. Note: There
|
| 65 |
+
is no additional latency for segment timestamps, but generating word timestamps
|
| 66 |
+
incurs additional latency.
|
| 67 |
+
"""
|
.venv/lib/python3.11/site-packages/openai/types/audio/transcription_segment.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import List
|
| 4 |
+
|
| 5 |
+
from ..._models import BaseModel
|
| 6 |
+
|
| 7 |
+
__all__ = ["TranscriptionSegment"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TranscriptionSegment(BaseModel):
|
| 11 |
+
id: int
|
| 12 |
+
"""Unique identifier of the segment."""
|
| 13 |
+
|
| 14 |
+
avg_logprob: float
|
| 15 |
+
"""Average logprob of the segment.
|
| 16 |
+
|
| 17 |
+
If the value is lower than -1, consider the logprobs failed.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
compression_ratio: float
|
| 21 |
+
"""Compression ratio of the segment.
|
| 22 |
+
|
| 23 |
+
If the value is greater than 2.4, consider the compression failed.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
end: float
|
| 27 |
+
"""End time of the segment in seconds."""
|
| 28 |
+
|
| 29 |
+
no_speech_prob: float
|
| 30 |
+
"""Probability of no speech in the segment.
|
| 31 |
+
|
| 32 |
+
If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this
|
| 33 |
+
segment silent.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
seek: int
|
| 37 |
+
"""Seek offset of the segment."""
|
| 38 |
+
|
| 39 |
+
start: float
|
| 40 |
+
"""Start time of the segment in seconds."""
|
| 41 |
+
|
| 42 |
+
temperature: float
|
| 43 |
+
"""Temperature parameter used for generating the segment."""
|
| 44 |
+
|
| 45 |
+
text: str
|
| 46 |
+
"""Text content of the segment."""
|
| 47 |
+
|
| 48 |
+
tokens: List[int]
|
| 49 |
+
"""Array of token IDs for the text content."""
|
.venv/lib/python3.11/site-packages/openai/types/audio/transcription_verbose.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
|
| 5 |
+
from ..._models import BaseModel
|
| 6 |
+
from .transcription_word import TranscriptionWord
|
| 7 |
+
from .transcription_segment import TranscriptionSegment
|
| 8 |
+
|
| 9 |
+
__all__ = ["TranscriptionVerbose"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TranscriptionVerbose(BaseModel):
|
| 13 |
+
duration: float
|
| 14 |
+
"""The duration of the input audio."""
|
| 15 |
+
|
| 16 |
+
language: str
|
| 17 |
+
"""The language of the input audio."""
|
| 18 |
+
|
| 19 |
+
text: str
|
| 20 |
+
"""The transcribed text."""
|
| 21 |
+
|
| 22 |
+
segments: Optional[List[TranscriptionSegment]] = None
|
| 23 |
+
"""Segments of the transcribed text and their corresponding details."""
|
| 24 |
+
|
| 25 |
+
words: Optional[List[TranscriptionWord]] = None
|
| 26 |
+
"""Extracted words and their corresponding timestamps."""
|
.venv/lib/python3.11/site-packages/openai/types/audio/transcription_word.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from ..._models import BaseModel
|
| 5 |
+
|
| 6 |
+
__all__ = ["TranscriptionWord"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class TranscriptionWord(BaseModel):
|
| 10 |
+
end: float
|
| 11 |
+
"""End time of the word in seconds."""
|
| 12 |
+
|
| 13 |
+
start: float
|
| 14 |
+
"""Start time of the word in seconds."""
|
| 15 |
+
|
| 16 |
+
word: str
|
| 17 |
+
"""The text content of the word."""
|
.venv/lib/python3.11/site-packages/openai/types/audio/translation.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from ..._models import BaseModel
|
| 5 |
+
|
| 6 |
+
__all__ = ["Translation"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Translation(BaseModel):
|
| 10 |
+
text: str
|
.venv/lib/python3.11/site-packages/openai/types/audio/translation_create_params.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Union
|
| 6 |
+
from typing_extensions import Required, TypedDict
|
| 7 |
+
|
| 8 |
+
from ..._types import FileTypes
|
| 9 |
+
from ..audio_model import AudioModel
|
| 10 |
+
from ..audio_response_format import AudioResponseFormat
|
| 11 |
+
|
| 12 |
+
__all__ = ["TranslationCreateParams"]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TranslationCreateParams(TypedDict, total=False):
|
| 16 |
+
file: Required[FileTypes]
|
| 17 |
+
"""
|
| 18 |
+
The audio file object (not file name) translate, in one of these formats: flac,
|
| 19 |
+
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
model: Required[Union[str, AudioModel]]
|
| 23 |
+
"""ID of the model to use.
|
| 24 |
+
|
| 25 |
+
Only `whisper-1` (which is powered by our open source Whisper V2 model) is
|
| 26 |
+
currently available.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
prompt: str
|
| 30 |
+
"""An optional text to guide the model's style or continue a previous audio
|
| 31 |
+
segment.
|
| 32 |
+
|
| 33 |
+
The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
|
| 34 |
+
should be in English.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
response_format: AudioResponseFormat
|
| 38 |
+
"""
|
| 39 |
+
The format of the output, in one of these options: `json`, `text`, `srt`,
|
| 40 |
+
`verbose_json`, or `vtt`.
|
| 41 |
+
"""
|
| 42 |
+
|
| 43 |
+
temperature: float
|
| 44 |
+
"""The sampling temperature, between 0 and 1.
|
| 45 |
+
|
| 46 |
+
Higher values like 0.8 will make the output more random, while lower values like
|
| 47 |
+
0.2 will make it more focused and deterministic. If set to 0, the model will use
|
| 48 |
+
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
|
| 49 |
+
automatically increase the temperature until certain thresholds are hit.
|
| 50 |
+
"""
|
.venv/lib/python3.11/site-packages/openai/types/audio/translation_create_response.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Union
|
| 4 |
+
from typing_extensions import TypeAlias
|
| 5 |
+
|
| 6 |
+
from .translation import Translation
|
| 7 |
+
from .translation_verbose import TranslationVerbose
|
| 8 |
+
|
| 9 |
+
__all__ = ["TranslationCreateResponse"]
|
| 10 |
+
|
| 11 |
+
TranslationCreateResponse: TypeAlias = Union[Translation, TranslationVerbose]
|
.venv/lib/python3.11/site-packages/openai/types/audio/translation_verbose.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
|
| 5 |
+
from ..._models import BaseModel
|
| 6 |
+
from .transcription_segment import TranscriptionSegment
|
| 7 |
+
|
| 8 |
+
__all__ = ["TranslationVerbose"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TranslationVerbose(BaseModel):
|
| 12 |
+
duration: float
|
| 13 |
+
"""The duration of the input audio."""
|
| 14 |
+
|
| 15 |
+
language: str
|
| 16 |
+
"""The language of the output translation (always `english`)."""
|
| 17 |
+
|
| 18 |
+
text: str
|
| 19 |
+
"""The translated text."""
|
| 20 |
+
|
| 21 |
+
segments: Optional[List[TranscriptionSegment]] = None
|
| 22 |
+
"""Segments of the translated text and their corresponding details."""
|
.venv/lib/python3.11/site-packages/openai/types/audio_model.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing_extensions import Literal, TypeAlias
|
| 4 |
+
|
| 5 |
+
__all__ = ["AudioModel"]
|
| 6 |
+
|
| 7 |
+
AudioModel: TypeAlias = Literal["whisper-1"]
|
.venv/lib/python3.11/site-packages/openai/types/audio_response_format.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing_extensions import Literal, TypeAlias
|
| 4 |
+
|
| 5 |
+
__all__ = ["AudioResponseFormat"]
|
| 6 |
+
|
| 7 |
+
AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json", "vtt"]
|
.venv/lib/python3.11/site-packages/openai/types/batch.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
from typing_extensions import Literal
|
| 5 |
+
|
| 6 |
+
from .._models import BaseModel
|
| 7 |
+
from .batch_error import BatchError
|
| 8 |
+
from .shared.metadata import Metadata
|
| 9 |
+
from .batch_request_counts import BatchRequestCounts
|
| 10 |
+
|
| 11 |
+
__all__ = ["Batch", "Errors"]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Errors(BaseModel):
|
| 15 |
+
data: Optional[List[BatchError]] = None
|
| 16 |
+
|
| 17 |
+
object: Optional[str] = None
|
| 18 |
+
"""The object type, which is always `list`."""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Batch(BaseModel):
|
| 22 |
+
id: str
|
| 23 |
+
|
| 24 |
+
completion_window: str
|
| 25 |
+
"""The time frame within which the batch should be processed."""
|
| 26 |
+
|
| 27 |
+
created_at: int
|
| 28 |
+
"""The Unix timestamp (in seconds) for when the batch was created."""
|
| 29 |
+
|
| 30 |
+
endpoint: str
|
| 31 |
+
"""The OpenAI API endpoint used by the batch."""
|
| 32 |
+
|
| 33 |
+
input_file_id: str
|
| 34 |
+
"""The ID of the input file for the batch."""
|
| 35 |
+
|
| 36 |
+
object: Literal["batch"]
|
| 37 |
+
"""The object type, which is always `batch`."""
|
| 38 |
+
|
| 39 |
+
status: Literal[
|
| 40 |
+
"validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
|
| 41 |
+
]
|
| 42 |
+
"""The current status of the batch."""
|
| 43 |
+
|
| 44 |
+
cancelled_at: Optional[int] = None
|
| 45 |
+
"""The Unix timestamp (in seconds) for when the batch was cancelled."""
|
| 46 |
+
|
| 47 |
+
cancelling_at: Optional[int] = None
|
| 48 |
+
"""The Unix timestamp (in seconds) for when the batch started cancelling."""
|
| 49 |
+
|
| 50 |
+
completed_at: Optional[int] = None
|
| 51 |
+
"""The Unix timestamp (in seconds) for when the batch was completed."""
|
| 52 |
+
|
| 53 |
+
error_file_id: Optional[str] = None
|
| 54 |
+
"""The ID of the file containing the outputs of requests with errors."""
|
| 55 |
+
|
| 56 |
+
errors: Optional[Errors] = None
|
| 57 |
+
|
| 58 |
+
expired_at: Optional[int] = None
|
| 59 |
+
"""The Unix timestamp (in seconds) for when the batch expired."""
|
| 60 |
+
|
| 61 |
+
expires_at: Optional[int] = None
|
| 62 |
+
"""The Unix timestamp (in seconds) for when the batch will expire."""
|
| 63 |
+
|
| 64 |
+
failed_at: Optional[int] = None
|
| 65 |
+
"""The Unix timestamp (in seconds) for when the batch failed."""
|
| 66 |
+
|
| 67 |
+
finalizing_at: Optional[int] = None
|
| 68 |
+
"""The Unix timestamp (in seconds) for when the batch started finalizing."""
|
| 69 |
+
|
| 70 |
+
in_progress_at: Optional[int] = None
|
| 71 |
+
"""The Unix timestamp (in seconds) for when the batch started processing."""
|
| 72 |
+
|
| 73 |
+
metadata: Optional[Metadata] = None
|
| 74 |
+
"""Set of 16 key-value pairs that can be attached to an object.
|
| 75 |
+
|
| 76 |
+
This can be useful for storing additional information about the object in a
|
| 77 |
+
structured format, and querying for objects via API or the dashboard.
|
| 78 |
+
|
| 79 |
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
| 80 |
+
a maximum length of 512 characters.
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
output_file_id: Optional[str] = None
|
| 84 |
+
"""The ID of the file containing the outputs of successfully executed requests."""
|
| 85 |
+
|
| 86 |
+
request_counts: Optional[BatchRequestCounts] = None
|
| 87 |
+
"""The request counts for different statuses within the batch."""
|
.venv/lib/python3.11/site-packages/openai/types/batch_create_params.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Optional
|
| 6 |
+
from typing_extensions import Literal, Required, TypedDict
|
| 7 |
+
|
| 8 |
+
from .shared_params.metadata import Metadata
|
| 9 |
+
|
| 10 |
+
__all__ = ["BatchCreateParams"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BatchCreateParams(TypedDict, total=False):
|
| 14 |
+
completion_window: Required[Literal["24h"]]
|
| 15 |
+
"""The time frame within which the batch should be processed.
|
| 16 |
+
|
| 17 |
+
Currently only `24h` is supported.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings", "/v1/completions"]]
|
| 21 |
+
"""The endpoint to be used for all requests in the batch.
|
| 22 |
+
|
| 23 |
+
Currently `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are
|
| 24 |
+
supported. Note that `/v1/embeddings` batches are also restricted to a maximum
|
| 25 |
+
of 50,000 embedding inputs across all requests in the batch.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
input_file_id: Required[str]
|
| 29 |
+
"""The ID of an uploaded file that contains requests for the new batch.
|
| 30 |
+
|
| 31 |
+
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
|
| 32 |
+
for how to upload a file.
|
| 33 |
+
|
| 34 |
+
Your input file must be formatted as a
|
| 35 |
+
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
|
| 36 |
+
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
|
| 37 |
+
requests, and can be up to 200 MB in size.
|
| 38 |
+
"""
|
| 39 |
+
|
| 40 |
+
metadata: Optional[Metadata]
|
| 41 |
+
"""Set of 16 key-value pairs that can be attached to an object.
|
| 42 |
+
|
| 43 |
+
This can be useful for storing additional information about the object in a
|
| 44 |
+
structured format, and querying for objects via API or the dashboard.
|
| 45 |
+
|
| 46 |
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
| 47 |
+
a maximum length of 512 characters.
|
| 48 |
+
"""
|
.venv/lib/python3.11/site-packages/openai/types/batch_error.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from .._models import BaseModel
|
| 6 |
+
|
| 7 |
+
__all__ = ["BatchError"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BatchError(BaseModel):
|
| 11 |
+
code: Optional[str] = None
|
| 12 |
+
"""An error code identifying the error type."""
|
| 13 |
+
|
| 14 |
+
line: Optional[int] = None
|
| 15 |
+
"""The line number of the input file where the error occurred, if applicable."""
|
| 16 |
+
|
| 17 |
+
message: Optional[str] = None
|
| 18 |
+
"""A human-readable message providing more details about the error."""
|
| 19 |
+
|
| 20 |
+
param: Optional[str] = None
|
| 21 |
+
"""The name of the parameter that caused the error, if applicable."""
|
.venv/lib/python3.11/site-packages/openai/types/batch_list_params.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing_extensions import TypedDict
|
| 6 |
+
|
| 7 |
+
__all__ = ["BatchListParams"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BatchListParams(TypedDict, total=False):
|
| 11 |
+
after: str
|
| 12 |
+
"""A cursor for use in pagination.
|
| 13 |
+
|
| 14 |
+
`after` is an object ID that defines your place in the list. For instance, if
|
| 15 |
+
you make a list request and receive 100 objects, ending with obj_foo, your
|
| 16 |
+
subsequent call can include after=obj_foo in order to fetch the next page of the
|
| 17 |
+
list.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
limit: int
|
| 21 |
+
"""A limit on the number of objects to be returned.
|
| 22 |
+
|
| 23 |
+
Limit can range between 1 and 100, and the default is 20.
|
| 24 |
+
"""
|
.venv/lib/python3.11/site-packages/openai/types/batch_request_counts.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from .._models import BaseModel
|
| 5 |
+
|
| 6 |
+
__all__ = ["BatchRequestCounts"]
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BatchRequestCounts(BaseModel):
|
| 10 |
+
completed: int
|
| 11 |
+
"""Number of requests that have been completed successfully."""
|
| 12 |
+
|
| 13 |
+
failed: int
|
| 14 |
+
"""Number of requests that have failed."""
|
| 15 |
+
|
| 16 |
+
total: int
|
| 17 |
+
"""Total number of requests in the batch."""
|
.venv/lib/python3.11/site-packages/openai/types/chat_model.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing_extensions import Literal, TypeAlias
|
| 4 |
+
|
| 5 |
+
__all__ = ["ChatModel"]
|
| 6 |
+
|
| 7 |
+
ChatModel: TypeAlias = Literal[
|
| 8 |
+
"o3-mini",
|
| 9 |
+
"o3-mini-2025-01-31",
|
| 10 |
+
"o1",
|
| 11 |
+
"o1-2024-12-17",
|
| 12 |
+
"o1-preview",
|
| 13 |
+
"o1-preview-2024-09-12",
|
| 14 |
+
"o1-mini",
|
| 15 |
+
"o1-mini-2024-09-12",
|
| 16 |
+
"gpt-4o",
|
| 17 |
+
"gpt-4o-2024-11-20",
|
| 18 |
+
"gpt-4o-2024-08-06",
|
| 19 |
+
"gpt-4o-2024-05-13",
|
| 20 |
+
"gpt-4o-audio-preview",
|
| 21 |
+
"gpt-4o-audio-preview-2024-10-01",
|
| 22 |
+
"gpt-4o-audio-preview-2024-12-17",
|
| 23 |
+
"gpt-4o-mini-audio-preview",
|
| 24 |
+
"gpt-4o-mini-audio-preview-2024-12-17",
|
| 25 |
+
"chatgpt-4o-latest",
|
| 26 |
+
"gpt-4o-mini",
|
| 27 |
+
"gpt-4o-mini-2024-07-18",
|
| 28 |
+
"gpt-4-turbo",
|
| 29 |
+
"gpt-4-turbo-2024-04-09",
|
| 30 |
+
"gpt-4-0125-preview",
|
| 31 |
+
"gpt-4-turbo-preview",
|
| 32 |
+
"gpt-4-1106-preview",
|
| 33 |
+
"gpt-4-vision-preview",
|
| 34 |
+
"gpt-4",
|
| 35 |
+
"gpt-4-0314",
|
| 36 |
+
"gpt-4-0613",
|
| 37 |
+
"gpt-4-32k",
|
| 38 |
+
"gpt-4-32k-0314",
|
| 39 |
+
"gpt-4-32k-0613",
|
| 40 |
+
"gpt-3.5-turbo",
|
| 41 |
+
"gpt-3.5-turbo-16k",
|
| 42 |
+
"gpt-3.5-turbo-0301",
|
| 43 |
+
"gpt-3.5-turbo-0613",
|
| 44 |
+
"gpt-3.5-turbo-1106",
|
| 45 |
+
"gpt-3.5-turbo-0125",
|
| 46 |
+
"gpt-3.5-turbo-16k-0613",
|
| 47 |
+
]
|
.venv/lib/python3.11/site-packages/openai/types/completion.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
from typing_extensions import Literal
|
| 5 |
+
|
| 6 |
+
from .._models import BaseModel
|
| 7 |
+
from .completion_usage import CompletionUsage
|
| 8 |
+
from .completion_choice import CompletionChoice
|
| 9 |
+
|
| 10 |
+
__all__ = ["Completion"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Completion(BaseModel):
|
| 14 |
+
id: str
|
| 15 |
+
"""A unique identifier for the completion."""
|
| 16 |
+
|
| 17 |
+
choices: List[CompletionChoice]
|
| 18 |
+
"""The list of completion choices the model generated for the input prompt."""
|
| 19 |
+
|
| 20 |
+
created: int
|
| 21 |
+
"""The Unix timestamp (in seconds) of when the completion was created."""
|
| 22 |
+
|
| 23 |
+
model: str
|
| 24 |
+
"""The model used for completion."""
|
| 25 |
+
|
| 26 |
+
object: Literal["text_completion"]
|
| 27 |
+
"""The object type, which is always "text_completion" """
|
| 28 |
+
|
| 29 |
+
system_fingerprint: Optional[str] = None
|
| 30 |
+
"""This fingerprint represents the backend configuration that the model runs with.
|
| 31 |
+
|
| 32 |
+
Can be used in conjunction with the `seed` request parameter to understand when
|
| 33 |
+
backend changes have been made that might impact determinism.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
usage: Optional[CompletionUsage] = None
|
| 37 |
+
"""Usage statistics for the completion request."""
|
.venv/lib/python3.11/site-packages/openai/types/completion_choice.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Dict, List, Optional
|
| 4 |
+
from typing_extensions import Literal
|
| 5 |
+
|
| 6 |
+
from .._models import BaseModel
|
| 7 |
+
|
| 8 |
+
__all__ = ["CompletionChoice", "Logprobs"]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Logprobs(BaseModel):
|
| 12 |
+
text_offset: Optional[List[int]] = None
|
| 13 |
+
|
| 14 |
+
token_logprobs: Optional[List[float]] = None
|
| 15 |
+
|
| 16 |
+
tokens: Optional[List[str]] = None
|
| 17 |
+
|
| 18 |
+
top_logprobs: Optional[List[Dict[str, float]]] = None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CompletionChoice(BaseModel):
|
| 22 |
+
finish_reason: Literal["stop", "length", "content_filter"]
|
| 23 |
+
"""The reason the model stopped generating tokens.
|
| 24 |
+
|
| 25 |
+
This will be `stop` if the model hit a natural stop point or a provided stop
|
| 26 |
+
sequence, `length` if the maximum number of tokens specified in the request was
|
| 27 |
+
reached, or `content_filter` if content was omitted due to a flag from our
|
| 28 |
+
content filters.
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
index: int
|
| 32 |
+
|
| 33 |
+
logprobs: Optional[Logprobs] = None
|
| 34 |
+
|
| 35 |
+
text: str
|
.venv/lib/python3.11/site-packages/openai/types/completion_create_params.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from typing import Dict, List, Union, Iterable, Optional
|
| 6 |
+
from typing_extensions import Literal, Required, TypedDict
|
| 7 |
+
|
| 8 |
+
from .chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
|
| 9 |
+
|
| 10 |
+
__all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class CompletionCreateParamsBase(TypedDict, total=False):
|
| 14 |
+
model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
|
| 15 |
+
"""ID of the model to use.
|
| 16 |
+
|
| 17 |
+
You can use the
|
| 18 |
+
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
|
| 19 |
+
see all of your available models, or see our
|
| 20 |
+
[Model overview](https://platform.openai.com/docs/models) for descriptions of
|
| 21 |
+
them.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]]
|
| 25 |
+
"""
|
| 26 |
+
The prompt(s) to generate completions for, encoded as a string, array of
|
| 27 |
+
strings, array of tokens, or array of token arrays.
|
| 28 |
+
|
| 29 |
+
Note that <|endoftext|> is the document separator that the model sees during
|
| 30 |
+
training, so if a prompt is not specified the model will generate as if from the
|
| 31 |
+
beginning of a new document.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
best_of: Optional[int]
|
| 35 |
+
"""
|
| 36 |
+
Generates `best_of` completions server-side and returns the "best" (the one with
|
| 37 |
+
the highest log probability per token). Results cannot be streamed.
|
| 38 |
+
|
| 39 |
+
When used with `n`, `best_of` controls the number of candidate completions and
|
| 40 |
+
`n` specifies how many to return – `best_of` must be greater than `n`.
|
| 41 |
+
|
| 42 |
+
**Note:** Because this parameter generates many completions, it can quickly
|
| 43 |
+
consume your token quota. Use carefully and ensure that you have reasonable
|
| 44 |
+
settings for `max_tokens` and `stop`.
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
echo: Optional[bool]
"""Echo back the prompt in addition to the completion"""

frequency_penalty: Optional[float]
"""Number between -2.0 and 2.0.

Positive values penalize new tokens based on their existing frequency in the
text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""

logit_bias: Optional[Dict[str, int]]
"""Modify the likelihood of specified tokens appearing in the completion.

Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
[tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
Mathematically, the bias is added to the logits generated by the model prior to
sampling. The exact effect will vary per model, but values between -1 and 1
should decrease or increase likelihood of selection; values like -100 or 100
should result in a ban or exclusive selection of the relevant token.

As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
"""

logprobs: Optional[int]
"""
Include the log probabilities on the `logprobs` most likely output tokens, as
well the chosen tokens. For example, if `logprobs` is 5, the API will return a
list of the 5 most likely tokens. The API will always return the `logprob` of
the sampled token, so there may be up to `logprobs+1` elements in the response.

The maximum value for `logprobs` is 5.
"""

max_tokens: Optional[int]
"""
The maximum number of [tokens](/tokenizer) that can be generated in the
completion.

The token count of your prompt plus `max_tokens` cannot exceed the model's
context length.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
"""

n: Optional[int]
"""How many completions to generate for each prompt.

**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
"""

presence_penalty: Optional[float]
"""Number between -2.0 and 2.0.

Positive values penalize new tokens based on whether they appear in the text so
far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""

seed: Optional[int]
"""
If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
the same result.

Determinism is not guaranteed, and you should refer to the `system_fingerprint`
response parameter to monitor changes in the backend.
"""

# `Union[str, List[str], None]` is semantically identical to the previous
# `Union[Optional[str], List[str], None]` spelling (Optional[str] already
# admits None); the redundant member is dropped for clarity.
stop: Union[str, List[str], None]
"""Up to 4 sequences where the API will stop generating further tokens.

The returned text will not contain the stop sequence.
"""

stream_options: Optional[ChatCompletionStreamOptionsParam]
"""Options for streaming response. Only set this when you set `stream: true`."""

suffix: Optional[str]
"""The suffix that comes after a completion of inserted text.

This parameter is only supported for `gpt-3.5-turbo-instruct`.
"""

temperature: Optional[float]
"""What sampling temperature to use, between 0 and 2.

Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
"""

top_p: Optional[float]
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
"""

user: str
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
    stream: Optional[Literal[False]]
    """Whether to stream back partial progress.

    When set, tokens are delivered as data-only
    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    as soon as they are available; a `data: [DONE]` message terminates the
    stream.
    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    """
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
    stream: Required[Literal[True]]
    """Whether to stream back partial progress.

    When set, tokens are delivered as data-only
    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    as soon as they are available; a `data: [DONE]` message terminates the
    stream.
    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    """
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
# Public request-params type: a union of the non-streaming and streaming
# variants, discriminated by the `stream` key.
CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
|
.venv/lib/python3.11/site-packages/openai/types/completion_usage.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
| 2 |
+
|
| 3 |
+
from typing import Optional
|
| 4 |
+
|
| 5 |
+
from .._models import BaseModel
|
| 6 |
+
|
| 7 |
+
__all__ = ["CompletionUsage", "CompletionTokensDetails", "PromptTokensDetails"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CompletionTokensDetails(BaseModel):
    accepted_prediction_tokens: Optional[int] = None
    """
    With Predicted Outputs, how many tokens of the supplied prediction showed up
    in the generated completion.
    """

    audio_tokens: Optional[int] = None
    """Audio input tokens generated by the model."""

    reasoning_tokens: Optional[int] = None
    """Tokens the model spent on reasoning."""

    rejected_prediction_tokens: Optional[int] = None
    """
    With Predicted Outputs, how many tokens of the supplied prediction did not
    show up in the completion. Like reasoning tokens, these still count toward
    the total completion tokens for billing, output, and context-window limits.
    """
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class PromptTokensDetails(BaseModel):
    audio_tokens: Optional[int] = None
    """How many audio input tokens the prompt contained."""

    cached_tokens: Optional[int] = None
    """How many prompt tokens were served from the cache."""
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class CompletionUsage(BaseModel):
    completion_tokens: int
    """Token count of the generated completion."""

    prompt_tokens: int
    """Token count of the prompt."""

    total_tokens: int
    """Tokens used by the whole request (prompt + completion)."""

    completion_tokens_details: Optional[CompletionTokensDetails] = None
    """Per-category breakdown of the completion's token usage."""

    prompt_tokens_details: Optional[PromptTokensDetails] = None
    """Per-category breakdown of the prompt's token usage."""
|