koichi12 commited on
Commit
ad4b7dc
·
verified ·
1 Parent(s): 1dc2eab

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/openai/__pycache__/__init__.cpython-311.pyc +0 -0
  2. .venv/lib/python3.11/site-packages/openai/__pycache__/_compat.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/openai/__pycache__/_constants.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/openai/__pycache__/_exceptions.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/openai/__pycache__/_files.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/openai/__pycache__/_models.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/openai/__pycache__/_module_client.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/openai/__pycache__/_qs.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/openai/__pycache__/_response.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/openai/__pycache__/_types.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/openai/__pycache__/pagination.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/__init__.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_logs.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_proxy.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_reflection.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_streams.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_sync.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_transform.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_typing.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_utils.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/openai/_utils/_logs.py +42 -0
  22. .venv/lib/python3.11/site-packages/openai/_utils/_reflection.py +45 -0
  23. .venv/lib/python3.11/site-packages/openai/_utils/_streams.py +12 -0
  24. .venv/lib/python3.11/site-packages/openai/_utils/_sync.py +70 -0
  25. .venv/lib/python3.11/site-packages/openai/_utils/_transform.py +392 -0
  26. .venv/lib/python3.11/site-packages/openai/_utils/_typing.py +149 -0
  27. .venv/lib/python3.11/site-packages/openai/_utils/_utils.py +430 -0
  28. .venv/lib/python3.11/site-packages/openai/cli/__init__.py +1 -0
  29. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/__init__.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/_cli.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/_errors.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/_models.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/_progress.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/openai/cli/__pycache__/_utils.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/openai/cli/_api/_main.py +16 -0
  36. .venv/lib/python3.11/site-packages/openai/cli/_api/audio.py +108 -0
  37. .venv/lib/python3.11/site-packages/openai/cli/_api/chat/__pycache__/__init__.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/openai/cli/_api/chat/__pycache__/completions.cpython-311.pyc +0 -0
  39. .venv/lib/python3.11/site-packages/openai/cli/_api/completions.py +173 -0
  40. .venv/lib/python3.11/site-packages/openai/cli/_api/files.py +80 -0
  41. .venv/lib/python3.11/site-packages/openai/cli/_api/image.py +139 -0
  42. .venv/lib/python3.11/site-packages/openai/cli/_api/models.py +45 -0
  43. .venv/lib/python3.11/site-packages/openai/cli/_cli.py +233 -0
  44. .venv/lib/python3.11/site-packages/openai/cli/_errors.py +21 -0
  45. .venv/lib/python3.11/site-packages/openai/cli/_models.py +17 -0
  46. .venv/lib/python3.11/site-packages/openai/cli/_progress.py +59 -0
  47. .venv/lib/python3.11/site-packages/openai/cli/_tools/__init__.py +1 -0
  48. .venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/__init__.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/_main.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/fine_tunes.cpython-311.pyc +0 -0
.venv/lib/python3.11/site-packages/openai/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (13.4 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_compat.cpython-311.pyc ADDED
Binary file (9.62 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_constants.cpython-311.pyc ADDED
Binary file (709 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_exceptions.cpython-311.pyc ADDED
Binary file (9.06 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_files.cpython-311.pyc ADDED
Binary file (7.48 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_models.cpython-311.pyc ADDED
Binary file (34.7 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_module_client.cpython-311.pyc ADDED
Binary file (7.17 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_qs.cpython-311.pyc ADDED
Binary file (6.71 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_response.cpython-311.pyc ADDED
Binary file (41.1 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/_types.cpython-311.pyc ADDED
Binary file (8.54 kB). View file
 
.venv/lib/python3.11/site-packages/openai/__pycache__/pagination.cpython-311.pyc ADDED
Binary file (4.59 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (2.33 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_logs.cpython-311.pyc ADDED
Binary file (2.81 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_proxy.cpython-311.pyc ADDED
Binary file (3.74 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_reflection.cpython-311.pyc ADDED
Binary file (2.39 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_streams.cpython-311.pyc ADDED
Binary file (829 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_sync.cpython-311.pyc ADDED
Binary file (3.27 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_transform.cpython-311.pyc ADDED
Binary file (14.9 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_typing.cpython-311.pyc ADDED
Binary file (5.75 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/__pycache__/_utils.cpython-311.pyc ADDED
Binary file (21 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_utils/_logs.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ from typing_extensions import override
4
+
5
+ from ._utils import is_dict
6
+
7
+ logger: logging.Logger = logging.getLogger("openai")
8
+ httpx_logger: logging.Logger = logging.getLogger("httpx")
9
+
10
+
11
+ SENSITIVE_HEADERS = {"api-key", "authorization"}
12
+
13
+
14
+ def _basic_config() -> None:
15
+ # e.g. [2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
16
+ logging.basicConfig(
17
+ format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
18
+ datefmt="%Y-%m-%d %H:%M:%S",
19
+ )
20
+
21
+
22
def setup_logging() -> None:
    """Configure the `openai` and `httpx` loggers from the OPENAI_LOG env var.

    "debug" or "info" installs the basic format and sets the matching level;
    any other value (or an unset variable) leaves logging untouched.
    """
    level_for = {"debug": logging.DEBUG, "info": logging.INFO}
    level = level_for.get(os.environ.get("OPENAI_LOG"))
    if level is not None:
        _basic_config()
        logger.setLevel(level)
        httpx_logger.setLevel(level)
32
+
33
+
34
class SensitiveHeadersFilter(logging.Filter):
    """Logging filter that masks sensitive header values in `record.args`.

    The record is never dropped (always returns True); when `record.args`
    carries a `headers` dict, it is shallow-copied and any api-key /
    authorization values are replaced before the record is formatted.
    """

    @override
    def filter(self, record: logging.LogRecord) -> bool:
        args = record.args
        if is_dict(args) and is_dict(args.get("headers")):
            # Copy first so the caller's original dict is never mutated.
            headers = args["headers"] = {**args["headers"]}
            for name in headers:
                if str(name).lower() in SENSITIVE_HEADERS:
                    headers[name] = "<redacted>"
        return True
.venv/lib/python3.11/site-packages/openai/_utils/_reflection.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ from typing import Any, Callable
5
+
6
+
7
def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool:
    """Return True when `func` declares a parameter named `arg_name`."""
    return arg_name in inspect.signature(func).parameters
11
+
12
+
13
def assert_signatures_in_sync(
    source_func: Callable[..., Any],
    check_func: Callable[..., Any],
    *,
    exclude_params: set[str] | frozenset[str] = frozenset(),
    description: str = "",
) -> None:
    """Ensure that the signature of `check_func` matches that of `source_func`.

    Args:
        source_func: The function whose signature is the source of truth.
        check_func: The function being checked against `source_func`.
        exclude_params: Parameter names to skip during the comparison.
            An immutable `frozenset` default avoids the shared-mutable-default
            pitfall (the set is only read, never mutated).
        description: Extra text appended to the error header to identify
            which comparison failed.

    Raises:
        AssertionError: if any source parameter is missing from `check_func`
            or annotated differently.
    """
    check_sig = inspect.signature(check_func)
    source_sig = inspect.signature(source_func)

    errors: list[str] = []

    for name, source_param in source_sig.parameters.items():
        if name in exclude_params:
            continue

        custom_param = check_sig.parameters.get(name)
        if not custom_param:
            errors.append(f"the `{name}` param is missing")
            continue

        if custom_param.annotation != source_param.annotation:
            # NOTE: fixed the grammar of this message (was "are do not match").
            errors.append(
                f"types for the `{name}` param do not match; source={repr(source_param.annotation)} checking={repr(custom_param.annotation)}"
            )

    if errors:
        raise AssertionError(
            f"{len(errors)} errors encountered when comparing signatures{description}:\n\n" + "\n\n".join(errors)
        )
.venv/lib/python3.11/site-packages/openai/_utils/_streams.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from typing_extensions import Iterator, AsyncIterator
3
+
4
+
5
def consume_sync_iterator(iterator: Iterator[Any]) -> None:
    """Exhaust the given iterator, discarding every item."""
    for _item in iterator:
        pass
8
+
9
+
10
async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None:
    """Exhaust the given async iterator, discarding every item."""
    async for _item in iterator:
        pass
.venv/lib/python3.11/site-packages/openai/_utils/_sync.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ import asyncio
5
+ import functools
6
+ import contextvars
7
+ from typing import Any, TypeVar, Callable, Awaitable
8
+ from typing_extensions import ParamSpec
9
+
10
+ T_Retval = TypeVar("T_Retval")
11
+ T_ParamSpec = ParamSpec("T_ParamSpec")
12
+
13
+
14
if sys.version_info >= (3, 9):
    # The native implementation exists from 3.9 onwards.
    to_thread = asyncio.to_thread
else:
    # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
    # for Python 3.8 support
    async def to_thread(
        func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
    ) -> Any:
        """Asynchronously run function *func* in a separate thread.

        Any *args and **kwargs supplied for this function are directly passed
        to *func*. The current :class:`contextvars.Context` is propagated so
        context variables set on the main thread are visible in the worker.

        Returns a coroutine that can be awaited to get the eventual result of *func*.
        """
        loop = asyncio.events.get_running_loop()
        call_in_ctx = functools.partial(contextvars.copy_context().run, func, *args, **kwargs)
        return await loop.run_in_executor(None, call_in_ctx)


# inspired by `asyncer`, https://github.com/tiangolo/asyncer
def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
    """Turn a blocking callable into an awaitable one.

    The returned async function accepts exactly the same positional and
    keyword arguments as `function` and executes it in a worker thread via
    `to_thread` (native `asyncio.to_thread` on 3.9+, the local backport
    above on 3.8).

    Usage:

    ```python
    def blocking_func(arg1, arg2, kwarg1=None):
        # blocking code
        return result


    result = asyncify(blocking_function)(arg1, arg2, kwarg1=value1)
    ```

    ## Arguments

    `function`: a blocking regular callable (e.g. a function)

    ## Return

    An async function that runs the original one in a thread worker and
    returns its result.
    """

    async def _run_in_worker(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval:
        return await to_thread(function, *args, **kwargs)

    return _run_in_worker
.venv/lib/python3.11/site-packages/openai/_utils/_transform.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import base64
5
+ import pathlib
6
+ from typing import Any, Mapping, TypeVar, cast
7
+ from datetime import date, datetime
8
+ from typing_extensions import Literal, get_args, override, get_type_hints
9
+
10
+ import anyio
11
+ import pydantic
12
+
13
+ from ._utils import (
14
+ is_list,
15
+ is_mapping,
16
+ is_iterable,
17
+ )
18
+ from .._files import is_base64_file_input
19
+ from ._typing import (
20
+ is_list_type,
21
+ is_union_type,
22
+ extract_type_arg,
23
+ is_iterable_type,
24
+ is_required_type,
25
+ is_annotated_type,
26
+ strip_annotated_type,
27
+ )
28
+ from .._compat import model_dump, is_typeddict
29
+
30
+ _T = TypeVar("_T")
31
+
32
+
33
+ # TODO: support for drilling globals() and locals()
34
+ # TODO: ensure works correctly with forward references in all cases
35
+
36
+
37
+ PropertyFormat = Literal["iso8601", "base64", "custom"]
38
+
39
+
40
class PropertyInfo:
    """Metadata carried inside `Annotated[...]` to describe how a field is serialised.

    For example:

    class MyParams(TypedDict):
        account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]

    means {'account_holder_name': 'Robert'} becomes {'accountHolderName': 'Robert'}
    before being sent to the API.
    """

    # wire-name override for the field
    alias: str | None
    # serialisation format ("iso8601" | "base64" | "custom")
    format: PropertyFormat | None
    # strftime-style template, used when format == "custom"
    format_template: str | None
    # discriminator field name for tagged unions
    discriminator: str | None

    def __init__(
        self,
        *,
        alias: str | None = None,
        format: PropertyFormat | None = None,
        format_template: str | None = None,
        discriminator: str | None = None,
    ) -> None:
        self.alias, self.format = alias, format
        self.format_template, self.discriminator = format_template, discriminator

    @override
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')"
72
+
73
+
74
def maybe_transform(
    data: object,
    expected_type: object,
) -> Any | None:
    """None-tolerant wrapper over `transform()`.

    See `transform()` for more details.
    """
    return None if data is None else transform(data, expected_type)
85
+
86
+
87
+ # Wrapper over _transform_recursive providing fake types
88
# Wrapper over _transform_recursive providing fake types
def transform(
    data: _T,
    expected_type: object,
) -> _T:
    """Transform dictionaries based off of type information from the given type, for example:

    ```py
    class Params(TypedDict, total=False):
        card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]


    transformed = transform({"card_id": "<my card ID>"}, Params)
    # {'cardID': '<my card ID>'}
    ```

    Keys / data without type information are passed through unchanged.

    Note: the transformations performed here are not visible to the type system.
    """
    return cast(_T, _transform_recursive(data, annotation=cast(type, expected_type)))
109
+
110
+
111
def _get_annotated_type(type_: type) -> type | None:
    """Return `type_` when it is an `Annotated` type, otherwise None.

    Also unwraps the `Required[...]` marker first, so
    `Required[Annotated[T, ...]]` yields `Annotated[T, ...]`.
    """
    if is_required_type(type_):
        type_ = get_args(type_)[0]
    return type_ if is_annotated_type(type_) else None
124
+
125
+
126
def _maybe_transform_key(key: str, type_: type) -> str:
    """Return the alias for `key` declared via `PropertyInfo` metadata on `type_`.

    Only `Annotated` types carrying `PropertyInfo` are considered; when no
    alias is declared the key is returned unchanged.
    """
    annotated_type = _get_annotated_type(type_)
    if annotated_type is not None:
        # Skip the first arg: it is the underlying type, not metadata.
        for meta in get_args(annotated_type)[1:]:
            if isinstance(meta, PropertyInfo) and meta.alias is not None:
                return meta.alias
    return key
143
+
144
+
145
def _transform_recursive(
    data: object,
    *,
    annotation: type,
    inner_type: type | None = None,
) -> object:
    """Transform the given data against the expected type.

    Args:
        annotation: The direct type annotation given to the particular piece of data.
            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc

        inner_type: The "inside" type when the outer annotation is a container
            such as `List[T]` — set to `T` so each entry is transformed with
            the container's element metadata. Defaults to `annotation`.
    """
    if inner_type is None:
        inner_type = annotation

    stripped_type = strip_annotated_type(inner_type)

    # TypedDict payloads are transformed key by key.
    if is_typeddict(stripped_type) and is_mapping(data):
        return _transform_typeddict(data, stripped_type)

    # List[T] / Iterable[T]: transform each element against T.
    matches_container = (is_list_type(stripped_type) and is_list(data)) or (
        is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)
    )
    if matches_container:
        # dicts are technically iterable, but only over their keys and are
        # almost never intended as an iterable payload — leave untouched.
        if isinstance(data, dict):
            return cast(object, data)

        element_type = extract_type_arg(stripped_type, 0)
        return [_transform_recursive(item, annotation=annotation, inner_type=element_type) for item in data]

    if is_union_type(stripped_type):
        # Run the transformation against all subtypes to ensure everything is transformed.
        #
        # TODO: there may be edge cases where the same normalized field name will transform
        # to two different names in different subtypes.
        for subtype in get_args(stripped_type):
            data = _transform_recursive(data, annotation=annotation, inner_type=subtype)
        return data

    if isinstance(data, pydantic.BaseModel):
        return model_dump(data, exclude_unset=True, mode="json")

    annotated_type = _get_annotated_type(annotation)
    if annotated_type is None:
        return data

    # Skip the first arg (the underlying type); apply the first declared format.
    for meta in get_args(annotated_type)[1:]:
        if isinstance(meta, PropertyInfo) and meta.format is not None:
            return _format_data(data, meta.format, meta.format_template)

    return data
207
+
208
+
209
+ def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
210
+ if isinstance(data, (date, datetime)):
211
+ if format_ == "iso8601":
212
+ return data.isoformat()
213
+
214
+ if format_ == "custom" and format_template is not None:
215
+ return data.strftime(format_template)
216
+
217
+ if format_ == "base64" and is_base64_file_input(data):
218
+ binary: str | bytes | None = None
219
+
220
+ if isinstance(data, pathlib.Path):
221
+ binary = data.read_bytes()
222
+ elif isinstance(data, io.IOBase):
223
+ binary = data.read()
224
+
225
+ if isinstance(binary, str): # type: ignore[unreachable]
226
+ binary = binary.encode()
227
+
228
+ if not isinstance(binary, bytes):
229
+ raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
230
+
231
+ return base64.b64encode(binary).decode("ascii")
232
+
233
+ return data
234
+
235
+
236
def _transform_typeddict(
    data: Mapping[str, object],
    expected_type: type,
) -> Mapping[str, object]:
    """Transform each entry of `data` using the field annotations of the TypedDict `expected_type`."""
    annotations = get_type_hints(expected_type, include_extras=True)
    result: dict[str, object] = {}
    for key, value in data.items():
        field_type = annotations.get(key)
        if field_type is None:
            # No annotation for this field: pass the entry through untouched.
            result[key] = value
        else:
            result[_maybe_transform_key(key, field_type)] = _transform_recursive(value, annotation=field_type)
    return result
250
+
251
+
252
async def async_maybe_transform(
    data: object,
    expected_type: object,
) -> Any | None:
    """None-tolerant wrapper over `async_transform()`.

    See `async_transform()` for more details.
    """
    return None if data is None else await async_transform(data, expected_type)
263
+
264
+
265
async def async_transform(
    data: _T,
    expected_type: object,
) -> _T:
    """Async counterpart of `transform()`; see it for the full contract.

    ```py
    class Params(TypedDict, total=False):
        card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]]


    transformed = transform({"card_id": "<my card ID>"}, Params)
    # {'cardID': '<my card ID>'}
    ```

    Keys / data without type information are passed through unchanged; the
    transformations performed here are not visible to the type system.
    """
    return cast(_T, await _async_transform_recursive(data, annotation=cast(type, expected_type)))
286
+
287
+
288
async def _async_transform_recursive(
    data: object,
    *,
    annotation: type,
    inner_type: type | None = None,
) -> object:
    """Async twin of `_transform_recursive`; same traversal, awaited helpers.

    Args:
        annotation: The direct type annotation given to the particular piece of data.
            This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc

        inner_type: The "inside" type when the outer annotation is a container
            such as `List[T]`. Defaults to `annotation`.
    """
    if inner_type is None:
        inner_type = annotation

    stripped_type = strip_annotated_type(inner_type)

    # TypedDict payloads are transformed key by key.
    if is_typeddict(stripped_type) and is_mapping(data):
        return await _async_transform_typeddict(data, stripped_type)

    # List[T] / Iterable[T]: transform each element against T.
    matches_container = (is_list_type(stripped_type) and is_list(data)) or (
        is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)
    )
    if matches_container:
        # dicts are technically iterable, but only over their keys and are
        # almost never intended as an iterable payload — leave untouched.
        if isinstance(data, dict):
            return cast(object, data)

        element_type = extract_type_arg(stripped_type, 0)
        return [await _async_transform_recursive(item, annotation=annotation, inner_type=element_type) for item in data]

    if is_union_type(stripped_type):
        # Run the transformation against all subtypes to ensure everything is transformed.
        #
        # TODO: there may be edge cases where the same normalized field name will transform
        # to two different names in different subtypes.
        for subtype in get_args(stripped_type):
            data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype)
        return data

    if isinstance(data, pydantic.BaseModel):
        return model_dump(data, exclude_unset=True, mode="json")

    annotated_type = _get_annotated_type(annotation)
    if annotated_type is None:
        return data

    # Skip the first arg (the underlying type); apply the first declared format.
    for meta in get_args(annotated_type)[1:]:
        if isinstance(meta, PropertyInfo) and meta.format is not None:
            return await _async_format_data(data, meta.format, meta.format_template)

    return data
350
+
351
+
352
+ async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object:
353
+ if isinstance(data, (date, datetime)):
354
+ if format_ == "iso8601":
355
+ return data.isoformat()
356
+
357
+ if format_ == "custom" and format_template is not None:
358
+ return data.strftime(format_template)
359
+
360
+ if format_ == "base64" and is_base64_file_input(data):
361
+ binary: str | bytes | None = None
362
+
363
+ if isinstance(data, pathlib.Path):
364
+ binary = await anyio.Path(data).read_bytes()
365
+ elif isinstance(data, io.IOBase):
366
+ binary = data.read()
367
+
368
+ if isinstance(binary, str): # type: ignore[unreachable]
369
+ binary = binary.encode()
370
+
371
+ if not isinstance(binary, bytes):
372
+ raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}")
373
+
374
+ return base64.b64encode(binary).decode("ascii")
375
+
376
+ return data
377
+
378
+
379
async def _async_transform_typeddict(
    data: Mapping[str, object],
    expected_type: type,
) -> Mapping[str, object]:
    """Async twin of `_transform_typeddict`: transform entries per the TypedDict's field annotations."""
    annotations = get_type_hints(expected_type, include_extras=True)
    result: dict[str, object] = {}
    for key, value in data.items():
        field_type = annotations.get(key)
        if field_type is None:
            # No annotation for this field: pass the entry through untouched.
            result[key] = value
        else:
            result[_maybe_transform_key(key, field_type)] = await _async_transform_recursive(value, annotation=field_type)
    return result
.venv/lib/python3.11/site-packages/openai/_utils/_typing.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ import typing
5
+ import typing_extensions
6
+ from typing import Any, TypeVar, Iterable, cast
7
+ from collections import abc as _c_abc
8
+ from typing_extensions import (
9
+ TypeIs,
10
+ Required,
11
+ Annotated,
12
+ get_args,
13
+ get_origin,
14
+ )
15
+
16
+ from .._types import InheritsGeneric
17
+ from .._compat import is_union as _is_union
18
+
19
+
20
def is_annotated_type(typ: type) -> bool:
    """Return True when `typ` is an `Annotated[...]` alias."""
    origin = get_origin(typ)
    return origin == Annotated
22
+
23
+
24
def is_list_type(typ: type) -> bool:
    """Return True when `typ` is `list` itself or a parameterised `List[T]`."""
    origin = get_origin(typ) or typ
    return origin == list
26
+
27
+
28
def is_iterable_type(typ: type) -> bool:
    """If the given type is `typing.Iterable[T]` (or `collections.abc.Iterable`)."""
    origin = get_origin(typ) or typ
    return origin in (Iterable, _c_abc.Iterable)
32
+
33
+
34
def is_union_type(typ: type) -> bool:
    """Return True when `typ` is a union (`Union[...]` / `X | Y`)."""
    origin = get_origin(typ)
    return _is_union(origin)
36
+
37
+
38
+ def is_required_type(typ: type) -> bool:
39
+ return get_origin(typ) == Required
40
+
41
+
42
def is_typevar(typ: type) -> bool:
    """Return True when `typ` is itself a `TypeVar` instance.

    Compares `type(typ)` directly (rather than `isinstance`) because type
    checkers otherwise narrow this expression to a constant False.
    """
    return type(typ) == TypeVar  # type: ignore
46
+
47
+
48
# All known implementations of `TypeAliasType`; `typing.TypeAliasType`
# only exists on Python 3.12+, so accept either it or the
# typing_extensions backport.
_TYPE_ALIAS_TYPES: tuple[type[typing_extensions.TypeAliasType], ...] = (typing_extensions.TypeAliasType,)
if sys.version_info >= (3, 12):
    _TYPE_ALIAS_TYPES = (*_TYPE_ALIAS_TYPES, typing.TypeAliasType)


def is_type_alias_type(tp: Any, /) -> TypeIs[typing_extensions.TypeAliasType]:
    """Return whether the provided argument is an instance of `TypeAliasType`.

    ```python
    type Int = int
    is_type_alias_type(Int)
    # > True
    Str = TypeAliasType("Str", str)
    is_type_alias_type(Str)
    # > True
    ```
    """
    return isinstance(tp, _TYPE_ALIAS_TYPES)
66
+
67
+
68
+ # Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]]
69
def strip_annotated_type(typ: type) -> type:
    """Unwrap `Annotated[T, ...]` / `Required[Annotated[T, ...]]` down to the bare `T`.

    Iterative rather than recursive: peels one wrapper per pass until none remain.
    """
    while is_required_type(typ) or is_annotated_type(typ):
        typ = cast(type, get_args(typ)[0])
    return typ
74
+
75
+
76
def extract_type_arg(typ: type, index: int) -> type:
    """Return the type argument of `typ` at `index`, e.g. `(List[int], 0) -> int`.

    Raises:
        RuntimeError: when `typ` has no type argument at that position.
    """
    try:
        return cast(type, get_args(typ)[index])
    except IndexError as err:
        raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err
82
+
83
+
84
def extract_type_var_from_base(
    typ: type,
    *,
    generic_bases: tuple[type, ...],
    index: int,
    failure_message: str | None = None,
) -> type:
    """Given a type like `Foo[T]`, returns the generic type variable `T`.

    Handles a concrete subclass:
    ```py
    class MyResponse(Foo[bytes]):
        ...

    extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes
    ```

    and a generic subclass:
    ```py
    _T = TypeVar('_T')
    class MyResponse(Foo[_T]):
        ...

    extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes
    ```
    """
    cls = cast(object, get_origin(typ) or typ)
    if cls in generic_bases:
        # `typ` is one of the generic bases itself, e.g. `Foo[bytes]`.
        return extract_type_arg(typ, index)

    # Otherwise a subclass was given. `__orig_bases__` is absent from the
    # typeshed stubs (it is intended for internal use), but there is no other
    # way to resolve generic TypeVars for inherited subclasses.
    if isinstance(cls, InheritsGeneric):
        target_base_class: Any | None = None
        for base in cls.__orig_bases__:
            if base.__origin__ in generic_bases:
                target_base_class = base
                break

        if target_base_class is None:
            raise RuntimeError(
                "Could not find the generic base class;\n"
                "This should never happen;\n"
                f"Does {cls} inherit from one of {generic_bases} ?"
            )

        extracted = extract_type_arg(target_base_class, index)
        if not is_typevar(extracted):
            return extracted

        # The extracted argument is itself a TypeVar, i.e. the subclass is
        # generic too, so the concrete argument must come from `typ`
        # (e.g. `MyResponse[bytes]`), not from the base class.
        #
        # Note: with more than one type argument a subclass could reorder
        # them; that is not currently supported.
        return extract_type_arg(typ, index)

    raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}")
.venv/lib/python3.11/site-packages/openai/_utils/_utils.py ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import re
5
+ import inspect
6
+ import functools
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Any,
10
+ Tuple,
11
+ Mapping,
12
+ TypeVar,
13
+ Callable,
14
+ Iterable,
15
+ Sequence,
16
+ cast,
17
+ overload,
18
+ )
19
+ from pathlib import Path
20
+ from datetime import date, datetime
21
+ from typing_extensions import TypeGuard
22
+
23
+ import sniffio
24
+
25
+ from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
26
+ from .._compat import parse_date as parse_date, parse_datetime as parse_datetime
27
+
28
+ _T = TypeVar("_T")
29
+ _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
30
+ _MappingT = TypeVar("_MappingT", bound=Mapping[str, object])
31
+ _SequenceT = TypeVar("_SequenceT", bound=Sequence[object])
32
+ CallableT = TypeVar("CallableT", bound=Callable[..., Any])
33
+
34
+ if TYPE_CHECKING:
35
+ from ..lib.azure import AzureOpenAI, AsyncAzureOpenAI
36
+
37
+
38
def flatten(t: Iterable[Iterable[_T]]) -> list[_T]:
    """Concatenate an iterable of iterables into one flat list."""
    out: list[_T] = []
    for group in t:
        out.extend(group)
    return out
40
+
41
+
42
def extract_files(
    # TODO: this needs to take Dict but variance issues.....
    # create protocol type ?
    query: Mapping[str, object],
    *,
    paths: Sequence[Sequence[str]],
) -> list[tuple[str, FileTypes]]:
    """Recursively extract files from the given dictionary based on specified paths.

    A path may look like this ['foo', 'files', '<array>', 'data'].

    Note: this mutates the given dictionary.
    """
    collected: list[tuple[str, FileTypes]] = []
    for path in paths:
        collected += _extract_items(query, path, index=0, flattened_key=None)
    return collected
59
+
60
+
61
def _extract_items(
    obj: object,
    path: Sequence[str],
    *,
    index: int,
    flattened_key: str | None,
) -> list[tuple[str, FileTypes]]:
    """Recursive worker for `extract_files`.

    Walks `obj` along `path[index:]`; when the path is exhausted, the value
    found there is validated as file content and returned keyed by the
    accumulated bracketed key (e.g. ``foo[files][][data]``).

    Note: pops the final dict key off `obj`, mutating the input.
    """
    try:
        key = path[index]
    except IndexError:
        if isinstance(obj, NotGiven):
            # no value was provided - we can safely ignore
            return []

        # cyclical import
        from .._files import assert_is_file_content

        # We have exhausted the path, return the entry we found.
        assert_is_file_content(obj, key=flattened_key)
        assert flattened_key is not None
        return [(flattened_key, cast(FileTypes, obj))]

    index += 1
    if is_dict(obj):
        try:
            # We are at the last entry in the path so we must remove the field
            if (len(path)) == index:
                item = obj.pop(key)
            else:
                item = obj[key]
        except KeyError:
            # Key was not present in the dictionary, this is not indicative of an error
            # as the given path may not point to a required field. We also do not want
            # to enforce required fields as the API may differ from the spec in some cases.
            return []
        # accumulate a multipart/form-data style key: first segment bare,
        # every later segment wrapped in brackets
        if flattened_key is None:
            flattened_key = key
        else:
            flattened_key += f"[{key}]"
        return _extract_items(
            item,
            path,
            index=index,
            flattened_key=flattened_key,
        )
    elif is_list(obj):
        # lists are only traversed through an explicit '<array>' path segment
        if key != "<array>":
            return []

        return flatten(
            [
                _extract_items(
                    item,
                    path,
                    index=index,
                    flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
                )
                for item in obj
            ]
        )

    # Something unexpected was passed, just ignore it.
    return []
124
+
125
+
126
def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
    """True when *obj* is an actual value rather than the `NotGiven` sentinel."""
    if isinstance(obj, NotGiven):
        return False
    return True
128
+
129
+
130
+ # Type safe methods for narrowing types with TypeVars.
131
+ # The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
132
+ # however this cause Pyright to rightfully report errors. As we know we don't
133
+ # care about the contained types we can safely use `object` in it's place.
134
+ #
135
+ # There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
136
+ # `is_*` is for when you're dealing with an unknown input
137
+ # `is_*_t` is for when you're narrowing a known union type to a specific subset
138
+
139
+
140
def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]:
    """Narrow an unknown value to a tuple of unknown contents."""
    if isinstance(obj, tuple):
        return True
    return False
142
+
143
+
144
def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]:
    """Narrow a known union type down to its tuple member."""
    if isinstance(obj, tuple):
        return True
    return False
146
+
147
+
148
def is_sequence(obj: object) -> TypeGuard[Sequence[object]]:
    """Narrow an unknown value to an abstract `Sequence`."""
    if isinstance(obj, Sequence):
        return True
    return False
150
+
151
+
152
def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]:
    """Narrow a known union type down to its `Sequence` member."""
    if isinstance(obj, Sequence):
        return True
    return False
154
+
155
+
156
def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:
    """Narrow an unknown value to an abstract `Mapping`."""
    if isinstance(obj, Mapping):
        return True
    return False
158
+
159
+
160
def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]:
    """Narrow a known union type down to its `Mapping` member."""
    if isinstance(obj, Mapping):
        return True
    return False
162
+
163
+
164
def is_dict(obj: object) -> TypeGuard[dict[object, object]]:
    """Narrow an unknown value to a concrete `dict`."""
    if isinstance(obj, dict):
        return True
    return False
166
+
167
+
168
def is_list(obj: object) -> TypeGuard[list[object]]:
    """Narrow an unknown value to a concrete `list`."""
    if isinstance(obj, list):
        return True
    return False
170
+
171
+
172
def is_iterable(obj: object) -> TypeGuard[Iterable[object]]:
    """Narrow an unknown value to an `Iterable` (note: strings qualify)."""
    if isinstance(obj, Iterable):
        return True
    return False
174
+
175
+
176
def deepcopy_minimal(item: _T) -> _T:
    """Minimal reimplementation of copy.deepcopy() that will only copy certain object types:

    - mappings, e.g. `dict`
    - list

    This is done for performance reasons.
    """
    if isinstance(item, Mapping):
        copied: Any = {key: deepcopy_minimal(value) for key, value in item.items()}
        return copied
    if isinstance(item, list):
        copied = [deepcopy_minimal(entry) for entry in item]
        return copied
    # everything else is returned by reference, not copied
    return item
189
+
190
+
191
+ # copied from https://github.com/Rapptz/RoboDanny
192
def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str:
    """Join strings for display: '', 'a', 'a or b', 'a, b or c'."""
    count = len(seq)
    if count == 0:
        return ""
    if count == 1:
        return seq[0]
    if count == 2:
        return f"{seq[0]} {final} {seq[1]}"
    # three or more: delimit all but the last, then attach the final word
    return delim.join(seq[:-1]) + f" {final} {seq[-1]}"
204
+
205
+
206
def quote(string: str) -> str:
    """Add single quotation marks around the given string. Does *not* do any escaping."""
    return "'" + string + "'"
209
+
210
+
211
def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]:
    """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function.

    Useful for enforcing runtime validation of overloaded functions.

    Example usage:
    ```py
    @overload
    def foo(*, a: str) -> str: ...


    @overload
    def foo(*, b: bool) -> str: ...


    # This enforces the same constraints that a static type checker would
    # i.e. that either a or b must be passed to the function
    @required_args(["a"], ["b"])
    def foo(*, a: str | None = None, b: bool | None = None) -> str: ...
    ```
    """

    def inner(func: CallableT) -> CallableT:
        # names of parameters that may be supplied positionally, in order,
        # so positional call-site args can be mapped back to their names
        params = inspect.signature(func).parameters
        positional = [
            name
            for name, param in params.items()
            if param.kind
            in {
                param.POSITIONAL_ONLY,
                param.POSITIONAL_OR_KEYWORD,
            }
        ]

        @functools.wraps(func)
        def wrapper(*args: object, **kwargs: object) -> object:
            # collect every argument name the caller actually provided
            given_params: set[str] = set()
            for i, _ in enumerate(args):
                try:
                    given_params.add(positional[i])
                except IndexError:
                    raise TypeError(
                        f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given"
                    ) from None

            for key in kwargs.keys():
                given_params.add(key)

            # accept the call if any variant's required names are all present
            for variant in variants:
                matches = all((param in given_params for param in variant))
                if matches:
                    break
            else:  # no break
                if len(variants) > 1:
                    variations = human_join(
                        ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants]
                    )
                    msg = f"Missing required arguments; Expected either {variations} arguments to be given"
                else:
                    assert len(variants) > 0

                    # TODO: this error message is not deterministic
                    missing = list(set(variants[0]) - given_params)
                    if len(missing) > 1:
                        msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}"
                    else:
                        msg = f"Missing required argument: {quote(missing[0])}"
                raise TypeError(msg)
            return func(*args, **kwargs)

        return wrapper  # type: ignore

    return inner
284
+
285
+
286
+ _K = TypeVar("_K")
287
+ _V = TypeVar("_V")
288
+
289
+
290
@overload
def strip_not_given(obj: None) -> None: ...


@overload
def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ...


@overload
def strip_not_given(obj: object) -> object: ...


def strip_not_given(obj: object | None) -> object:
    """Remove all top-level keys where their values are instances of `NotGiven`"""
    if obj is None:
        return None

    if is_mapping(obj):
        # rebuild the mapping without the sentinel-valued entries
        return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)}

    return obj
311
+
312
+
313
def coerce_integer(val: str) -> int:
    """Parse *val* as a base-10 integer."""
    return int(val, 10)
315
+
316
+
317
def coerce_float(val: str) -> float:
    """Parse *val* as a float."""
    parsed = float(val)
    return parsed
319
+
320
+
321
def coerce_boolean(val: str) -> bool:
    """True only for the exact strings "true", "1" or "on" (case-sensitive)."""
    return val in ("true", "1", "on")
323
+
324
+
325
def maybe_coerce_integer(val: str | None) -> int | None:
    """Parse *val* as a base-10 integer, passing `None` straight through."""
    return None if val is None else int(val, base=10)
329
+
330
+
331
def maybe_coerce_float(val: str | None) -> float | None:
    """Parse *val* as a float, passing `None` straight through."""
    return None if val is None else float(val)
335
+
336
+
337
def maybe_coerce_boolean(val: str | None) -> bool | None:
    """Coerce the strings "true"/"1"/"on" to True, passing `None` straight through."""
    return None if val is None else val in ("true", "1", "on")
341
+
342
+
343
def removeprefix(string: str, prefix: str) -> str:
    """Remove a prefix from a string.

    Backport of `str.removeprefix` for Python < 3.9
    """
    return string[len(prefix):] if string.startswith(prefix) else string
351
+
352
+
353
def removesuffix(string: str, suffix: str) -> str:
    """Remove a suffix from a string.

    Backport of `str.removesuffix` for Python < 3.9

    The `suffix` truthiness guard is required for correctness: with an
    empty suffix, `string.endswith("")` is True and `string[:-0]` would
    evaluate to `string[:0]`, wrongly returning "" instead of `string`
    (the builtin `str.removesuffix` returns the string unchanged).
    """
    if suffix and string.endswith(suffix):
        return string[: -len(suffix)]
    return string
361
+
362
+
363
def file_from_path(path: str) -> FileTypes:
    """Read *path* fully into memory and return a ``(basename, bytes)`` pair."""
    file_name = os.path.basename(path)
    contents = Path(path).read_bytes()
    return (file_name, contents)
367
+
368
+
369
def get_required_header(headers: HeadersLike, header: str) -> str:
    """Look up *header* in *headers* case-insensitively, raising if absent.

    Tries an exact case-insensitive scan first (mapping inputs only), then
    falls back to `.get()` lookups under several common capitalisations.

    Raises:
        ValueError: when no casing of the header yields a truthy value.
    """
    lower_header = header.lower()
    if is_mapping_t(headers):
        # mypy doesn't understand the type narrowing here
        for k, v in headers.items():  # type: ignore
            if k.lower() == lower_header and isinstance(v, str):
                return v

    # to deal with the case where the header looks like Stainless-Event-Id
    intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize())

    # NOTE(review): this fallback assumes `headers` supports `.get()`
    # (e.g. httpx.Headers) — confirm for non-mapping HeadersLike inputs.
    for normalized_header in [header, lower_header, header.upper(), intercaps_header]:
        value = headers.get(normalized_header)
        if value:
            return value

    raise ValueError(f"Could not find {header} header")
386
+
387
+
388
def get_async_library() -> str:
    """Name of the running async library per sniffio, or "false" outside one."""
    try:
        library = sniffio.current_async_library()
    except Exception:
        # not inside an async context (or sniffio failed) — report "false"
        return "false"
    return library
393
+
394
+
395
def lru_cache(*, maxsize: int | None = 128) -> Callable[[CallableT], CallableT]:
    """A version of functools.lru_cache that retains the type signature
    for the wrapped function arguments.
    """
    decorator = functools.lru_cache(maxsize=maxsize)  # noqa: TID251
    return cast(Any, decorator)  # type: ignore[no-any-return]
403
+
404
+
405
def json_safe(data: object) -> object:
    """Translates a mapping / sequence recursively in the same fashion
    as `pydantic` v2's `model_dump(mode="json")`.
    """
    if isinstance(data, Mapping):
        return {json_safe(key): json_safe(value) for key, value in data.items()}

    if isinstance(data, (datetime, date)):
        return data.isoformat()

    # strings/bytes are iterable but must be returned whole, not exploded
    if isinstance(data, (str, bytes, bytearray)):
        return data

    if isinstance(data, Iterable):
        return [json_safe(item) for item in data]

    return data
419
+
420
+
421
def is_azure_client(client: object) -> TypeGuard[AzureOpenAI]:
    """Runtime check (and type narrow) that *client* is a sync `AzureOpenAI` client."""
    # local import: defers loading the azure module until this is first called
    from ..lib.azure import AzureOpenAI

    return isinstance(client, AzureOpenAI)
425
+
426
+
427
def is_async_azure_client(client: object) -> TypeGuard[AsyncAzureOpenAI]:
    """Runtime check (and type narrow) that *client* is an `AsyncAzureOpenAI` client."""
    # local import: defers loading the azure module until this is first called
    from ..lib.azure import AsyncAzureOpenAI

    return isinstance(client, AsyncAzureOpenAI)
.venv/lib/python3.11/site-packages/openai/cli/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from ._cli import main as main
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (230 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/_cli.cpython-311.pyc ADDED
Binary file (10.2 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/_errors.cpython-311.pyc ADDED
Binary file (1.47 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/_models.cpython-311.pyc ADDED
Binary file (1.31 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/_progress.cpython-311.pyc ADDED
Binary file (3.99 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/__pycache__/_utils.cpython-311.pyc ADDED
Binary file (2.01 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/_api/_main.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from argparse import ArgumentParser
4
+
5
+ from . import chat, audio, files, image, models, completions
6
+
7
+
8
def register_commands(parser: ArgumentParser) -> None:
    """Attach every API subcommand group (chat, image, audio, ...) to *parser*."""
    subcommands = parser.add_subparsers(help="All API subcommands")

    chat.register(subcommands)
    image.register(subcommands)
    audio.register(subcommands)
    files.register(subcommands)
    models.register(subcommands)
    completions.register(subcommands)
.venv/lib/python3.11/site-packages/openai/cli/_api/audio.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from typing import TYPE_CHECKING, Any, Optional, cast
5
+ from argparse import ArgumentParser
6
+
7
+ from .._utils import get_client, print_model
8
+ from ..._types import NOT_GIVEN
9
+ from .._models import BaseModel
10
+ from .._progress import BufferReader
11
+ from ...types.audio import Transcription
12
+
13
+ if TYPE_CHECKING:
14
+ from argparse import _SubParsersAction
15
+
16
+
17
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Register the `audio.transcriptions.create` and `audio.translations.create` subcommands."""
    # transcriptions
    sub = subparser.add_parser("audio.transcriptions.create")

    # Required
    sub.add_argument("-m", "--model", type=str, default="whisper-1")
    sub.add_argument("-f", "--file", type=str, required=True)
    # Optional
    sub.add_argument("--response-format", type=str)
    sub.add_argument("--language", type=str)
    sub.add_argument("-t", "--temperature", type=float)
    sub.add_argument("--prompt", type=str)
    sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs)

    # translations
    sub = subparser.add_parser("audio.translations.create")

    # Required
    sub.add_argument("-f", "--file", type=str, required=True)
    # Optional
    sub.add_argument("-m", "--model", type=str, default="whisper-1")
    sub.add_argument("--response-format", type=str)
    # TODO: doesn't seem to be supported by the API
    # sub.add_argument("--language", type=str)
    sub.add_argument("-t", "--temperature", type=float)
    sub.add_argument("--prompt", type=str)
    sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs)
44
+
45
+
46
class CLITranscribeArgs(BaseModel):
    """Parsed arguments for the `audio.transcriptions.create` subcommand."""

    model: str
    file: str
    # optional flags default to None and are mapped to NOT_GIVEN by the handler
    response_format: Optional[str] = None
    language: Optional[str] = None
    temperature: Optional[float] = None
    prompt: Optional[str] = None
53
+
54
+
55
class CLITranslationArgs(BaseModel):
    """Parsed arguments for the `audio.translations.create` subcommand."""

    model: str
    file: str
    response_format: Optional[str] = None
    # NOTE(review): no --language flag is registered for translations (see
    # the TODO in `register`), so this field always stays None.
    language: Optional[str] = None
    temperature: Optional[float] = None
    prompt: Optional[str] = None
62
+
63
+
64
class CLIAudio:
    """Handlers for the `audio.*` CLI subcommands."""

    @staticmethod
    def transcribe(args: CLITranscribeArgs) -> None:
        """Upload an audio file for transcription and print the response."""
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.transcriptions.create(
                file=(args.file, buffer_reader),
                model=args.model,
                language=args.language or NOT_GIVEN,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        )
        # the API may return a bare string; print it verbatim in that case
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)

    @staticmethod
    def translate(args: CLITranslationArgs) -> None:
        """Upload an audio file for translation and print the response."""
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        model = cast(
            "Transcription | str",
            get_client().audio.translations.create(
                file=(args.file, buffer_reader),
                model=args.model,
                temperature=args.temperature or NOT_GIVEN,
                prompt=args.prompt or NOT_GIVEN,
                # casts required because the API is typed for enums
                # but we don't want to validate that here for forwards-compat
                response_format=cast(Any, args.response_format),
            ),
        )
        # the API may return a bare string; print it verbatim in that case
        if isinstance(model, str):
            sys.stdout.write(model + "\n")
        else:
            print_model(model)
.venv/lib/python3.11/site-packages/openai/cli/_api/chat/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (779 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/cli/_api/chat/__pycache__/completions.cpython-311.pyc ADDED
Binary file (8.75 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/_api/completions.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from typing import TYPE_CHECKING, Optional, cast
5
+ from argparse import ArgumentParser
6
+ from functools import partial
7
+
8
+ from openai.types.completion import Completion
9
+
10
+ from .._utils import get_client
11
+ from ..._types import NOT_GIVEN, NotGivenOr
12
+ from ..._utils import is_given
13
+ from .._errors import CLIError
14
+ from .._models import BaseModel
15
+ from ..._streaming import Stream
16
+
17
+ if TYPE_CHECKING:
18
+ from argparse import _SubParsersAction
19
+
20
+
21
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Register the `completions.create` subcommand and all of its flags."""
    sub = subparser.add_parser("completions.create")

    # Required
    sub.add_argument(
        "-m",
        "--model",
        help="The model to use",
        required=True,
    )

    # Optional
    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
    sub.add_argument(
        "-t",
        "--temperature",
        help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.

Mutually exclusive with `top_p`.""",
        type=float,
    )
    sub.add_argument(
        "-P",
        "--top_p",
        help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.

Mutually exclusive with `temperature`.""",
        type=float,
    )
    sub.add_argument(
        "-n",
        "--n",
        help="How many sub-completions to generate for each prompt.",
        type=int,
    )
    sub.add_argument(
        "--logprobs",
        help="Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
        type=int,
    )
    sub.add_argument(
        "--best_of",
        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
        type=int,
    )
    sub.add_argument(
        "--echo",
        help="Echo back the prompt in addition to the completion",
        action="store_true",
    )
    sub.add_argument(
        "--frequency_penalty",
        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
        type=float,
    )
    sub.add_argument(
        "--presence_penalty",
        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
        type=float,
    )
    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
    sub.add_argument(
        "--user",
        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
    )
    # TODO: add support for logit_bias
    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
91
+
92
+
93
class CLICompletionCreateArgs(BaseModel):
    """Parsed arguments for the `completions.create` subcommand."""

    model: str
    stream: bool = False

    prompt: Optional[str] = None
    # NOT_GIVEN sentinel defaults distinguish "flag not passed" from an
    # explicit value; they are forwarded to the client as-is.
    n: NotGivenOr[int] = NOT_GIVEN
    stop: NotGivenOr[str] = NOT_GIVEN
    user: NotGivenOr[str] = NOT_GIVEN
    echo: NotGivenOr[bool] = NOT_GIVEN
    suffix: NotGivenOr[str] = NOT_GIVEN
    best_of: NotGivenOr[int] = NOT_GIVEN
    top_p: NotGivenOr[float] = NOT_GIVEN
    logprobs: NotGivenOr[int] = NOT_GIVEN
    max_tokens: NotGivenOr[int] = NOT_GIVEN
    temperature: NotGivenOr[float] = NOT_GIVEN
    presence_penalty: NotGivenOr[float] = NOT_GIVEN
    frequency_penalty: NotGivenOr[float] = NOT_GIVEN
110
+
111
+
112
class CLICompletions:
    """Handlers for the `completions.create` CLI subcommand."""

    @staticmethod
    def create(args: CLICompletionCreateArgs) -> None:
        """Issue a completion request (streamed or not) and print the result.

        Raises:
            CLIError: when `n > 1` is combined with `--stream`, which this
                line-oriented output cannot represent.
        """
        if is_given(args.n) and args.n > 1 and args.stream:
            raise CLIError("Can't stream completions with n>1 with the current CLI")

        make_request = partial(
            get_client().completions.create,
            n=args.n,
            echo=args.echo,
            stop=args.stop,
            user=args.user,
            model=args.model,
            top_p=args.top_p,
            prompt=args.prompt,
            suffix=args.suffix,
            best_of=args.best_of,
            logprobs=args.logprobs,
            max_tokens=args.max_tokens,
            temperature=args.temperature,
            presence_penalty=args.presence_penalty,
            frequency_penalty=args.frequency_penalty,
        )

        if args.stream:
            return CLICompletions._stream_create(
                # mypy doesn't understand the `partial` function but pyright does
                cast(Stream[Completion], make_request(stream=True))  # pyright: ignore[reportUnnecessaryCast]
            )

        return CLICompletions._create(make_request())

    @staticmethod
    def _create(completion: Completion) -> None:
        """Print a non-streamed completion; one labelled section per choice."""
        should_print_header = len(completion.choices) > 1
        for choice in completion.choices:
            if should_print_header:
                sys.stdout.write("===== Completion {} =====\n".format(choice.index))

            sys.stdout.write(choice.text)

            if should_print_header or not choice.text.endswith("\n"):
                sys.stdout.write("\n")

            sys.stdout.flush()

    @staticmethod
    def _stream_create(stream: Stream[Completion]) -> None:
        """Print a streamed completion chunk-by-chunk as tokens arrive."""
        for completion in stream:
            should_print_header = len(completion.choices) > 1
            # print choices in index order within each chunk
            for choice in sorted(completion.choices, key=lambda c: c.index):
                if should_print_header:
                    # Fix: header previously read "Chat Completion" — a
                    # copy-paste from the chat CLI; this is the legacy
                    # completions endpoint, so label it consistently with
                    # the non-streaming `_create` output above.
                    sys.stdout.write("===== Completion {} =====\n".format(choice.index))

                sys.stdout.write(choice.text)

                if should_print_header:
                    sys.stdout.write("\n")

                sys.stdout.flush()

        sys.stdout.write("\n")
.venv/lib/python3.11/site-packages/openai/cli/_api/files.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any, cast
4
+ from argparse import ArgumentParser
5
+
6
+ from .._utils import get_client, print_model
7
+ from .._models import BaseModel
8
+ from .._progress import BufferReader
9
+
10
+ if TYPE_CHECKING:
11
+ from argparse import _SubParsersAction
12
+
13
+
14
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Register the `files.*` subcommands (create/retrieve/delete/list)."""
    sub = subparser.add_parser("files.create")

    sub.add_argument(
        "-f",
        "--file",
        required=True,
        help="File to upload",
    )
    sub.add_argument(
        "-p",
        "--purpose",
        help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)",
        required=True,
    )
    sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs)

    # Fix: retrieve/delete only parse `--id`, so their namespaces must be
    # validated against CLIFileIDArgs (matching the CLIFile.get/.delete
    # signatures). The previous CLIFileCreateArgs would fail validation on
    # the absent required `file`/`purpose` fields.
    sub = subparser.add_parser("files.retrieve")
    sub.add_argument("-i", "--id", required=True, help="The files ID")
    sub.set_defaults(func=CLIFile.get, args_model=CLIFileIDArgs)

    sub = subparser.add_parser("files.delete")
    sub.add_argument("-i", "--id", required=True, help="The files ID")
    sub.set_defaults(func=CLIFile.delete, args_model=CLIFileIDArgs)

    sub = subparser.add_parser("files.list")
    sub.set_defaults(func=CLIFile.list)
41
+
42
+
43
class CLIFileIDArgs(BaseModel):
    """Parsed arguments for file subcommands that take only a file ID."""

    id: str
45
+
46
+
47
class CLIFileCreateArgs(BaseModel):
    """Parsed arguments for the `files.create` subcommand."""

    # local path of the file to upload
    file: str
    purpose: str
50
+
51
+
52
class CLIFile:
    """Handlers for the `files.*` CLI subcommands."""

    @staticmethod
    def create(args: CLIFileCreateArgs) -> None:
        """Upload a local file and print the created file object."""
        with open(args.file, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        file = get_client().files.create(
            file=(args.file, buffer_reader),
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            purpose=cast(Any, args.purpose),
        )
        print_model(file)

    @staticmethod
    def get(args: CLIFileIDArgs) -> None:
        """Fetch a single file's metadata by ID and print it."""
        file = get_client().files.retrieve(file_id=args.id)
        print_model(file)

    @staticmethod
    def delete(args: CLIFileIDArgs) -> None:
        """Delete a file by ID and print the result."""
        file = get_client().files.delete(file_id=args.id)
        print_model(file)

    @staticmethod
    def list() -> None:
        """Print every file associated with the account."""
        files = get_client().files.list()
        for file in files:
            print_model(file)
.venv/lib/python3.11/site-packages/openai/cli/_api/image.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any, cast
4
+ from argparse import ArgumentParser
5
+
6
+ from .._utils import get_client, print_model
7
+ from ..._types import NOT_GIVEN, NotGiven, NotGivenOr
8
+ from .._models import BaseModel
9
+ from .._progress import BufferReader
10
+
11
+ if TYPE_CHECKING:
12
+ from argparse import _SubParsersAction
13
+
14
+
15
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Register the `images.*` subcommands (generate/edit/create_variation)."""
    sub = subparser.add_parser("images.generate")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-p", "--prompt", type=str, required=True)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs)

    sub = subparser.add_parser("images.edit")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-p", "--prompt", type=str, required=True)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument(
        "-I",
        "--image",
        type=str,
        required=True,
        help="Image to modify. Should be a local path and a PNG encoded image.",
    )
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.add_argument(
        "-M",
        "--mask",
        type=str,
        required=False,
        help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.",
    )
    sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs)

    sub = subparser.add_parser("images.create_variation")
    sub.add_argument("-m", "--model", type=str)
    sub.add_argument("-n", "--num-images", type=int, default=1)
    sub.add_argument(
        "-I",
        "--image",
        type=str,
        required=True,
        help="Image to modify. Should be a local path and a PNG encoded image.",
    )
    sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image")
    sub.add_argument("--response-format", type=str, default="url")
    sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs)
59
+
60
+
61
class CLIImageCreateArgs(BaseModel):
    """Parsed arguments for the `images.generate` subcommand."""

    prompt: str
    num_images: int
    size: str
    response_format: str
    model: NotGivenOr[str] = NOT_GIVEN
67
+
68
+
69
class CLIImageCreateVariationArgs(BaseModel):
    """Parsed arguments for the `images.create_variation` subcommand."""

    # local path of the source image
    image: str
    num_images: int
    size: str
    response_format: str
    model: NotGivenOr[str] = NOT_GIVEN
75
+
76
+
77
class CLIImageEditArgs(BaseModel):
    """Parsed arguments for the `images.edit` subcommand."""

    # local path of the image to edit
    image: str
    num_images: int
    size: str
    response_format: str
    prompt: str
    # optional local path of the RGBA mask image
    mask: NotGivenOr[str] = NOT_GIVEN
    model: NotGivenOr[str] = NOT_GIVEN
85
+
86
+
87
class CLIImage:
    """Handlers for the `images.*` CLI subcommands."""

    @staticmethod
    def create(args: CLIImageCreateArgs) -> None:
        """Generate image(s) from a text prompt and print the response."""
        image = get_client().images.generate(
            model=args.model,
            prompt=args.prompt,
            n=args.num_images,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)

    @staticmethod
    def create_variation(args: CLIImageCreateVariationArgs) -> None:
        """Generate variation(s) of a local image and print the response."""
        with open(args.image, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

        image = get_client().images.create_variation(
            model=args.model,
            image=("image", buffer_reader),
            n=args.num_images,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)

    @staticmethod
    def edit(args: CLIImageEditArgs) -> None:
        """Edit a local image per the prompt, optionally limited by a mask."""
        with open(args.image, "rb") as file_reader:
            buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress")

        # the mask is optional; only read it when a path was actually given
        if isinstance(args.mask, NotGiven):
            mask: NotGivenOr[BufferReader] = NOT_GIVEN
        else:
            with open(args.mask, "rb") as file_reader:
                mask = BufferReader(file_reader.read(), desc="Mask progress")

        image = get_client().images.edit(
            model=args.model,
            prompt=args.prompt,
            image=("image", buffer_reader),
            n=args.num_images,
            mask=("mask", mask) if not isinstance(mask, NotGiven) else mask,
            # casts required because the API is typed for enums
            # but we don't want to validate that here for forwards-compat
            size=cast(Any, args.size),
            response_format=cast(Any, args.response_format),
        )
        print_model(image)
.venv/lib/python3.11/site-packages/openai/cli/_api/models.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING
4
+ from argparse import ArgumentParser
5
+
6
+ from .._utils import get_client, print_model
7
+ from .._models import BaseModel
8
+
9
+ if TYPE_CHECKING:
10
+ from argparse import _SubParsersAction
11
+
12
+
13
def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    """Attach the `models.*` subcommands to the given subparsers action."""
    list_cmd = subparser.add_parser("models.list")
    list_cmd.set_defaults(func=CLIModels.list)

    # Both ID-taking commands share the same flag and args model.
    for command, handler in (
        ("models.retrieve", CLIModels.get),
        ("models.delete", CLIModels.delete),
    ):
        cmd = subparser.add_parser(command)
        cmd.add_argument("-i", "--id", required=True, help="The model ID")
        cmd.set_defaults(func=handler, args_model=CLIModelIDArgs)
24
+
25
+
26
class CLIModelIDArgs(BaseModel):
    """Arguments for subcommands that operate on a single model."""

    # The model ID, supplied via `-i/--id`.
    id: str
28
+
29
+
30
class CLIModels:
    """Handlers for the `models.*` CLI subcommands."""

    @staticmethod
    def get(args: CLIModelIDArgs) -> None:
        """Fetch a single model by ID and print it."""
        print_model(get_client().models.retrieve(model=args.id))

    @staticmethod
    def delete(args: CLIModelIDArgs) -> None:
        """Delete a model by ID and print the API's confirmation."""
        print_model(get_client().models.delete(model=args.id))

    @staticmethod
    def list() -> None:
        """Print every model visible to the current credentials."""
        for entry in get_client().models.list():
            print_model(entry)
.venv/lib/python3.11/site-packages/openai/cli/_cli.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ import logging
5
+ import argparse
6
+ from typing import Any, List, Type, Optional
7
+ from typing_extensions import ClassVar
8
+
9
+ import httpx
10
+ import pydantic
11
+
12
+ import openai
13
+
14
+ from . import _tools
15
+ from .. import _ApiType, __version__
16
+ from ._api import register_commands
17
+ from ._utils import can_use_http2
18
+ from ._errors import CLIError, display_error
19
+ from .._compat import PYDANTIC_V2, ConfigDict, model_parse
20
+ from .._models import BaseModel
21
+ from .._exceptions import APIError
22
+
23
# Root-logger setup: timestamped messages go to stderr so that normal
# command output on stdout stays clean/machine-readable.
logger = logging.getLogger()
formatter = logging.Formatter("[%(asctime)s] %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
28
+
29
+
30
class Arguments(BaseModel):
    """Top-level CLI arguments parsed out of argparse's namespace.

    Extra (subcommand-specific) keys are ignored here; they are re-parsed
    into the subcommand's own ``args_model`` later in ``_main``.
    """

    # Configure "ignore extra fields" for whichever pydantic major version is installed.
    if PYDANTIC_V2:
        model_config: ClassVar[ConfigDict] = ConfigDict(
            extra="ignore",
        )
    else:

        class Config(pydantic.BaseConfig):  # type: ignore
            extra: Any = pydantic.Extra.ignore  # type: ignore

    verbosity: int
    version: Optional[str] = None

    api_key: Optional[str]
    api_base: Optional[str]
    organization: Optional[str]
    proxy: Optional[List[str]]
    api_type: Optional[_ApiType] = None
    api_version: Optional[str] = None

    # azure
    azure_endpoint: Optional[str] = None
    azure_ad_token: Optional[str] = None

    # internal, set by subparsers to parse their specific args
    args_model: Optional[Type[BaseModel]] = None

    # internal, used so that subparsers can forward unknown arguments
    unknown_args: List[str] = []
    allow_unknown_args: bool = False
60
+
61
+
62
def _build_parser() -> argparse.ArgumentParser:
    """Construct the top-level ``openai`` argument parser.

    Registers the global connection/auth flags, the ``api`` subcommands and
    the ``tools`` subcommands; when no subcommand is given the parser's
    default action prints its own help.
    """
    parser = argparse.ArgumentParser(description=None, prog="openai")
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="Set verbosity.",
    )
    parser.add_argument("-b", "--api-base", help="What API base url to use.")
    parser.add_argument("-k", "--api-key", help="What API key to use.")
    parser.add_argument("-p", "--proxy", nargs="+", help="What proxy to use.")
    parser.add_argument(
        "-o",
        "--organization",
        help="Which organization to run as (will use your default organization if not specified)",
    )
    parser.add_argument(
        "-t",
        "--api-type",
        type=str,
        choices=("openai", "azure"),
        help="The backend API to call, must be `openai` or `azure`",
    )
    parser.add_argument(
        "--api-version",
        help="The Azure API version, e.g. 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning'",
    )

    # azure
    parser.add_argument(
        "--azure-endpoint",
        help="The Azure endpoint, e.g. 'https://endpoint.openai.azure.com'",
    )
    parser.add_argument(
        "--azure-ad-token",
        help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id",
    )

    # prints the package version
    parser.add_argument(
        "-V",
        "--version",
        action="version",
        version="%(prog)s " + __version__,
    )

    # Renamed from `help` so the local function does not shadow the builtin.
    def print_help() -> None:
        parser.print_help()

    parser.set_defaults(func=print_help)

    subparsers = parser.add_subparsers()
    sub_api = subparsers.add_parser("api", help="Direct API calls")

    register_commands(sub_api)

    sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
    _tools.register_commands(sub_tools, subparsers)

    return parser
124
+
125
+
126
def main() -> int:
    """CLI entry point; returns the process exit status (0 ok, 1 failure)."""
    try:
        _main()
    except KeyboardInterrupt:
        # Print a newline so the shell prompt starts on a fresh line.
        sys.stderr.write("\n")
        return 1
    except (APIError, CLIError, pydantic.ValidationError) as err:
        display_error(err)
        return 1
    return 0
136
+
137
+
138
def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]:
    """Parse ``sys.argv`` into (raw namespace, typed Arguments, unknown args).

    Everything from a literal ``--`` onward is kept verbatim (including the
    ``--`` itself, which argparse would normally strip) so subcommands can
    forward it untouched.
    """
    argv = sys.argv[1:]
    try:
        sep = argv.index("--")
    except ValueError:
        known, trailing = argv, []
    else:
        known, trailing = argv[:sep], argv[sep:]

    parsed, remaining_unknown = parser.parse_known_args(known)

    # append any remaining unknown arguments from the initial parsing
    remaining_unknown.extend(trailing)

    args = model_parse(Arguments, vars(parsed))
    if not args.allow_unknown_args:
        # we have to parse twice to ensure any unknown arguments
        # result in an error if that behaviour is desired
        parser.parse_args()

    return parsed, args, remaining_unknown
160
+
161
+
162
def _main() -> None:
    """Parse arguments, configure the `openai` module-level client settings,
    then dispatch to the selected subcommand's handler."""
    parser = _build_parser()
    parsed, args, unknown = _parse_args(parser)

    if args.verbosity != 0:
        sys.stderr.write("Warning: --verbosity isn't supported yet\n")

    # Build at most one http:// and one https:// proxy mount for the shared client.
    proxies: dict[str, httpx.BaseTransport] = {}
    if args.proxy is not None:
        for proxy in args.proxy:
            key = "https://" if proxy.startswith("https") else "http://"
            if key in proxies:
                raise CLIError(f"Multiple {key} proxies given - only the last one would be used")

            proxies[key] = httpx.HTTPTransport(proxy=httpx.Proxy(httpx.URL(proxy)))

    http_client = httpx.Client(
        mounts=proxies or None,
        http2=can_use_http2(),
    )
    # Install the configured client/credentials as module-level defaults
    # that the subcommand handlers pick up via `get_client()`.
    openai.http_client = http_client

    if args.organization:
        openai.organization = args.organization

    if args.api_key:
        openai.api_key = args.api_key

    if args.api_base:
        openai.base_url = args.api_base

    # azure
    if args.api_type is not None:
        openai.api_type = args.api_type

    if args.azure_endpoint is not None:
        openai.azure_endpoint = args.azure_endpoint

    if args.api_version is not None:
        openai.api_version = args.api_version

    if args.azure_ad_token is not None:
        openai.azure_ad_token = args.azure_ad_token

    try:
        if args.args_model:
            # Re-validate the raw namespace against the subcommand's own model.
            parsed.func(
                model_parse(
                    args.args_model,
                    {
                        **{
                            # we omit None values so that they can be defaulted to `NotGiven`
                            # and we'll strip it from the API request
                            key: value
                            for key, value in vars(parsed).items()
                            if value is not None
                        },
                        "unknown_args": unknown,
                    },
                )
            )
        else:
            parsed.func()
    finally:
        # Best-effort cleanup of the HTTP client; errors here shouldn't mask
        # the subcommand's outcome.
        try:
            http_client.close()
        except Exception:
            pass
230
+
231
+
232
# Support direct execution of this module (e.g. `python -m`-style entry).
if __name__ == "__main__":
    sys.exit(main())
.venv/lib/python3.11/site-packages/openai/cli/_errors.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+
5
+ import pydantic
6
+
7
+ from ._utils import Colors, organization_info
8
+ from .._exceptions import APIError, OpenAIError
9
+
10
+
11
class CLIError(OpenAIError):
    """Base class for user-facing errors raised by the CLI layer."""
    ...
12
+
13
+
14
class SilentCLIError(CLIError):
    """A CLIError that aborts the command without printing an error message."""
    ...
15
+
16
+
17
def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None:
    """Write a colored error line to stderr; silent errors produce no output."""
    if isinstance(err, SilentCLIError):
        return

    sys.stderr.write(f"{organization_info()}{Colors.FAIL}Error:{Colors.ENDC} {err}\n")
.venv/lib/python3.11/site-packages/openai/cli/_models.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from typing_extensions import ClassVar
3
+
4
+ import pydantic
5
+
6
+ from .. import _models
7
+ from .._compat import PYDANTIC_V2, ConfigDict
8
+
9
+
10
class BaseModel(_models.BaseModel):
    """CLI-local base model: ignores unknown fields and permits arbitrary
    (non-pydantic) types, e.g. file buffers, as field values."""

    # Configure for whichever pydantic major version is installed.
    if PYDANTIC_V2:
        model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True)
    else:

        class Config(pydantic.BaseConfig):  # type: ignore
            extra: Any = pydantic.Extra.ignore  # type: ignore
            arbitrary_types_allowed: bool = True
.venv/lib/python3.11/site-packages/openai/cli/_progress.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ from typing import Callable
5
+ from typing_extensions import override
6
+
7
+
8
class CancelledError(Exception):
    """Signals that an in-progress upload was aborted.

    ``str()`` and ``repr()`` both render just the human-readable message.
    """

    def __init__(self, msg: str) -> None:
        super().__init__(msg)
        self.msg = msg

    def __str__(self) -> str:
        return self.msg

    __repr__ = __str__
18
+
19
+
20
class BufferReader(io.BytesIO):
    """A ``BytesIO`` that reports read progress through a callback.

    ``len()`` returns the total buffer size; each ``read`` advances the
    progress meter. A callback failure aborts the read via CancelledError.
    """

    def __init__(self, buf: bytes = b"", desc: str | None = None) -> None:
        super().__init__(buf)
        self._len = len(buf)
        self._progress = 0
        self._callback = progress(len(buf), desc=desc)

    def __len__(self) -> int:
        return self._len

    def read(self, n: int | None = -1) -> bytes:
        chunk = super().read(n)
        self._progress += len(chunk)

        try:
            self._callback(self._progress)
        except Exception as exc:  # catches exception from the callback
            raise CancelledError("The upload was cancelled: {}".format(exc)) from exc

        return chunk
41
+
42
+
43
def progress(total: float, desc: str | None) -> Callable[[float], None]:
    """Return a callback that drives a tqdm meter toward *total*.

    The meter closes itself once the reported value reaches *total*.
    """
    import tqdm

    bar = tqdm.tqdm(total=total, unit_scale=True, desc=desc)

    def update(current: float) -> None:
        bar.n = current
        if current == total:
            bar.close()
        else:
            bar.refresh()

    return update
56
+
57
+
58
def MB(i: int) -> int:
    """Convert a byte count to whole mebibytes (floor division)."""
    mebibyte = 1 << 20
    return int(i // mebibyte)
.venv/lib/python3.11/site-packages/openai/cli/_tools/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from ._main import register_commands as register_commands
.venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (251 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/_main.cpython-311.pyc ADDED
Binary file (1.05 kB). View file
 
.venv/lib/python3.11/site-packages/openai/cli/_tools/__pycache__/fine_tunes.cpython-311.pyc ADDED
Binary file (2.73 kB). View file