koichi12 commited on
Commit
92cca4b
·
verified ·
1 Parent(s): 5cd10e5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so +3 -0
  3. .venv/lib/python3.11/site-packages/openai/types/audio/__init__.py +16 -0
  4. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/__init__.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_create_params.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_model.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_params.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_response.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_segment.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_verbose.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_word.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_params.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_response.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_verbose.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_response.py +11 -0
  18. .venv/lib/python3.11/site-packages/openai/types/beta/__init__.py +48 -0
  19. .venv/lib/python3.11/site-packages/openai/types/beta/assistant.py +134 -0
  20. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_create_params.py +168 -0
  21. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_deleted.py +15 -0
  22. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_list_params.py +39 -0
  23. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option.py +14 -0
  24. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option_param.py +16 -0
  25. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_stream_event.py +294 -0
  26. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool.py +15 -0
  27. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice.py +16 -0
  28. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function.py +11 -0
  29. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function_param.py +12 -0
  30. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option.py +10 -0
  31. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option_param.py +12 -0
  32. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_param.py +16 -0
  33. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_param.py +14 -0
  34. .venv/lib/python3.11/site-packages/openai/types/beta/assistant_update_params.py +127 -0
  35. .venv/lib/python3.11/site-packages/openai/types/beta/auto_file_chunking_strategy_param.py +12 -0
  36. .venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool.py +12 -0
  37. .venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool_param.py +12 -0
  38. .venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy.py +14 -0
  39. .venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy_param.py +13 -0
  40. .venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool.py +55 -0
  41. .venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool_param.py +54 -0
  42. .venv/lib/python3.11/site-packages/openai/types/beta/function_tool.py +15 -0
  43. .venv/lib/python3.11/site-packages/openai/types/beta/function_tool_param.py +16 -0
  44. .venv/lib/python3.11/site-packages/openai/types/beta/other_file_chunking_strategy_object.py +12 -0
  45. .venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy.py +21 -0
  46. .venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object.py +15 -0
  47. .venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object_param.py +16 -0
  48. .venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_param.py +22 -0
  49. .venv/lib/python3.11/site-packages/openai/types/beta/thread.py +63 -0
  50. .venv/lib/python3.11/site-packages/openai/types/beta/thread_create_and_run_params.py +366 -0
.gitattributes CHANGED
@@ -343,3 +343,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
343
  .venv/lib/python3.11/site-packages/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
344
  .venv/lib/python3.11/site-packages/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
345
  .venv/lib/python3.11/site-packages/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
 
 
343
  .venv/lib/python3.11/site-packages/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text
344
  .venv/lib/python3.11/site-packages/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text
345
  .venv/lib/python3.11/site-packages/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text
346
+ .venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49419ffe4026850c619fceb5fa38826b47b9d68342aa5813871ea4dc83a0c2ab
3
+ size 410592
.venv/lib/python3.11/site-packages/openai/types/audio/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from .translation import Translation as Translation
6
+ from .speech_model import SpeechModel as SpeechModel
7
+ from .transcription import Transcription as Transcription
8
+ from .transcription_word import TranscriptionWord as TranscriptionWord
9
+ from .translation_verbose import TranslationVerbose as TranslationVerbose
10
+ from .speech_create_params import SpeechCreateParams as SpeechCreateParams
11
+ from .transcription_segment import TranscriptionSegment as TranscriptionSegment
12
+ from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose
13
+ from .translation_create_params import TranslationCreateParams as TranslationCreateParams
14
+ from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams
15
+ from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse
16
+ from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.22 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_create_params.cpython-311.pyc ADDED
Binary file (1.2 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_model.cpython-311.pyc ADDED
Binary file (415 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription.cpython-311.pyc ADDED
Binary file (562 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_params.cpython-311.pyc ADDED
Binary file (1.38 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_response.cpython-311.pyc ADDED
Binary file (622 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_segment.cpython-311.pyc ADDED
Binary file (1 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_verbose.cpython-311.pyc ADDED
Binary file (1.08 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_word.cpython-311.pyc ADDED
Binary file (638 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation.cpython-311.pyc ADDED
Binary file (555 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_params.cpython-311.pyc ADDED
Binary file (1.2 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_response.cpython-311.pyc ADDED
Binary file (610 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_verbose.cpython-311.pyc ADDED
Binary file (926 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_response.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import TypeAlias
5
+
6
+ from .transcription import Transcription
7
+ from .transcription_verbose import TranscriptionVerbose
8
+
9
+ __all__ = ["TranscriptionCreateResponse"]
10
+
11
+ TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose]
.venv/lib/python3.11/site-packages/openai/types/beta/__init__.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from .thread import Thread as Thread
6
+ from .assistant import Assistant as Assistant
7
+ from .vector_store import VectorStore as VectorStore
8
+ from .function_tool import FunctionTool as FunctionTool
9
+ from .assistant_tool import AssistantTool as AssistantTool
10
+ from .thread_deleted import ThreadDeleted as ThreadDeleted
11
+ from .file_search_tool import FileSearchTool as FileSearchTool
12
+ from .assistant_deleted import AssistantDeleted as AssistantDeleted
13
+ from .function_tool_param import FunctionToolParam as FunctionToolParam
14
+ from .assistant_tool_param import AssistantToolParam as AssistantToolParam
15
+ from .thread_create_params import ThreadCreateParams as ThreadCreateParams
16
+ from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams
17
+ from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
18
+ from .assistant_list_params import AssistantListParams as AssistantListParams
19
+ from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice
20
+ from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool
21
+ from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent
22
+ from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
23
+ from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam
24
+ from .assistant_create_params import AssistantCreateParams as AssistantCreateParams
25
+ from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams
26
+ from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
27
+ from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
28
+ from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
29
+ from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam
30
+ from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam
31
+ from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption
32
+ from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
33
+ from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams
34
+ from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy
35
+ from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction
36
+ from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption
37
+ from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam
38
+ from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam
39
+ from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject
40
+ from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam
41
+ from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam
42
+ from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject
43
+ from .assistant_response_format_option_param import (
44
+ AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
45
+ )
46
+ from .static_file_chunking_strategy_object_param import (
47
+ StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
48
+ )
.venv/lib/python3.11/site-packages/openai/types/beta/assistant.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from .assistant_tool import AssistantTool
8
+ from ..shared.metadata import Metadata
9
+ from .assistant_response_format_option import AssistantResponseFormatOption
10
+
11
+ __all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
12
+
13
+
14
+ class ToolResourcesCodeInterpreter(BaseModel):
15
+ file_ids: Optional[List[str]] = None
16
+ """
17
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
18
+ available to the `code_interpreter`` tool. There can be a maximum of 20 files
19
+ associated with the tool.
20
+ """
21
+
22
+
23
+ class ToolResourcesFileSearch(BaseModel):
24
+ vector_store_ids: Optional[List[str]] = None
25
+ """
26
+ The ID of the
27
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
28
+ attached to this assistant. There can be a maximum of 1 vector store attached to
29
+ the assistant.
30
+ """
31
+
32
+
33
+ class ToolResources(BaseModel):
34
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
35
+
36
+ file_search: Optional[ToolResourcesFileSearch] = None
37
+
38
+
39
+ class Assistant(BaseModel):
40
+ id: str
41
+ """The identifier, which can be referenced in API endpoints."""
42
+
43
+ created_at: int
44
+ """The Unix timestamp (in seconds) for when the assistant was created."""
45
+
46
+ description: Optional[str] = None
47
+ """The description of the assistant. The maximum length is 512 characters."""
48
+
49
+ instructions: Optional[str] = None
50
+ """The system instructions that the assistant uses.
51
+
52
+ The maximum length is 256,000 characters.
53
+ """
54
+
55
+ metadata: Optional[Metadata] = None
56
+ """Set of 16 key-value pairs that can be attached to an object.
57
+
58
+ This can be useful for storing additional information about the object in a
59
+ structured format, and querying for objects via API or the dashboard.
60
+
61
+ Keys are strings with a maximum length of 64 characters. Values are strings with
62
+ a maximum length of 512 characters.
63
+ """
64
+
65
+ model: str
66
+ """ID of the model to use.
67
+
68
+ You can use the
69
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
70
+ see all of your available models, or see our
71
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
72
+ them.
73
+ """
74
+
75
+ name: Optional[str] = None
76
+ """The name of the assistant. The maximum length is 256 characters."""
77
+
78
+ object: Literal["assistant"]
79
+ """The object type, which is always `assistant`."""
80
+
81
+ tools: List[AssistantTool]
82
+ """A list of tool enabled on the assistant.
83
+
84
+ There can be a maximum of 128 tools per assistant. Tools can be of types
85
+ `code_interpreter`, `file_search`, or `function`.
86
+ """
87
+
88
+ response_format: Optional[AssistantResponseFormatOption] = None
89
+ """Specifies the format that the model must output.
90
+
91
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
92
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
93
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
94
+
95
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
96
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
97
+ in the
98
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
99
+
100
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
101
+ message the model generates is valid JSON.
102
+
103
+ **Important:** when using JSON mode, you **must** also instruct the model to
104
+ produce JSON yourself via a system or user message. Without this, the model may
105
+ generate an unending stream of whitespace until the generation reaches the token
106
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
107
+ the message content may be partially cut off if `finish_reason="length"`, which
108
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
109
+ max context length.
110
+ """
111
+
112
+ temperature: Optional[float] = None
113
+ """What sampling temperature to use, between 0 and 2.
114
+
115
+ Higher values like 0.8 will make the output more random, while lower values like
116
+ 0.2 will make it more focused and deterministic.
117
+ """
118
+
119
+ tool_resources: Optional[ToolResources] = None
120
+ """A set of resources that are used by the assistant's tools.
121
+
122
+ The resources are specific to the type of tool. For example, the
123
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
124
+ tool requires a list of vector store IDs.
125
+ """
126
+
127
+ top_p: Optional[float] = None
128
+ """
129
+ An alternative to sampling with temperature, called nucleus sampling, where the
130
+ model considers the results of the tokens with top_p probability mass. So 0.1
131
+ means only the tokens comprising the top 10% probability mass are considered.
132
+
133
+ We generally recommend altering this or temperature but not both.
134
+ """
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_create_params.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List, Union, Iterable, Optional
6
+ from typing_extensions import Required, TypedDict
7
+
8
+ from ..chat_model import ChatModel
9
+ from .assistant_tool_param import AssistantToolParam
10
+ from ..shared_params.metadata import Metadata
11
+ from .file_chunking_strategy_param import FileChunkingStrategyParam
12
+ from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
13
+
14
+ __all__ = [
15
+ "AssistantCreateParams",
16
+ "ToolResources",
17
+ "ToolResourcesCodeInterpreter",
18
+ "ToolResourcesFileSearch",
19
+ "ToolResourcesFileSearchVectorStore",
20
+ ]
21
+
22
+
23
+ class AssistantCreateParams(TypedDict, total=False):
24
+ model: Required[Union[str, ChatModel]]
25
+ """ID of the model to use.
26
+
27
+ You can use the
28
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
29
+ see all of your available models, or see our
30
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
31
+ them.
32
+ """
33
+
34
+ description: Optional[str]
35
+ """The description of the assistant. The maximum length is 512 characters."""
36
+
37
+ instructions: Optional[str]
38
+ """The system instructions that the assistant uses.
39
+
40
+ The maximum length is 256,000 characters.
41
+ """
42
+
43
+ metadata: Optional[Metadata]
44
+ """Set of 16 key-value pairs that can be attached to an object.
45
+
46
+ This can be useful for storing additional information about the object in a
47
+ structured format, and querying for objects via API or the dashboard.
48
+
49
+ Keys are strings with a maximum length of 64 characters. Values are strings with
50
+ a maximum length of 512 characters.
51
+ """
52
+
53
+ name: Optional[str]
54
+ """The name of the assistant. The maximum length is 256 characters."""
55
+
56
+ response_format: Optional[AssistantResponseFormatOptionParam]
57
+ """Specifies the format that the model must output.
58
+
59
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
60
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
61
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
62
+
63
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
64
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
65
+ in the
66
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
67
+
68
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
69
+ message the model generates is valid JSON.
70
+
71
+ **Important:** when using JSON mode, you **must** also instruct the model to
72
+ produce JSON yourself via a system or user message. Without this, the model may
73
+ generate an unending stream of whitespace until the generation reaches the token
74
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
75
+ the message content may be partially cut off if `finish_reason="length"`, which
76
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
77
+ max context length.
78
+ """
79
+
80
+ temperature: Optional[float]
81
+ """What sampling temperature to use, between 0 and 2.
82
+
83
+ Higher values like 0.8 will make the output more random, while lower values like
84
+ 0.2 will make it more focused and deterministic.
85
+ """
86
+
87
+ tool_resources: Optional[ToolResources]
88
+ """A set of resources that are used by the assistant's tools.
89
+
90
+ The resources are specific to the type of tool. For example, the
91
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
92
+ tool requires a list of vector store IDs.
93
+ """
94
+
95
+ tools: Iterable[AssistantToolParam]
96
+ """A list of tool enabled on the assistant.
97
+
98
+ There can be a maximum of 128 tools per assistant. Tools can be of types
99
+ `code_interpreter`, `file_search`, or `function`.
100
+ """
101
+
102
+ top_p: Optional[float]
103
+ """
104
+ An alternative to sampling with temperature, called nucleus sampling, where the
105
+ model considers the results of the tokens with top_p probability mass. So 0.1
106
+ means only the tokens comprising the top 10% probability mass are considered.
107
+
108
+ We generally recommend altering this or temperature but not both.
109
+ """
110
+
111
+
112
+ class ToolResourcesCodeInterpreter(TypedDict, total=False):
113
+ file_ids: List[str]
114
+ """
115
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
116
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
117
+ associated with the tool.
118
+ """
119
+
120
+
121
+ class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
122
+ chunking_strategy: FileChunkingStrategyParam
123
+ """The chunking strategy used to chunk the file(s).
124
+
125
+ If not set, will use the `auto` strategy. Only applicable if `file_ids` is
126
+ non-empty.
127
+ """
128
+
129
+ file_ids: List[str]
130
+ """
131
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
132
+ add to the vector store. There can be a maximum of 10000 files in a vector
133
+ store.
134
+ """
135
+
136
+ metadata: Optional[Metadata]
137
+ """Set of 16 key-value pairs that can be attached to an object.
138
+
139
+ This can be useful for storing additional information about the object in a
140
+ structured format, and querying for objects via API or the dashboard.
141
+
142
+ Keys are strings with a maximum length of 64 characters. Values are strings with
143
+ a maximum length of 512 characters.
144
+ """
145
+
146
+
147
+ class ToolResourcesFileSearch(TypedDict, total=False):
148
+ vector_store_ids: List[str]
149
+ """
150
+ The
151
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
152
+ attached to this assistant. There can be a maximum of 1 vector store attached to
153
+ the assistant.
154
+ """
155
+
156
+ vector_stores: Iterable[ToolResourcesFileSearchVectorStore]
157
+ """
158
+ A helper to create a
159
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
160
+ with file_ids and attach it to this assistant. There can be a maximum of 1
161
+ vector store attached to the assistant.
162
+ """
163
+
164
+
165
+ class ToolResources(TypedDict, total=False):
166
+ code_interpreter: ToolResourcesCodeInterpreter
167
+
168
+ file_search: ToolResourcesFileSearch
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_deleted.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+
7
+ __all__ = ["AssistantDeleted"]
8
+
9
+
10
+ class AssistantDeleted(BaseModel):
11
+ id: str
12
+
13
+ deleted: bool
14
+
15
+ object: Literal["assistant.deleted"]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_list_params.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, TypedDict
6
+
7
+ __all__ = ["AssistantListParams"]
8
+
9
+
10
+ class AssistantListParams(TypedDict, total=False):
11
+ after: str
12
+ """A cursor for use in pagination.
13
+
14
+ `after` is an object ID that defines your place in the list. For instance, if
15
+ you make a list request and receive 100 objects, ending with obj_foo, your
16
+ subsequent call can include after=obj_foo in order to fetch the next page of the
17
+ list.
18
+ """
19
+
20
+ before: str
21
+ """A cursor for use in pagination.
22
+
23
+ `before` is an object ID that defines your place in the list. For instance, if
24
+ you make a list request and receive 100 objects, starting with obj_foo, your
25
+ subsequent call can include before=obj_foo in order to fetch the previous page
26
+ of the list.
27
+ """
28
+
29
+ limit: int
30
+ """A limit on the number of objects to be returned.
31
+
32
+ Limit can range between 1 and 100, and the default is 20.
33
+ """
34
+
35
+ order: Literal["asc", "desc"]
36
+ """Sort order by the `created_at` timestamp of the objects.
37
+
38
+ `asc` for ascending order and `desc` for descending order.
39
+ """
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import Literal, TypeAlias
5
+
6
+ from ..shared.response_format_text import ResponseFormatText
7
+ from ..shared.response_format_json_object import ResponseFormatJSONObject
8
+ from ..shared.response_format_json_schema import ResponseFormatJSONSchema
9
+
10
+ __all__ = ["AssistantResponseFormatOption"]
11
+
12
+ AssistantResponseFormatOption: TypeAlias = Union[
13
+ Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
14
+ ]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import Literal, TypeAlias
7
+
8
+ from ..shared_params.response_format_text import ResponseFormatText
9
+ from ..shared_params.response_format_json_object import ResponseFormatJSONObject
10
+ from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
11
+
12
+ __all__ = ["AssistantResponseFormatOptionParam"]
13
+
14
+ AssistantResponseFormatOptionParam: TypeAlias = Union[
15
+ Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema
16
+ ]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_stream_event.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union, Optional
4
+ from typing_extensions import Literal, Annotated, TypeAlias
5
+
6
+ from .thread import Thread
7
+ from ..._utils import PropertyInfo
8
+ from ..._models import BaseModel
9
+ from .threads.run import Run
10
+ from .threads.message import Message
11
+ from ..shared.error_object import ErrorObject
12
+ from .threads.runs.run_step import RunStep
13
+ from .threads.message_delta_event import MessageDeltaEvent
14
+ from .threads.runs.run_step_delta_event import RunStepDeltaEvent
15
+
16
+ __all__ = [
17
+ "AssistantStreamEvent",
18
+ "ThreadCreated",
19
+ "ThreadRunCreated",
20
+ "ThreadRunQueued",
21
+ "ThreadRunInProgress",
22
+ "ThreadRunRequiresAction",
23
+ "ThreadRunCompleted",
24
+ "ThreadRunIncomplete",
25
+ "ThreadRunFailed",
26
+ "ThreadRunCancelling",
27
+ "ThreadRunCancelled",
28
+ "ThreadRunExpired",
29
+ "ThreadRunStepCreated",
30
+ "ThreadRunStepInProgress",
31
+ "ThreadRunStepDelta",
32
+ "ThreadRunStepCompleted",
33
+ "ThreadRunStepFailed",
34
+ "ThreadRunStepCancelled",
35
+ "ThreadRunStepExpired",
36
+ "ThreadMessageCreated",
37
+ "ThreadMessageInProgress",
38
+ "ThreadMessageDelta",
39
+ "ThreadMessageCompleted",
40
+ "ThreadMessageIncomplete",
41
+ "ErrorEvent",
42
+ ]
43
+
44
+
45
+ class ThreadCreated(BaseModel):
46
+ data: Thread
47
+ """
48
+ Represents a thread that contains
49
+ [messages](https://platform.openai.com/docs/api-reference/messages).
50
+ """
51
+
52
+ event: Literal["thread.created"]
53
+
54
+ enabled: Optional[bool] = None
55
+ """Whether to enable input audio transcription."""
56
+
57
+
58
+ class ThreadRunCreated(BaseModel):
59
+ data: Run
60
+ """
61
+ Represents an execution run on a
62
+ [thread](https://platform.openai.com/docs/api-reference/threads).
63
+ """
64
+
65
+ event: Literal["thread.run.created"]
66
+
67
+
68
+ class ThreadRunQueued(BaseModel):
69
+ data: Run
70
+ """
71
+ Represents an execution run on a
72
+ [thread](https://platform.openai.com/docs/api-reference/threads).
73
+ """
74
+
75
+ event: Literal["thread.run.queued"]
76
+
77
+
78
+ class ThreadRunInProgress(BaseModel):
79
+ data: Run
80
+ """
81
+ Represents an execution run on a
82
+ [thread](https://platform.openai.com/docs/api-reference/threads).
83
+ """
84
+
85
+ event: Literal["thread.run.in_progress"]
86
+
87
+
88
+ class ThreadRunRequiresAction(BaseModel):
89
+ data: Run
90
+ """
91
+ Represents an execution run on a
92
+ [thread](https://platform.openai.com/docs/api-reference/threads).
93
+ """
94
+
95
+ event: Literal["thread.run.requires_action"]
96
+
97
+
98
+ class ThreadRunCompleted(BaseModel):
99
+ data: Run
100
+ """
101
+ Represents an execution run on a
102
+ [thread](https://platform.openai.com/docs/api-reference/threads).
103
+ """
104
+
105
+ event: Literal["thread.run.completed"]
106
+
107
+
108
+ class ThreadRunIncomplete(BaseModel):
109
+ data: Run
110
+ """
111
+ Represents an execution run on a
112
+ [thread](https://platform.openai.com/docs/api-reference/threads).
113
+ """
114
+
115
+ event: Literal["thread.run.incomplete"]
116
+
117
+
118
+ class ThreadRunFailed(BaseModel):
119
+ data: Run
120
+ """
121
+ Represents an execution run on a
122
+ [thread](https://platform.openai.com/docs/api-reference/threads).
123
+ """
124
+
125
+ event: Literal["thread.run.failed"]
126
+
127
+
128
+ class ThreadRunCancelling(BaseModel):
129
+ data: Run
130
+ """
131
+ Represents an execution run on a
132
+ [thread](https://platform.openai.com/docs/api-reference/threads).
133
+ """
134
+
135
+ event: Literal["thread.run.cancelling"]
136
+
137
+
138
+ class ThreadRunCancelled(BaseModel):
139
+ data: Run
140
+ """
141
+ Represents an execution run on a
142
+ [thread](https://platform.openai.com/docs/api-reference/threads).
143
+ """
144
+
145
+ event: Literal["thread.run.cancelled"]
146
+
147
+
148
+ class ThreadRunExpired(BaseModel):
149
+ data: Run
150
+ """
151
+ Represents an execution run on a
152
+ [thread](https://platform.openai.com/docs/api-reference/threads).
153
+ """
154
+
155
+ event: Literal["thread.run.expired"]
156
+
157
+
158
+ class ThreadRunStepCreated(BaseModel):
159
+ data: RunStep
160
+ """Represents a step in execution of a run."""
161
+
162
+ event: Literal["thread.run.step.created"]
163
+
164
+
165
+ class ThreadRunStepInProgress(BaseModel):
166
+ data: RunStep
167
+ """Represents a step in execution of a run."""
168
+
169
+ event: Literal["thread.run.step.in_progress"]
170
+
171
+
172
+ class ThreadRunStepDelta(BaseModel):
173
+ data: RunStepDeltaEvent
174
+ """Represents a run step delta i.e.
175
+
176
+ any changed fields on a run step during streaming.
177
+ """
178
+
179
+ event: Literal["thread.run.step.delta"]
180
+
181
+
182
+ class ThreadRunStepCompleted(BaseModel):
183
+ data: RunStep
184
+ """Represents a step in execution of a run."""
185
+
186
+ event: Literal["thread.run.step.completed"]
187
+
188
+
189
+ class ThreadRunStepFailed(BaseModel):
190
+ data: RunStep
191
+ """Represents a step in execution of a run."""
192
+
193
+ event: Literal["thread.run.step.failed"]
194
+
195
+
196
+ class ThreadRunStepCancelled(BaseModel):
197
+ data: RunStep
198
+ """Represents a step in execution of a run."""
199
+
200
+ event: Literal["thread.run.step.cancelled"]
201
+
202
+
203
+ class ThreadRunStepExpired(BaseModel):
204
+ data: RunStep
205
+ """Represents a step in execution of a run."""
206
+
207
+ event: Literal["thread.run.step.expired"]
208
+
209
+
210
+ class ThreadMessageCreated(BaseModel):
211
+ data: Message
212
+ """
213
+ Represents a message within a
214
+ [thread](https://platform.openai.com/docs/api-reference/threads).
215
+ """
216
+
217
+ event: Literal["thread.message.created"]
218
+
219
+
220
+ class ThreadMessageInProgress(BaseModel):
221
+ data: Message
222
+ """
223
+ Represents a message within a
224
+ [thread](https://platform.openai.com/docs/api-reference/threads).
225
+ """
226
+
227
+ event: Literal["thread.message.in_progress"]
228
+
229
+
230
+ class ThreadMessageDelta(BaseModel):
231
+ data: MessageDeltaEvent
232
+ """Represents a message delta i.e.
233
+
234
+ any changed fields on a message during streaming.
235
+ """
236
+
237
+ event: Literal["thread.message.delta"]
238
+
239
+
240
+ class ThreadMessageCompleted(BaseModel):
241
+ data: Message
242
+ """
243
+ Represents a message within a
244
+ [thread](https://platform.openai.com/docs/api-reference/threads).
245
+ """
246
+
247
+ event: Literal["thread.message.completed"]
248
+
249
+
250
+ class ThreadMessageIncomplete(BaseModel):
251
+ data: Message
252
+ """
253
+ Represents a message within a
254
+ [thread](https://platform.openai.com/docs/api-reference/threads).
255
+ """
256
+
257
+ event: Literal["thread.message.incomplete"]
258
+
259
+
260
+ class ErrorEvent(BaseModel):
261
+ data: ErrorObject
262
+
263
+ event: Literal["error"]
264
+
265
+
266
+ AssistantStreamEvent: TypeAlias = Annotated[
267
+ Union[
268
+ ThreadCreated,
269
+ ThreadRunCreated,
270
+ ThreadRunQueued,
271
+ ThreadRunInProgress,
272
+ ThreadRunRequiresAction,
273
+ ThreadRunCompleted,
274
+ ThreadRunIncomplete,
275
+ ThreadRunFailed,
276
+ ThreadRunCancelling,
277
+ ThreadRunCancelled,
278
+ ThreadRunExpired,
279
+ ThreadRunStepCreated,
280
+ ThreadRunStepInProgress,
281
+ ThreadRunStepDelta,
282
+ ThreadRunStepCompleted,
283
+ ThreadRunStepFailed,
284
+ ThreadRunStepCancelled,
285
+ ThreadRunStepExpired,
286
+ ThreadMessageCreated,
287
+ ThreadMessageInProgress,
288
+ ThreadMessageDelta,
289
+ ThreadMessageCompleted,
290
+ ThreadMessageIncomplete,
291
+ ErrorEvent,
292
+ ],
293
+ PropertyInfo(discriminator="event"),
294
+ ]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import Annotated, TypeAlias
5
+
6
+ from ..._utils import PropertyInfo
7
+ from .function_tool import FunctionTool
8
+ from .file_search_tool import FileSearchTool
9
+ from .code_interpreter_tool import CodeInterpreterTool
10
+
11
+ __all__ = ["AssistantTool"]
12
+
13
+ AssistantTool: TypeAlias = Annotated[
14
+ Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type")
15
+ ]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from .assistant_tool_choice_function import AssistantToolChoiceFunction
8
+
9
+ __all__ = ["AssistantToolChoice"]
10
+
11
+
12
+ class AssistantToolChoice(BaseModel):
13
+ type: Literal["function", "code_interpreter", "file_search"]
14
+ """The type of the tool. If type is `function`, the function name must be set"""
15
+
16
+ function: Optional[AssistantToolChoiceFunction] = None
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from ..._models import BaseModel
5
+
6
+ __all__ = ["AssistantToolChoiceFunction"]
7
+
8
+
9
+ class AssistantToolChoiceFunction(BaseModel):
10
+ name: str
11
+ """The name of the function to call."""
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function_param.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ __all__ = ["AssistantToolChoiceFunctionParam"]
8
+
9
+
10
+ class AssistantToolChoiceFunctionParam(TypedDict, total=False):
11
+ name: Required[str]
12
+ """The name of the function to call."""
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import Literal, TypeAlias
5
+
6
+ from .assistant_tool_choice import AssistantToolChoice
7
+
8
+ __all__ = ["AssistantToolChoiceOption"]
9
+
10
+ AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option_param.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import Literal, TypeAlias
7
+
8
+ from .assistant_tool_choice_param import AssistantToolChoiceParam
9
+
10
+ __all__ = ["AssistantToolChoiceOptionParam"]
11
+
12
+ AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam
8
+
9
+ __all__ = ["AssistantToolChoiceParam"]
10
+
11
+
12
+ class AssistantToolChoiceParam(TypedDict, total=False):
13
+ type: Required[Literal["function", "code_interpreter", "file_search"]]
14
+ """The type of the tool. If type is `function`, the function name must be set"""
15
+
16
+ function: AssistantToolChoiceFunctionParam
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_param.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import TypeAlias
7
+
8
+ from .function_tool_param import FunctionToolParam
9
+ from .file_search_tool_param import FileSearchToolParam
10
+ from .code_interpreter_tool_param import CodeInterpreterToolParam
11
+
12
+ __all__ = ["AssistantToolParam"]
13
+
14
+ AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
.venv/lib/python3.11/site-packages/openai/types/beta/assistant_update_params.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List, Iterable, Optional
6
+ from typing_extensions import TypedDict
7
+
8
+ from .assistant_tool_param import AssistantToolParam
9
+ from ..shared_params.metadata import Metadata
10
+ from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
11
+
12
+ __all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
13
+
14
+
15
+ class AssistantUpdateParams(TypedDict, total=False):
16
+ description: Optional[str]
17
+ """The description of the assistant. The maximum length is 512 characters."""
18
+
19
+ instructions: Optional[str]
20
+ """The system instructions that the assistant uses.
21
+
22
+ The maximum length is 256,000 characters.
23
+ """
24
+
25
+ metadata: Optional[Metadata]
26
+ """Set of 16 key-value pairs that can be attached to an object.
27
+
28
+ This can be useful for storing additional information about the object in a
29
+ structured format, and querying for objects via API or the dashboard.
30
+
31
+ Keys are strings with a maximum length of 64 characters. Values are strings with
32
+ a maximum length of 512 characters.
33
+ """
34
+
35
+ model: str
36
+ """ID of the model to use.
37
+
38
+ You can use the
39
+ [List models](https://platform.openai.com/docs/api-reference/models/list) API to
40
+ see all of your available models, or see our
41
+ [Model overview](https://platform.openai.com/docs/models) for descriptions of
42
+ them.
43
+ """
44
+
45
+ name: Optional[str]
46
+ """The name of the assistant. The maximum length is 256 characters."""
47
+
48
+ response_format: Optional[AssistantResponseFormatOptionParam]
49
+ """Specifies the format that the model must output.
50
+
51
+ Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
52
+ [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
53
+ and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
54
+
55
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
56
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
57
+ in the
58
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
59
+
60
+ Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
61
+ message the model generates is valid JSON.
62
+
63
+ **Important:** when using JSON mode, you **must** also instruct the model to
64
+ produce JSON yourself via a system or user message. Without this, the model may
65
+ generate an unending stream of whitespace until the generation reaches the token
66
+ limit, resulting in a long-running and seemingly "stuck" request. Also note that
67
+ the message content may be partially cut off if `finish_reason="length"`, which
68
+ indicates the generation exceeded `max_tokens` or the conversation exceeded the
69
+ max context length.
70
+ """
71
+
72
+ temperature: Optional[float]
73
+ """What sampling temperature to use, between 0 and 2.
74
+
75
+ Higher values like 0.8 will make the output more random, while lower values like
76
+ 0.2 will make it more focused and deterministic.
77
+ """
78
+
79
+ tool_resources: Optional[ToolResources]
80
+ """A set of resources that are used by the assistant's tools.
81
+
82
+ The resources are specific to the type of tool. For example, the
83
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
84
+ tool requires a list of vector store IDs.
85
+ """
86
+
87
+ tools: Iterable[AssistantToolParam]
88
+ """A list of tool enabled on the assistant.
89
+
90
+ There can be a maximum of 128 tools per assistant. Tools can be of types
91
+ `code_interpreter`, `file_search`, or `function`.
92
+ """
93
+
94
+ top_p: Optional[float]
95
+ """
96
+ An alternative to sampling with temperature, called nucleus sampling, where the
97
+ model considers the results of the tokens with top_p probability mass. So 0.1
98
+ means only the tokens comprising the top 10% probability mass are considered.
99
+
100
+ We generally recommend altering this or temperature but not both.
101
+ """
102
+
103
+
104
+ class ToolResourcesCodeInterpreter(TypedDict, total=False):
105
+ file_ids: List[str]
106
+ """
107
+ Overrides the list of
108
+ [file](https://platform.openai.com/docs/api-reference/files) IDs made available
109
+ to the `code_interpreter` tool. There can be a maximum of 20 files associated
110
+ with the tool.
111
+ """
112
+
113
+
114
+ class ToolResourcesFileSearch(TypedDict, total=False):
115
+ vector_store_ids: List[str]
116
+ """
117
+ Overrides the
118
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
119
+ attached to this assistant. There can be a maximum of 1 vector store attached to
120
+ the assistant.
121
+ """
122
+
123
+
124
+ class ToolResources(TypedDict, total=False):
125
+ code_interpreter: ToolResourcesCodeInterpreter
126
+
127
+ file_search: ToolResourcesFileSearch
.venv/lib/python3.11/site-packages/openai/types/beta/auto_file_chunking_strategy_param.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["AutoFileChunkingStrategyParam"]
8
+
9
+
10
+ class AutoFileChunkingStrategyParam(TypedDict, total=False):
11
+ type: Required[Literal["auto"]]
12
+ """Always `auto`."""
.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+
7
+ __all__ = ["CodeInterpreterTool"]
8
+
9
+
10
+ class CodeInterpreterTool(BaseModel):
11
+ type: Literal["code_interpreter"]
12
+ """The type of tool being defined: `code_interpreter`"""
.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool_param.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["CodeInterpreterToolParam"]
8
+
9
+
10
+ class CodeInterpreterToolParam(TypedDict, total=False):
11
+ type: Required[Literal["code_interpreter"]]
12
+ """The type of tool being defined: `code_interpreter`"""
.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Union
4
+ from typing_extensions import Annotated, TypeAlias
5
+
6
+ from ..._utils import PropertyInfo
7
+ from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject
8
+ from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject
9
+
10
+ __all__ = ["FileChunkingStrategy"]
11
+
12
+ FileChunkingStrategy: TypeAlias = Annotated[
13
+ Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type")
14
+ ]
.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy_param.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import TypeAlias
7
+
8
+ from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
9
+ from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam
10
+
11
+ __all__ = ["FileChunkingStrategyParam"]
12
+
13
+ FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+
8
+ __all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"]
9
+
10
+
11
+ class FileSearchRankingOptions(BaseModel):
12
+ score_threshold: float
13
+ """The score threshold for the file search.
14
+
15
+ All values must be a floating point number between 0 and 1.
16
+ """
17
+
18
+ ranker: Optional[Literal["auto", "default_2024_08_21"]] = None
19
+ """The ranker to use for the file search.
20
+
21
+ If not specified will use the `auto` ranker.
22
+ """
23
+
24
+
25
+ class FileSearch(BaseModel):
26
+ max_num_results: Optional[int] = None
27
+ """The maximum number of results the file search tool should output.
28
+
29
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
30
+ should be between 1 and 50 inclusive.
31
+
32
+ Note that the file search tool may output fewer than `max_num_results` results.
33
+ See the
34
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
35
+ for more information.
36
+ """
37
+
38
+ ranking_options: Optional[FileSearchRankingOptions] = None
39
+ """The ranking options for the file search.
40
+
41
+ If not specified, the file search tool will use the `auto` ranker and a
42
+ score_threshold of 0.
43
+
44
+ See the
45
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
46
+ for more information.
47
+ """
48
+
49
+
50
+ class FileSearchTool(BaseModel):
51
+ type: Literal["file_search"]
52
+ """The type of tool being defined: `file_search`"""
53
+
54
+ file_search: Optional[FileSearch] = None
55
+ """Overrides for the file search tool."""
.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool_param.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"]
8
+
9
+
10
+ class FileSearchRankingOptions(TypedDict, total=False):
11
+ score_threshold: Required[float]
12
+ """The score threshold for the file search.
13
+
14
+ All values must be a floating point number between 0 and 1.
15
+ """
16
+
17
+ ranker: Literal["auto", "default_2024_08_21"]
18
+ """The ranker to use for the file search.
19
+
20
+ If not specified will use the `auto` ranker.
21
+ """
22
+
23
+
24
+ class FileSearch(TypedDict, total=False):
25
+ max_num_results: int
26
+ """The maximum number of results the file search tool should output.
27
+
28
+ The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
29
+ should be between 1 and 50 inclusive.
30
+
31
+ Note that the file search tool may output fewer than `max_num_results` results.
32
+ See the
33
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
34
+ for more information.
35
+ """
36
+
37
+ ranking_options: FileSearchRankingOptions
38
+ """The ranking options for the file search.
39
+
40
+ If not specified, the file search tool will use the `auto` ranker and a
41
+ score_threshold of 0.
42
+
43
+ See the
44
+ [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
45
+ for more information.
46
+ """
47
+
48
+
49
+ class FileSearchToolParam(TypedDict, total=False):
50
+ type: Required[Literal["file_search"]]
51
+ """The type of tool being defined: `file_search`"""
52
+
53
+ file_search: FileSearch
54
+ """Overrides for the file search tool."""
.venv/lib/python3.11/site-packages/openai/types/beta/function_tool.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+ from ..shared.function_definition import FunctionDefinition
7
+
8
+ __all__ = ["FunctionTool"]
9
+
10
+
11
+ class FunctionTool(BaseModel):
12
+ function: FunctionDefinition
13
+
14
+ type: Literal["function"]
15
+ """The type of tool being defined: `function`"""
.venv/lib/python3.11/site-packages/openai/types/beta/function_tool_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ from ..shared_params.function_definition import FunctionDefinition
8
+
9
+ __all__ = ["FunctionToolParam"]
10
+
11
+
12
+ class FunctionToolParam(TypedDict, total=False):
13
+ function: Required[FunctionDefinition]
14
+
15
+ type: Required[Literal["function"]]
16
+ """The type of tool being defined: `function`"""
.venv/lib/python3.11/site-packages/openai/types/beta/other_file_chunking_strategy_object.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+
7
+ __all__ = ["OtherFileChunkingStrategyObject"]
8
+
9
+
10
+ class OtherFileChunkingStrategyObject(BaseModel):
11
+ type: Literal["other"]
12
+ """Always `other`."""
.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from ..._models import BaseModel
5
+
6
+ __all__ = ["StaticFileChunkingStrategy"]
7
+
8
+
9
+ class StaticFileChunkingStrategy(BaseModel):
10
+ chunk_overlap_tokens: int
11
+ """The number of tokens that overlap between chunks. The default value is `400`.
12
+
13
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
14
+ """
15
+
16
+ max_chunk_size_tokens: int
17
+ """The maximum number of tokens in each chunk.
18
+
19
+ The default value is `800`. The minimum value is `100` and the maximum value is
20
+ `4096`.
21
+ """
.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+ from .static_file_chunking_strategy import StaticFileChunkingStrategy
7
+
8
+ __all__ = ["StaticFileChunkingStrategyObject"]
9
+
10
+
11
+ class StaticFileChunkingStrategyObject(BaseModel):
12
+ static: StaticFileChunkingStrategy
13
+
14
+ type: Literal["static"]
15
+ """Always `static`."""
.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
8
+
9
+ __all__ = ["StaticFileChunkingStrategyObjectParam"]
10
+
11
+
12
+ class StaticFileChunkingStrategyObjectParam(TypedDict, total=False):
13
+ static: Required[StaticFileChunkingStrategyParam]
14
+
15
+ type: Required[Literal["static"]]
16
+ """Always `static`."""
.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_param.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ __all__ = ["StaticFileChunkingStrategyParam"]
8
+
9
+
10
+ class StaticFileChunkingStrategyParam(TypedDict, total=False):
11
+ chunk_overlap_tokens: Required[int]
12
+ """The number of tokens that overlap between chunks. The default value is `400`.
13
+
14
+ Note that the overlap must not exceed half of `max_chunk_size_tokens`.
15
+ """
16
+
17
+ max_chunk_size_tokens: Required[int]
18
+ """The maximum number of tokens in each chunk.
19
+
20
+ The default value is `800`. The minimum value is `100` and the maximum value is
21
+ `4096`.
22
+ """
.venv/lib/python3.11/site-packages/openai/types/beta/thread.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from ..shared.metadata import Metadata
8
+
9
+ __all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"]
10
+
11
+
12
+ class ToolResourcesCodeInterpreter(BaseModel):
13
+ file_ids: Optional[List[str]] = None
14
+ """
15
+ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
16
+ available to the `code_interpreter` tool. There can be a maximum of 20 files
17
+ associated with the tool.
18
+ """
19
+
20
+
21
+ class ToolResourcesFileSearch(BaseModel):
22
+ vector_store_ids: Optional[List[str]] = None
23
+ """
24
+ The
25
+ [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
26
+ attached to this thread. There can be a maximum of 1 vector store attached to
27
+ the thread.
28
+ """
29
+
30
+
31
+ class ToolResources(BaseModel):
32
+ code_interpreter: Optional[ToolResourcesCodeInterpreter] = None
33
+
34
+ file_search: Optional[ToolResourcesFileSearch] = None
35
+
36
+
37
+ class Thread(BaseModel):
38
+ id: str
39
+ """The identifier, which can be referenced in API endpoints."""
40
+
41
+ created_at: int
42
+ """The Unix timestamp (in seconds) for when the thread was created."""
43
+
44
+ metadata: Optional[Metadata] = None
45
+ """Set of 16 key-value pairs that can be attached to an object.
46
+
47
+ This can be useful for storing additional information about the object in a
48
+ structured format, and querying for objects via API or the dashboard.
49
+
50
+ Keys are strings with a maximum length of 64 characters. Values are strings with
51
+ a maximum length of 512 characters.
52
+ """
53
+
54
+ object: Literal["thread"]
55
+ """The object type, which is always `thread`."""
56
+
57
+ tool_resources: Optional[ToolResources] = None
58
+ """
59
+ A set of resources that are made available to the assistant's tools in this
60
+ thread. The resources are specific to the type of tool. For example, the
61
+ `code_interpreter` tool requires a list of file IDs, while the `file_search`
62
+ tool requires a list of vector store IDs.
63
+ """
.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_and_run_params.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List, Union, Iterable, Optional
6
+ from typing_extensions import Literal, Required, TypeAlias, TypedDict
7
+
8
+ from ..chat_model import ChatModel
9
+ from .function_tool_param import FunctionToolParam
10
+ from .file_search_tool_param import FileSearchToolParam
11
+ from ..shared_params.metadata import Metadata
12
+ from .code_interpreter_tool_param import CodeInterpreterToolParam
13
+ from .file_chunking_strategy_param import FileChunkingStrategyParam
14
+ from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
15
+ from .threads.message_content_part_param import MessageContentPartParam
16
+ from .assistant_response_format_option_param import AssistantResponseFormatOptionParam
17
+
18
+ __all__ = [
19
+ "ThreadCreateAndRunParamsBase",
20
+ "Thread",
21
+ "ThreadMessage",
22
+ "ThreadMessageAttachment",
23
+ "ThreadMessageAttachmentTool",
24
+ "ThreadMessageAttachmentToolFileSearch",
25
+ "ThreadToolResources",
26
+ "ThreadToolResourcesCodeInterpreter",
27
+ "ThreadToolResourcesFileSearch",
28
+ "ThreadToolResourcesFileSearchVectorStore",
29
+ "ToolResources",
30
+ "ToolResourcesCodeInterpreter",
31
+ "ToolResourcesFileSearch",
32
+ "Tool",
33
+ "TruncationStrategy",
34
+ "ThreadCreateAndRunParamsNonStreaming",
35
+ "ThreadCreateAndRunParamsStreaming",
36
+ ]
37
+
38
+
39
class ThreadCreateAndRunParamsBase(TypedDict, total=False):
    """Request options shared by the streaming and non-streaming variants of
    the create-thread-and-run endpoint; only `assistant_id` is required.
    """

    assistant_id: Required[str]
    """
    The ID of the
    [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
    execute this run.
    """

    instructions: Optional[str]
    """Override the default system message of the assistant.

    This is useful for modifying the behavior on a per-run basis.
    """

    max_completion_tokens: Optional[int]
    """
    The maximum number of completion tokens that may be used over the course of the
    run. The run will make a best effort to use only the number of completion tokens
    specified, across multiple turns of the run. If the run exceeds the number of
    completion tokens specified, the run will end with status `incomplete`. See
    `incomplete_details` for more info.
    """

    max_prompt_tokens: Optional[int]
    """The maximum number of prompt tokens that may be used over the course of the run.

    The run will make a best effort to use only the number of prompt tokens
    specified, across multiple turns of the run. If the run exceeds the number of
    prompt tokens specified, the run will end with status `incomplete`. See
    `incomplete_details` for more info.
    """

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """

    model: Union[str, ChatModel, None]
    """
    The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
    be used to execute this run. If a value is provided here, it will override the
    model associated with the assistant. If not, the model associated with the
    assistant will be used.
    """

    parallel_tool_calls: bool
    """
    Whether to enable
    [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
    during tool use.
    """

    response_format: Optional[AssistantResponseFormatOptionParam]
    """Specifies the format that the model must output.

    Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
    [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
    and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

    Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
    Outputs which ensures the model will match your supplied JSON schema. Learn more
    in the
    [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

    Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
    message the model generates is valid JSON.

    **Important:** when using JSON mode, you **must** also instruct the model to
    produce JSON yourself via a system or user message. Without this, the model may
    generate an unending stream of whitespace until the generation reaches the token
    limit, resulting in a long-running and seemingly "stuck" request. Also note that
    the message content may be partially cut off if `finish_reason="length"`, which
    indicates the generation exceeded `max_tokens` or the conversation exceeded the
    max context length.
    """

    temperature: Optional[float]
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values like
    0.2 will make it more focused and deterministic.
    """

    thread: Thread
    """Options to create a new thread.

    If no thread is provided when running a request, an empty thread will be
    created.
    """

    tool_choice: Optional[AssistantToolChoiceOptionParam]
    """
    Controls which (if any) tool is called by the model. `none` means the model will
    not call any tools and instead generates a message. `auto` is the default value
    and means the model can pick between generating a message or calling one or more
    tools. `required` means the model must call one or more tools before responding
    to the user. Specifying a particular tool like `{"type": "file_search"}` or
    `{"type": "function", "function": {"name": "my_function"}}` forces the model to
    call that tool.
    """

    tool_resources: Optional[ToolResources]
    """A set of resources that are used by the assistant's tools.

    The resources are specific to the type of tool. For example, the
    `code_interpreter` tool requires a list of file IDs, while the `file_search`
    tool requires a list of vector store IDs.
    """

    tools: Optional[Iterable[Tool]]
    """Override the tools the assistant can use for this run.

    This is useful for modifying the behavior on a per-run basis.
    """

    top_p: Optional[float]
    """
    An alternative to sampling with temperature, called nucleus sampling, where the
    model considers the results of the tokens with top_p probability mass. So 0.1
    means only the tokens comprising the top 10% probability mass are considered.

    We generally recommend altering this or temperature but not both.
    """

    truncation_strategy: Optional[TruncationStrategy]
    """Controls for how a thread will be truncated prior to the run.

    Use this to control the initial context window of the run.
    """
173
+
174
+
175
class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False):
    """Marker dict selecting the `file_search` tool for a message attachment."""

    type: Required[Literal["file_search"]]
    """The type of tool being defined: `file_search`"""
178
+
179
+
180
# A tool a message attachment can target: either `code_interpreter` or `file_search`.
ThreadMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch]
181
+
182
+
183
class ThreadMessageAttachment(TypedDict, total=False):
    """Associates a file with a message and the tools that may access it."""

    file_id: str
    """The ID of the file to attach to the message."""

    tools: Iterable[ThreadMessageAttachmentTool]
    """The tools to add this file to."""
189
+
190
+
191
class ThreadMessage(TypedDict, total=False):
    """A message used to seed a newly created thread; `content` and `role` are required."""

    content: Required[Union[str, Iterable[MessageContentPartParam]]]
    """The text contents of the message."""

    role: Required[Literal["user", "assistant"]]
    """The role of the entity that is creating the message. Allowed values include:

    - `user`: Indicates the message is sent by an actual user and should be used in
      most cases to represent user-generated messages.
    - `assistant`: Indicates the message is generated by the assistant. Use this
      value to insert messages from the assistant into the conversation.
    """

    attachments: Optional[Iterable[ThreadMessageAttachment]]
    """A list of files attached to the message, and the tools they should be added to."""

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """
216
+
217
+
218
class ThreadToolResourcesCodeInterpreter(TypedDict, total=False):
    """`code_interpreter` resources supplied when creating the new thread."""

    file_ids: List[str]
    """
    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
    available to the `code_interpreter` tool. There can be a maximum of 20 files
    associated with the tool.
    """
225
+
226
+
227
class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False):
    """Inline definition of a vector store to create and attach to the new thread."""

    chunking_strategy: FileChunkingStrategyParam
    """The chunking strategy used to chunk the file(s).

    If not set, will use the `auto` strategy. Only applicable if `file_ids` is
    non-empty.
    """

    file_ids: List[str]
    """
    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
    add to the vector store. There can be a maximum of 10000 files in a vector
    store.
    """

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """
251
+
252
+
253
class ThreadToolResourcesFileSearch(TypedDict, total=False):
    """`file_search` resources for the new thread: existing stores or an inline helper."""

    vector_store_ids: List[str]
    """
    The
    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
    attached to this thread. There can be a maximum of 1 vector store attached to
    the thread.
    """

    vector_stores: Iterable[ThreadToolResourcesFileSearchVectorStore]
    """
    A helper to create a
    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
    with file_ids and attach it to this thread. There can be a maximum of 1 vector
    store attached to the thread.
    """
269
+
270
+
271
class ThreadToolResources(TypedDict, total=False):
    """Per-tool resource sets for the thread being created; each entry is optional."""

    code_interpreter: ThreadToolResourcesCodeInterpreter

    file_search: ThreadToolResourcesFileSearch
275
+
276
+
277
class Thread(TypedDict, total=False):
    """Options describing the new thread to create as part of this request."""

    messages: Iterable[ThreadMessage]
    """
    A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
    start the thread with.
    """

    metadata: Optional[Metadata]
    """Set of 16 key-value pairs that can be attached to an object.

    This can be useful for storing additional information about the object in a
    structured format, and querying for objects via API or the dashboard.

    Keys are strings with a maximum length of 64 characters. Values are strings with
    a maximum length of 512 characters.
    """

    tool_resources: Optional[ThreadToolResources]
    """
    A set of resources that are made available to the assistant's tools in this
    thread. The resources are specific to the type of tool. For example, the
    `code_interpreter` tool requires a list of file IDs, while the `file_search`
    tool requires a list of vector store IDs.
    """
301
+
302
+
303
class ToolResourcesCodeInterpreter(TypedDict, total=False):
    """`code_interpreter` resources used by the run (run-level, not thread-level)."""

    file_ids: List[str]
    """
    A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
    available to the `code_interpreter` tool. There can be a maximum of 20 files
    associated with the tool.
    """
310
+
311
+
312
class ToolResourcesFileSearch(TypedDict, total=False):
    """`file_search` resources used by the run (run-level, not thread-level)."""

    vector_store_ids: List[str]
    """
    The ID of the
    [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
    attached to this assistant. There can be a maximum of 1 vector store attached to
    the assistant.
    """
320
+
321
+
322
class ToolResources(TypedDict, total=False):
    """Per-tool resource sets used by the run itself; each entry is optional."""

    code_interpreter: ToolResourcesCodeInterpreter

    file_search: ToolResourcesFileSearch
326
+
327
+
328
# Any tool the assistant may use for this run: code_interpreter, file_search, or a function.
Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
329
+
330
+
331
class TruncationStrategy(TypedDict, total=False):
    """How to truncate the thread's history to fit the model context; `type` is required."""

    type: Required[Literal["auto", "last_messages"]]
    """The truncation strategy to use for the thread.

    The default is `auto`. If set to `last_messages`, the thread will be truncated
    to the n most recent messages in the thread. When set to `auto`, messages in the
    middle of the thread will be dropped to fit the context length of the model,
    `max_prompt_tokens`.
    """

    last_messages: Optional[int]
    """
    The number of most recent messages from the thread when constructing the context
    for the run.
    """
346
+
347
+
348
class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False):
    """Variant of the base params where `stream` is absent or `False`."""

    stream: Optional[Literal[False]]
    """
    If `true`, returns a stream of events that happen during the Run as server-sent
    events, terminating when the Run enters a terminal state with a `data: [DONE]`
    message.
    """
355
+
356
+
357
class ThreadCreateAndRunParamsStreaming(ThreadCreateAndRunParamsBase):
    """Variant of the base params where `stream` must be `True`."""

    stream: Required[Literal[True]]
    """
    If `true`, returns a stream of events that happen during the Run as server-sent
    events, terminating when the Run enters a terminal state with a `data: [DONE]`
    message.
    """
364
+
365
+
366
# Public request type: either the streaming or the non-streaming parameter shape.
ThreadCreateAndRunParams = Union[ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming]