diff --git a/.gitattributes b/.gitattributes index 7a27c14e911120886cc3ff2a29b17fdcabdd98af..7abf9a54e71c696141e88d26a2689df12bfca4e4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -343,3 +343,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/ .venv/lib/python3.11/site-packages/distlib/w64.exe filter=lfs diff=lfs merge=lfs -text .venv/lib/python3.11/site-packages/distlib/t64.exe filter=lfs diff=lfs merge=lfs -text .venv/lib/python3.11/site-packages/distlib/t64-arm.exe filter=lfs diff=lfs merge=lfs -text +.venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/.venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so b/.venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b35695676129351589c9aa49c37674b7a27ded61 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/multidict/_multidict.cpython-311-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49419ffe4026850c619fceb5fa38826b47b9d68342aa5813871ea4dc83a0c2ab +size 410592 diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/audio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..822e0f3a8d239609862eef476c8e27e64238bce6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/audio/__init__.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .translation import Translation as Translation +from .speech_model import SpeechModel as SpeechModel +from .transcription import Transcription as Transcription +from .transcription_word import TranscriptionWord as TranscriptionWord +from .translation_verbose import TranslationVerbose as TranslationVerbose +from .speech_create_params import SpeechCreateParams as SpeechCreateParams +from .transcription_segment import TranscriptionSegment as TranscriptionSegment +from .transcription_verbose import TranscriptionVerbose as TranscriptionVerbose +from .translation_create_params import TranslationCreateParams as TranslationCreateParams +from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams +from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse +from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bebb6de1d41adc11aa19a2554e2352572048272f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_create_params.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f807f83df40bc5f69c9a6f30c846ab68424d12d Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_create_params.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_model.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_model.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed0486a2725fcb851ec3706ac8b8202e1d53f25a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/speech_model.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8659e62a16fd87ef60fb68f7fb6ac4136b68261 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_params.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..154a5dcdbafe7432ccb71bdaf4c7401a61494c1f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_params.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_response.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_response.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b4fbb6441db2419367c84bf564ebdec8c4ca89b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_create_response.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_segment.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_segment.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97689a73d3091750982d2e111dc5a3b932531fb3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_segment.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_verbose.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_verbose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6783c30bdbb94c44eb6ecee7e6b70f682923a6a0 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_verbose.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_word.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_word.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efe31fe51c772e9d7b97bff06aea45b0a6f557f3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/transcription_word.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..306e02daad2db7302175da7e1dc873ccedbb9b68 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_params.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b992a30f1308220194e0ef33be71a5f700fd7bb4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_params.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_response.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_response.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6e97bfa80eb78248125da936355cd193dad286e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_create_response.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_verbose.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_verbose.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..767a68a19817af7521e78eaf30e4340f1e05d81f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/audio/__pycache__/translation_verbose.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_response.py b/.venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_response.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7bed8114cec0f799078b7932187e131db5e6f7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/audio/transcription_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import TypeAlias + +from .transcription import Transcription +from .transcription_verbose import TranscriptionVerbose + +__all__ = ["TranscriptionCreateResponse"] + +TranscriptionCreateResponse: TypeAlias = Union[Transcription, TranscriptionVerbose] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/beta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b9ea792bfa1e466f10b44b0e2796bc7a06db5498 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/__init__.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .thread import Thread as Thread +from .assistant import Assistant as Assistant +from .vector_store import VectorStore as VectorStore +from .function_tool import FunctionTool as FunctionTool +from .assistant_tool import AssistantTool as AssistantTool +from .thread_deleted import ThreadDeleted as ThreadDeleted +from .file_search_tool import FileSearchTool as FileSearchTool +from .assistant_deleted import AssistantDeleted as AssistantDeleted +from .function_tool_param import FunctionToolParam as FunctionToolParam +from .assistant_tool_param import AssistantToolParam as AssistantToolParam +from .thread_create_params import ThreadCreateParams as ThreadCreateParams +from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams +from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted +from .assistant_list_params import AssistantListParams as AssistantListParams +from .assistant_tool_choice import AssistantToolChoice as AssistantToolChoice +from .code_interpreter_tool import CodeInterpreterTool as CodeInterpreterTool +from .assistant_stream_event import AssistantStreamEvent as AssistantStreamEvent +from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy +from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam +from .assistant_create_params import AssistantCreateParams as AssistantCreateParams +from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams +from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams +from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams +from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams +from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam +from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam +from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption +from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam +from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams +from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy +from .assistant_tool_choice_function import AssistantToolChoiceFunction as AssistantToolChoiceFunction +from .assistant_response_format_option import AssistantResponseFormatOption as AssistantResponseFormatOption +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam +from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam as AssistantToolChoiceOptionParam +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam +from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam as AssistantToolChoiceFunctionParam +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject +from .assistant_response_format_option_param import ( + AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, +) +from .static_file_chunking_strategy_object_param import ( + StaticFileChunkingStrategyObjectParam as 
StaticFileChunkingStrategyObjectParam, +) diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant.py new file mode 100644 index 0000000000000000000000000000000000000000..58421e0f661b14922655353b91d4569fd97fe9d4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .assistant_tool import AssistantTool +from ..shared.metadata import Metadata +from .assistant_response_format_option import AssistantResponseFormatOption + +__all__ = ["Assistant", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class ToolResourcesCodeInterpreter(BaseModel): + file_ids: Optional[List[str]] = None + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter`` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ToolResourcesFileSearch(BaseModel): + vector_store_ids: Optional[List[str]] = None + """ + The ID of the + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 vector store attached to + the assistant. + """ + + +class ToolResources(BaseModel): + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None + + file_search: Optional[ToolResourcesFileSearch] = None + + +class Assistant(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the assistant was created.""" + + description: Optional[str] = None + """The description of the assistant. The maximum length is 512 characters.""" + + instructions: Optional[str] = None + """The system instructions that the assistant uses. + + The maximum length is 256,000 characters. + """ + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: str + """ID of the model to use. + + You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. + """ + + name: Optional[str] = None + """The name of the assistant. The maximum length is 256 characters.""" + + object: Literal["assistant"] + """The object type, which is always `assistant`.""" + + tools: List[AssistantTool] + """A list of tool enabled on the assistant. + + There can be a maximum of 128 tools per assistant. Tools can be of types + `code_interpreter`, `file_search`, or `function`. + """ + + response_format: Optional[AssistantResponseFormatOption] = None + """Specifies the format that the model must output. + + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. 
+ + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + temperature: Optional[float] = None + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + """ + + tool_resources: Optional[ToolResources] = None + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + top_p: Optional[float] = None + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_create_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_create_params.py new file mode 100644 index 0000000000000000000000000000000000000000..e205856395bf88d48804381b024a6cbf67f1c854 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_create_params.py @@ -0,0 +1,168 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Required, TypedDict + +from ..chat_model import ChatModel +from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata +from .file_chunking_strategy_param import FileChunkingStrategyParam +from .assistant_response_format_option_param import AssistantResponseFormatOptionParam + +__all__ = [ + "AssistantCreateParams", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "ToolResourcesFileSearchVectorStore", +] + + +class AssistantCreateParams(TypedDict, total=False): + model: Required[Union[str, ChatModel]] + """ID of the model to use. + + You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. + """ + + description: Optional[str] + """The description of the assistant. The maximum length is 512 characters.""" + + instructions: Optional[str] + """The system instructions that the assistant uses. + + The maximum length is 256,000 characters. 
+ """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + name: Optional[str] + """The name of the assistant. The maximum length is 256 characters.""" + + response_format: Optional[AssistantResponseFormatOptionParam] + """Specifies the format that the model must output. + + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + """ + + tool_resources: Optional[ToolResources] + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + tools: Iterable[AssistantToolParam] + """A list of tool enabled on the assistant. + + There can be a maximum of 128 tools per assistant. Tools can be of types + `code_interpreter`, `file_search`, or `function`. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: FileChunkingStrategyParam + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. + """ + + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + add to the vector store. 
There can be a maximum of 10000 files in a vector + store. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 vector store attached to + the assistant. + """ + + vector_stores: Iterable[ToolResourcesFileSearchVectorStore] + """ + A helper to create a + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + with file_ids and attach it to this assistant. There can be a maximum of 1 + vector store attached to the assistant. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_deleted.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_deleted.py new file mode 100644 index 0000000000000000000000000000000000000000..3be40cd6b8f088ff674c7f7d0f35995d1cab39eb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_deleted.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["AssistantDeleted"] + + +class AssistantDeleted(BaseModel): + id: str + + deleted: bool + + object: Literal["assistant.deleted"] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_list_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_list_params.py new file mode 100644 index 0000000000000000000000000000000000000000..834ffbcaf8fd7ffa4b8f4edb3786711ce3ca6e28 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_list_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["AssistantListParams"] + + +class AssistantListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. 
+ """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option.py new file mode 100644 index 0000000000000000000000000000000000000000..6f06a3442fbd6294d1aca77800b009e627304b0c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from ..shared.response_format_text import ResponseFormatText +from ..shared.response_format_json_object import ResponseFormatJSONObject +from ..shared.response_format_json_schema import ResponseFormatJSONSchema + +__all__ = ["AssistantResponseFormatOption"] + +AssistantResponseFormatOption: TypeAlias = Union[ + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option_param.py new file mode 100644 index 0000000000000000000000000000000000000000..5e724a4d9851c49e8b9cf0fd4ed1536a2f057698 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_response_format_option_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from ..shared_params.response_format_text import ResponseFormatText +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema + +__all__ = ["AssistantResponseFormatOptionParam"] + +AssistantResponseFormatOptionParam: TypeAlias = Union[ + Literal["auto"], ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_stream_event.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_stream_event.py new file mode 100644 index 0000000000000000000000000000000000000000..41d3a0c5eacea0f63fc9669878fbc797e09c2bb4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_stream_event.py @@ -0,0 +1,294 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from .thread import Thread +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .threads.run import Run +from .threads.message import Message +from ..shared.error_object import ErrorObject +from .threads.runs.run_step import RunStep +from .threads.message_delta_event import MessageDeltaEvent +from .threads.runs.run_step_delta_event import RunStepDeltaEvent + +__all__ = [ + "AssistantStreamEvent", + "ThreadCreated", + "ThreadRunCreated", + "ThreadRunQueued", + "ThreadRunInProgress", + "ThreadRunRequiresAction", + "ThreadRunCompleted", + "ThreadRunIncomplete", + "ThreadRunFailed", + "ThreadRunCancelling", + "ThreadRunCancelled", + "ThreadRunExpired", + "ThreadRunStepCreated", + "ThreadRunStepInProgress", + "ThreadRunStepDelta", + "ThreadRunStepCompleted", + "ThreadRunStepFailed", + "ThreadRunStepCancelled", + "ThreadRunStepExpired", + "ThreadMessageCreated", + "ThreadMessageInProgress", + "ThreadMessageDelta", + "ThreadMessageCompleted", + "ThreadMessageIncomplete", + "ErrorEvent", +] + + +class ThreadCreated(BaseModel): + data: Thread + """ + Represents a thread that contains + [messages](https://platform.openai.com/docs/api-reference/messages). + """ + + event: Literal["thread.created"] + + enabled: Optional[bool] = None + """Whether to enable input audio transcription.""" + + +class ThreadRunCreated(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.created"] + + +class ThreadRunQueued(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.queued"] + + +class ThreadRunInProgress(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.in_progress"] + + +class ThreadRunRequiresAction(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.requires_action"] + + +class ThreadRunCompleted(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.completed"] + + +class ThreadRunIncomplete(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.incomplete"] + + +class ThreadRunFailed(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.failed"] + + +class ThreadRunCancelling(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.cancelling"] + + +class ThreadRunCancelled(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.run.cancelled"] + + +class ThreadRunExpired(BaseModel): + data: Run + """ + Represents an execution run on a + [thread](https://platform.openai.com/docs/api-reference/threads). 
+ """ + + event: Literal["thread.run.expired"] + + +class ThreadRunStepCreated(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.created"] + + +class ThreadRunStepInProgress(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.in_progress"] + + +class ThreadRunStepDelta(BaseModel): + data: RunStepDeltaEvent + """Represents a run step delta i.e. + + any changed fields on a run step during streaming. + """ + + event: Literal["thread.run.step.delta"] + + +class ThreadRunStepCompleted(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.completed"] + + +class ThreadRunStepFailed(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.failed"] + + +class ThreadRunStepCancelled(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.cancelled"] + + +class ThreadRunStepExpired(BaseModel): + data: RunStep + """Represents a step in execution of a run.""" + + event: Literal["thread.run.step.expired"] + + +class ThreadMessageCreated(BaseModel): + data: Message + """ + Represents a message within a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.message.created"] + + +class ThreadMessageInProgress(BaseModel): + data: Message + """ + Represents a message within a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.message.in_progress"] + + +class ThreadMessageDelta(BaseModel): + data: MessageDeltaEvent + """Represents a message delta i.e. + + any changed fields on a message during streaming. + """ + + event: Literal["thread.message.delta"] + + +class ThreadMessageCompleted(BaseModel): + data: Message + """ + Represents a message within a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.message.completed"] + + +class ThreadMessageIncomplete(BaseModel): + data: Message + """ + Represents a message within a + [thread](https://platform.openai.com/docs/api-reference/threads). + """ + + event: Literal["thread.message.incomplete"] + + +class ErrorEvent(BaseModel): + data: ErrorObject + + event: Literal["error"] + + +AssistantStreamEvent: TypeAlias = Annotated[ + Union[ + ThreadCreated, + ThreadRunCreated, + ThreadRunQueued, + ThreadRunInProgress, + ThreadRunRequiresAction, + ThreadRunCompleted, + ThreadRunIncomplete, + ThreadRunFailed, + ThreadRunCancelling, + ThreadRunCancelled, + ThreadRunExpired, + ThreadRunStepCreated, + ThreadRunStepInProgress, + ThreadRunStepDelta, + ThreadRunStepCompleted, + ThreadRunStepFailed, + ThreadRunStepCancelled, + ThreadRunStepExpired, + ThreadMessageCreated, + ThreadMessageInProgress, + ThreadMessageDelta, + ThreadMessageCompleted, + ThreadMessageIncomplete, + ErrorEvent, + ], + PropertyInfo(discriminator="event"), +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..1bde6858b1a6d401297ab8f5c19ea0343fb34332 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .function_tool import FunctionTool +from .file_search_tool import FileSearchTool +from .code_interpreter_tool import CodeInterpreterTool + +__all__ = ["AssistantTool"] + +AssistantTool: TypeAlias = Annotated[ + Union[CodeInterpreterTool, FileSearchTool, FunctionTool], PropertyInfo(discriminator="type") +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice.py new file mode 100644 index 0000000000000000000000000000000000000000..d73439f006d6259b71e0970083c2d377892c1a43 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .assistant_tool_choice_function import AssistantToolChoiceFunction + +__all__ = ["AssistantToolChoice"] + + +class AssistantToolChoice(BaseModel): + type: Literal["function", "code_interpreter", "file_search"] + """The type of the tool. If type is `function`, the function name must be set""" + + function: Optional[AssistantToolChoiceFunction] = None diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function.py new file mode 100644 index 0000000000000000000000000000000000000000..0c896d8087a5a4d72a64a9eaaf5bb23746d9c53c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["AssistantToolChoiceFunction"] + + +class AssistantToolChoiceFunction(BaseModel): + name: str + """The name of the function to call.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function_param.py new file mode 100644 index 0000000000000000000000000000000000000000..428857de91543e81ed4f62bffefd3b9fc6dca057 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_function_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AssistantToolChoiceFunctionParam"] + + +class AssistantToolChoiceFunctionParam(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option.py new file mode 100644 index 0000000000000000000000000000000000000000..e57c3278fb5ec720e27dc7da84b44892404790db --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .assistant_tool_choice import AssistantToolChoice + +__all__ = ["AssistantToolChoiceOption"] + +AssistantToolChoiceOption: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoice] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option_param.py new file mode 100644 index 0000000000000000000000000000000000000000..cc0053d37e9b1159844c22767d7d780e02ef58b0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_option_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, TypeAlias + +from .assistant_tool_choice_param import AssistantToolChoiceParam + +__all__ = ["AssistantToolChoiceOptionParam"] + +AssistantToolChoiceOptionParam: TypeAlias = Union[Literal["none", "auto", "required"], AssistantToolChoiceParam] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_param.py new file mode 100644 index 0000000000000000000000000000000000000000..904f489e2606168ba8eec96571cfc4e29a558b11 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_choice_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .assistant_tool_choice_function_param import AssistantToolChoiceFunctionParam + +__all__ = ["AssistantToolChoiceParam"] + + +class AssistantToolChoiceParam(TypedDict, total=False): + type: Required[Literal["function", "code_interpreter", "file_search"]] + """The type of the tool. If type is `function`, the function name must be set""" + + function: AssistantToolChoiceFunctionParam diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_param.py new file mode 100644 index 0000000000000000000000000000000000000000..321c4b1ddbe9b11232ba3ec88ace26644fd1b0e4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_tool_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .function_tool_param import FunctionToolParam +from .file_search_tool_param import FileSearchToolParam +from .code_interpreter_tool_param import CodeInterpreterToolParam + +__all__ = ["AssistantToolParam"] + +AssistantToolParam: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_update_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_update_params.py new file mode 100644 index 0000000000000000000000000000000000000000..35065ef61b17ed5a3bfb493076ef9defe0335c5f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/assistant_update_params.py @@ -0,0 +1,127 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Iterable, Optional +from typing_extensions import TypedDict + +from .assistant_tool_param import AssistantToolParam +from ..shared_params.metadata import Metadata +from .assistant_response_format_option_param import AssistantResponseFormatOptionParam + +__all__ = ["AssistantUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class AssistantUpdateParams(TypedDict, total=False): + description: Optional[str] + """The description of the assistant. The maximum length is 512 characters.""" + + instructions: Optional[str] + """The system instructions that the assistant uses. + + The maximum length is 256,000 characters. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: str + """ID of the model to use. + + You can use the + [List models](https://platform.openai.com/docs/api-reference/models/list) API to + see all of your available models, or see our + [Model overview](https://platform.openai.com/docs/models) for descriptions of + them. + """ + + name: Optional[str] + """The name of the assistant. The maximum length is 256 characters.""" + + response_format: Optional[AssistantResponseFormatOptionParam] + """Specifies the format that the model must output. + + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + """ + + tool_resources: Optional[ToolResources] + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + tools: Iterable[AssistantToolParam] + """A list of tool enabled on the assistant. + + There can be a maximum of 128 tools per assistant. Tools can be of types + `code_interpreter`, `file_search`, or `function`. 
+ """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + Overrides the list of + [file](https://platform.openai.com/docs/api-reference/files) IDs made available + to the `code_interpreter` tool. There can be a maximum of 20 files associated + with the tool. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + Overrides the + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 vector store attached to + the assistant. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/auto_file_chunking_strategy_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/auto_file_chunking_strategy_param.py new file mode 100644 index 0000000000000000000000000000000000000000..6f17836bac55fea835ecb284fbc5a4414ee16d01 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/auto_file_chunking_strategy_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoFileChunkingStrategyParam"] + + +class AutoFileChunkingStrategyParam(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool.py b/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..17ab3de629fd2ffe2f638338a0d8a5bc66a8b561 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["CodeInterpreterTool"] + + +class CodeInterpreterTool(BaseModel): + type: Literal["code_interpreter"] + """The type of tool being defined: `code_interpreter`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool_param.py new file mode 100644 index 0000000000000000000000000000000000000000..4f6916d7565f2066c75004f59058799f6fef49b6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/code_interpreter_tool_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["CodeInterpreterToolParam"] + + +class CodeInterpreterToolParam(TypedDict, total=False): + type: Required[Literal["code_interpreter"]] + """The type of tool being defined: `code_interpreter`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy.py b/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..406d69dd0eb02cf28c39b91d69a0a8a7c35aa2da --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject +from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject + +__all__ = ["FileChunkingStrategy"] + +FileChunkingStrategy: TypeAlias = Annotated[ + Union[StaticFileChunkingStrategyObject, OtherFileChunkingStrategyObject], PropertyInfo(discriminator="type") +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy_param.py new file mode 100644 index 0000000000000000000000000000000000000000..25d94286d8bc54293365772cfc68642f27c4f6a1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/file_chunking_strategy_param.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam +from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam + +__all__ = ["FileChunkingStrategyParam"] + +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool.py b/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..89fc16c04ca3c6f9d6fd177cf7c8ec3f4ff4e6a4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FileSearchTool", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(BaseModel): + score_threshold: float + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ + + ranker: Optional[Literal["auto", "default_2024_08_21"]] = None + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ + + +class FileSearch(BaseModel): + max_num_results: Optional[int] = None + """The maximum number of results the file search tool should output. + + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. 
+ + Note that the file search tool may output fewer than `max_num_results` results. + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + ranking_options: Optional[FileSearchRankingOptions] = None + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + +class FileSearchTool(BaseModel): + type: Literal["file_search"] + """The type of tool being defined: `file_search`""" + + file_search: Optional[FileSearch] = None + """Overrides for the file search tool.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool_param.py new file mode 100644 index 0000000000000000000000000000000000000000..c73d0af79dd4173aa24c01d74fc6ff5691245d8a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/file_search_tool_param.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["FileSearchToolParam", "FileSearch", "FileSearchRankingOptions"] + + +class FileSearchRankingOptions(TypedDict, total=False): + score_threshold: Required[float] + """The score threshold for the file search. + + All values must be a floating point number between 0 and 1. + """ + + ranker: Literal["auto", "default_2024_08_21"] + """The ranker to use for the file search. + + If not specified will use the `auto` ranker. + """ + + +class FileSearch(TypedDict, total=False): + max_num_results: int + """The maximum number of results the file search tool should output. + + The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number + should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than `max_num_results` results. + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + ranking_options: FileSearchRankingOptions + """The ranking options for the file search. + + If not specified, the file search tool will use the `auto` ranker and a + score_threshold of 0. + + See the + [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + for more information. + """ + + +class FileSearchToolParam(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + file_search: FileSearch + """Overrides for the file search tool.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool.py b/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool.py new file mode 100644 index 0000000000000000000000000000000000000000..f9227678dfd94a29988a3c4cda66e85c0cdc356a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.function_definition import FunctionDefinition + +__all__ = ["FunctionTool"] + + +class FunctionTool(BaseModel): + function: FunctionDefinition + + type: Literal["function"] + """The type of tool being defined: `function`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool_param.py new file mode 100644 index 0000000000000000000000000000000000000000..d906e02b8851941ff4ea38943a2c6ba7d3f228af --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/function_tool_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.function_definition import FunctionDefinition + +__all__ = ["FunctionToolParam"] + + +class FunctionToolParam(TypedDict, total=False): + function: Required[FunctionDefinition] + + type: Required[Literal["function"]] + """The type of tool being defined: `function`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/other_file_chunking_strategy_object.py b/.venv/lib/python3.11/site-packages/openai/types/beta/other_file_chunking_strategy_object.py new file mode 100644 index 0000000000000000000000000000000000000000..89da560be4c42f559c38a8ded6f94fc9bd1bbfda --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/other_file_chunking_strategy_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["OtherFileChunkingStrategyObject"] + + +class OtherFileChunkingStrategyObject(BaseModel): + type: Literal["other"] + """Always `other`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy.py b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..60800935174562e5f27e2313c1297c04e6f04c0a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + + +from ..._models import BaseModel + +__all__ = ["StaticFileChunkingStrategy"] + + +class StaticFileChunkingStrategy(BaseModel): + chunk_overlap_tokens: int + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: int + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object.py b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object.py new file mode 100644 index 0000000000000000000000000000000000000000..896c4b8320c56a41ffa3c7dbc3c02182f070e6fa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
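Reviewer note: a minimal sketch of a `FunctionToolParam` built from the TypedDict above. The `get_weather` function and its JSON-schema `parameters` are hypothetical; `FunctionDefinition` comes from `shared_params.function_definition`.

from openai.types.beta.function_tool_param import FunctionToolParam

# Hypothetical function tool definition.
weather_tool: FunctionToolParam = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}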
+ +from typing_extensions import Literal + +from ..._models import BaseModel +from .static_file_chunking_strategy import StaticFileChunkingStrategy + +__all__ = ["StaticFileChunkingStrategyObject"] + + +class StaticFileChunkingStrategyObject(BaseModel): + static: StaticFileChunkingStrategy + + type: Literal["static"] + """Always `static`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object_param.py new file mode 100644 index 0000000000000000000000000000000000000000..0cdf35c0df3470fc56682bd032fb4a0457e4ae19 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_object_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["StaticFileChunkingStrategyObjectParam"] + + +class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + static: Required[StaticFileChunkingStrategyParam] + + type: Required[Literal["static"]] + """Always `static`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_param.py new file mode 100644 index 0000000000000000000000000000000000000000..f917ac56470cff3fa22929082358a5e958062339 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/static_file_chunking_strategy_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["StaticFileChunkingStrategyParam"] + + +class StaticFileChunkingStrategyParam(TypedDict, total=False): + chunk_overlap_tokens: Required[int] + """The number of tokens that overlap between chunks. The default value is `400`. + + Note that the overlap must not exceed half of `max_chunk_size_tokens`. + """ + + max_chunk_size_tokens: Required[int] + """The maximum number of tokens in each chunk. + + The default value is `800`. The minimum value is `100` and the maximum value is + `4096`. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/thread.py b/.venv/lib/python3.11/site-packages/openai/types/beta/thread.py new file mode 100644 index 0000000000000000000000000000000000000000..789f66e48b54e99fb3e3c63badb3f2697a70959f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/thread.py @@ -0,0 +1,63 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.metadata import Metadata + +__all__ = ["Thread", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class ToolResourcesCodeInterpreter(BaseModel): + file_ids: Optional[List[str]] = None + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. 
+ """ + + +class ToolResourcesFileSearch(BaseModel): + vector_store_ids: Optional[List[str]] = None + """ + The + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector store attached to + the thread. + """ + + +class ToolResources(BaseModel): + code_interpreter: Optional[ToolResourcesCodeInterpreter] = None + + file_search: Optional[ToolResourcesFileSearch] = None + + +class Thread(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the thread was created.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + object: Literal["thread"] + """The object type, which is always `thread`.""" + + tool_resources: Optional[ToolResources] = None + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_and_run_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_and_run_params.py new file mode 100644 index 0000000000000000000000000000000000000000..08f044c1be71a65d3616c57416df457e635a2282 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_and_run_params.py @@ -0,0 +1,366 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..chat_model import ChatModel +from .function_tool_param import FunctionToolParam +from .file_search_tool_param import FileSearchToolParam +from ..shared_params.metadata import Metadata +from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam +from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam +from .threads.message_content_part_param import MessageContentPartParam +from .assistant_response_format_option_param import AssistantResponseFormatOptionParam + +__all__ = [ + "ThreadCreateAndRunParamsBase", + "Thread", + "ThreadMessage", + "ThreadMessageAttachment", + "ThreadMessageAttachmentTool", + "ThreadMessageAttachmentToolFileSearch", + "ThreadToolResources", + "ThreadToolResourcesCodeInterpreter", + "ThreadToolResourcesFileSearch", + "ThreadToolResourcesFileSearchVectorStore", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "Tool", + "TruncationStrategy", + "ThreadCreateAndRunParamsNonStreaming", + "ThreadCreateAndRunParamsStreaming", +] + + +class ThreadCreateAndRunParamsBase(TypedDict, total=False): + assistant_id: Required[str] + """ + The ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to + execute this run. + """ + + instructions: Optional[str] + """Override the default system message of the assistant. 
+ + This is useful for modifying the behavior on a per-run basis. + """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. If the run exceeds the number of + completion tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + max_prompt_tokens: Optional[int] + """The maximum number of prompt tokens that may be used over the course of the run. + + The run will make a best effort to use only the number of prompt tokens + specified, across multiple turns of the run. If the run exceeds the number of + prompt tokens specified, the run will end with status `incomplete`. See + `incomplete_details` for more info. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + model: Union[str, ChatModel, None] + """ + The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to + be used to execute this run. If a value is provided here, it will override the + model associated with the assistant. If not, the model associated with the + assistant will be used. + """ + + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + """ + + response_format: Optional[AssistantResponseFormatOptionParam] + """Specifies the format that the model must output. + + Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), + [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), + and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. + """ + + thread: Thread + """Options to create a new thread. + + If no thread is provided when running a request, an empty thread will be + created. + """ + + tool_choice: Optional[AssistantToolChoiceOptionParam] + """ + Controls which (if any) tool is called by the model. 
`none` means the model will + not call any tools and instead generates a message. `auto` is the default value + and means the model can pick between generating a message or calling one or more + tools. `required` means the model must call one or more tools before responding + to the user. Specifying a particular tool like `{"type": "file_search"}` or + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + """ + + tool_resources: Optional[ToolResources] + """A set of resources that are used by the assistant's tools. + + The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + tools: Optional[Iterable[Tool]] + """Override the tools the assistant can use for this run. + + This is useful for modifying the behavior on a per-run basis. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + """ + + truncation_strategy: Optional[TruncationStrategy] + """Controls for how a thread will be truncated prior to the run. + + Use this to control the initial context window of the run. + """ + + +class ThreadMessageAttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +ThreadMessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, ThreadMessageAttachmentToolFileSearch] + + +class ThreadMessageAttachment(TypedDict, total=False): + file_id: str + """The ID of the file to attach to the message.""" + + tools: Iterable[ThreadMessageAttachmentTool] + """The tools to add this file to.""" + + +class ThreadMessage(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageContentPartParam]]] + """The text contents of the message.""" + + role: Required[Literal["user", "assistant"]] + """The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + """ + + attachments: Optional[Iterable[ThreadMessageAttachment]] + """A list of files attached to the message, and the tools they should be added to.""" + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ThreadToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ThreadToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: FileChunkingStrategyParam + """The chunking strategy used to chunk the file(s).
+ + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. + """ + + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + add to the vector store. There can be a maximum of 10000 files in a vector + store. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ThreadToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector store attached to + the thread. + """ + + vector_stores: Iterable[ThreadToolResourcesFileSearchVectorStore] + """ + A helper to create a + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + with file_ids and attach it to this thread. There can be a maximum of 1 vector + store attached to the thread. + """ + + +class ThreadToolResources(TypedDict, total=False): + code_interpreter: ThreadToolResourcesCodeInterpreter + + file_search: ThreadToolResourcesFileSearch + + +class Thread(TypedDict, total=False): + messages: Iterable[ThreadMessage] + """ + A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + start the thread with. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ThreadToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The ID of the + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this assistant. There can be a maximum of 1 vector store attached to + the assistant. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch + + +Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam] + + +class TruncationStrategy(TypedDict, total=False): + type: Required[Literal["auto", "last_messages"]] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. 
When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] + """ + The number of most recent messages from the thread to include when constructing + the context for the run. + """ + + +class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + +class ThreadCreateAndRunParamsStreaming(ThreadCreateAndRunParamsBase): + stream: Required[Literal[True]] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + +ThreadCreateAndRunParams = Union[ThreadCreateAndRunParamsNonStreaming, ThreadCreateAndRunParamsStreaming] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_params.py new file mode 100644 index 0000000000000000000000000000000000000000..127202753cb266ebeeddba4ea41cb99c87e90d37 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_create_params.py @@ -0,0 +1,151 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..shared_params.metadata import Metadata +from .code_interpreter_tool_param import CodeInterpreterToolParam +from .file_chunking_strategy_param import FileChunkingStrategyParam +from .threads.message_content_part_param import MessageContentPartParam + +__all__ = [ + "ThreadCreateParams", + "Message", + "MessageAttachment", + "MessageAttachmentTool", + "MessageAttachmentToolFileSearch", + "ToolResources", + "ToolResourcesCodeInterpreter", + "ToolResourcesFileSearch", + "ToolResourcesFileSearchVectorStore", +] + + +class ThreadCreateParams(TypedDict, total=False): + messages: Iterable[Message] + """ + A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + start the thread with. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs.
+ """ + + +class MessageAttachmentToolFileSearch(TypedDict, total=False): + type: Required[Literal["file_search"]] + """The type of tool being defined: `file_search`""" + + +MessageAttachmentTool: TypeAlias = Union[CodeInterpreterToolParam, MessageAttachmentToolFileSearch] + + +class MessageAttachment(TypedDict, total=False): + file_id: str + """The ID of the file to attach to the message.""" + + tools: Iterable[MessageAttachmentTool] + """The tools to add this file to.""" + + +class Message(TypedDict, total=False): + content: Required[Union[str, Iterable[MessageContentPartParam]]] + """The text contents of the message.""" + + role: Required[Literal["user", "assistant"]] + """The role of the entity that is creating the message. Allowed values include: + + - `user`: Indicates the message is sent by an actual user and should be used in + most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this + value to insert messages from the assistant into the conversation. + """ + + attachments: Optional[Iterable[MessageAttachment]] + """A list of files attached to the message, and the tools they should be added to.""" + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ToolResourcesFileSearchVectorStore(TypedDict, total=False): + chunking_strategy: FileChunkingStrategyParam + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. + """ + + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + add to the vector store. There can be a maximum of 10000 files in a vector + store. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector store attached to + the thread. + """ + + vector_stores: Iterable[ToolResourcesFileSearchVectorStore] + """ + A helper to create a + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + with file_ids and attach it to this thread. There can be a maximum of 1 vector + store attached to the thread. 
+ """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/thread_deleted.py b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_deleted.py new file mode 100644 index 0000000000000000000000000000000000000000..d385626319659d2cb2cf8e8428e94d40f5d764d0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_deleted.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ThreadDeleted"] + + +class ThreadDeleted(BaseModel): + id: str + + deleted: bool + + object: Literal["thread.deleted"] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/thread_update_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_update_params.py new file mode 100644 index 0000000000000000000000000000000000000000..b47ea8f3b0c4b39ffaa7020ffb0469f62edfad48 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/thread_update_params.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import TypedDict + +from ..shared_params.metadata import Metadata + +__all__ = ["ThreadUpdateParams", "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch"] + + +class ThreadUpdateParams(TypedDict, total=False): + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + tool_resources: Optional[ToolResources] + """ + A set of resources that are made available to the assistant's tools in this + thread. The resources are specific to the type of tool. For example, the + `code_interpreter` tool requires a list of file IDs, while the `file_search` + tool requires a list of vector store IDs. + """ + + +class ToolResourcesCodeInterpreter(TypedDict, total=False): + file_ids: List[str] + """ + A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + available to the `code_interpreter` tool. There can be a maximum of 20 files + associated with the tool. + """ + + +class ToolResourcesFileSearch(TypedDict, total=False): + vector_store_ids: List[str] + """ + The + [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + attached to this thread. There can be a maximum of 1 vector store attached to + the thread. + """ + + +class ToolResources(TypedDict, total=False): + code_interpreter: ToolResourcesCodeInterpreter + + file_search: ToolResourcesFileSearch diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/annotation.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..13c10abf4d840c51453a26ce7b2d0e38476f4f20 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/annotation.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .file_path_annotation import FilePathAnnotation +from .file_citation_annotation import FileCitationAnnotation + +__all__ = ["Annotation"] + +Annotation: TypeAlias = Annotated[Union[FileCitationAnnotation, FilePathAnnotation], PropertyInfo(discriminator="type")] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_annotation.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..9812737ece4c817e0a96313af42d8460aa9fbb25 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_annotation.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["FilePathAnnotation", "FilePath"] + + +class FilePath(BaseModel): + file_id: str + """The ID of the file that was generated.""" + + +class FilePathAnnotation(BaseModel): + end_index: int + + file_path: FilePath + + start_index: int + + text: str + """The text in the message content that needs to be replaced.""" + + type: Literal["file_path"] + """Always `file_path`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_delta_annotation.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_delta_annotation.py new file mode 100644 index 0000000000000000000000000000000000000000..0cbb445e48193ba721ac17965feabdb5511eeb2f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/file_path_delta_annotation.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["FilePathDeltaAnnotation", "FilePath"] + + +class FilePath(BaseModel): + file_id: Optional[str] = None + """The ID of the file that was generated.""" + + +class FilePathDeltaAnnotation(BaseModel): + index: int + """The index of the annotation in the text content part.""" + + type: Literal["file_path"] + """Always `file_path`.""" + + end_index: Optional[int] = None + + file_path: Optional[FilePath] = None + + start_index: Optional[int] = None + + text: Optional[str] = None + """The text in the message content that needs to be replaced.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file.py new file mode 100644 index 0000000000000000000000000000000000000000..6000d97500580e64134b0a5f7b39dbbc90fe2444 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["ImageFile"] + + +class ImageFile(BaseModel): + file_id: str + """ + The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + in the message content. Set `purpose="vision"` when uploading the File if you + need to later display the file content. + """ + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image if specified by the user. 
+ + `low` uses fewer tokens, you can opt in to high resolution using `high`. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_delta_block.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_delta_block.py new file mode 100644 index 0000000000000000000000000000000000000000..0a5a2e8a5ff2e100bb83ae5f5992f46a866fb5ae --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_delta_block.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .image_file_delta import ImageFileDelta + +__all__ = ["ImageFileDeltaBlock"] + + +class ImageFileDeltaBlock(BaseModel): + index: int + """The index of the content part in the message.""" + + type: Literal["image_file"] + """Always `image_file`.""" + + image_file: Optional[ImageFileDelta] = None diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_param.py new file mode 100644 index 0000000000000000000000000000000000000000..e4a85358b992ce142009ca8d4816d240cb5c442e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_file_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ImageFileParam"] + + +class ImageFileParam(TypedDict, total=False): + file_id: Required[str] + """ + The [File](https://platform.openai.com/docs/api-reference/files) ID of the image + in the message content. Set `purpose="vision"` when uploading the File if you + need to later display the file content. + """ + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image if specified by the user. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_content_block.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_content_block.py new file mode 100644 index 0000000000000000000000000000000000000000..40a16c1df88829c3091951ed3792fc22f0a0bd71 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_content_block.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .image_url import ImageURL +from ...._models import BaseModel + +__all__ = ["ImageURLContentBlock"] + + +class ImageURLContentBlock(BaseModel): + image_url: ImageURL + + type: Literal["image_url"] + """The type of the content part.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta.py new file mode 100644 index 0000000000000000000000000000000000000000..e402671908d507e41bbc5efcad785f9db75b61ec --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
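Reviewer note: a minimal sketch of the `ImageFileParam` defined above. The file ID is a placeholder; as the docstring notes, the File should be uploaded with `purpose="vision"` if its content needs to be displayed later.

from openai.types.beta.threads.image_file_param import ImageFileParam

# Hypothetical reference to an uploaded vision file, using the cheaper
# low-detail rendering.
image: ImageFileParam = {"file_id": "file-abc123", "detail": "low"}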
+ + from typing import Optional + from typing_extensions import Literal + + from ...._models import BaseModel + + __all__ = ["ImageURLDelta"] + + + class ImageURLDelta(BaseModel): + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. + """ + + url: Optional[str] = None + """ + The URL of the image; must be one of the supported image types: jpeg, jpg, png, gif, + webp. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta_block.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta_block.py new file mode 100644 index 0000000000000000000000000000000000000000..5252da12ddb3260dc9e0411e4cbee2e1b4d0acdc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_delta_block.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .image_url_delta import ImageURLDelta + +__all__ = ["ImageURLDeltaBlock"] + + +class ImageURLDeltaBlock(BaseModel): + index: int + """The index of the content part in the message.""" + + type: Literal["image_url"] + """Always `image_url`.""" + + image_url: Optional[ImageURLDelta] = None diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_param.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7e427eddab862e69bd8aedf8ad42d5f22caf32 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/image_url_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ImageURLParam"] + + +class ImageURLParam(TypedDict, total=False): + url: Required[str] + """ + The external URL of the image; must be one of the supported image types: jpeg, jpg, png, + gif, webp. + """ + + detail: Literal["auto", "low", "high"] + """Specifies the detail level of the image. + + `low` uses fewer tokens, you can opt in to high resolution using `high`. The default + value is `auto`. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message.py new file mode 100644 index 0000000000000000000000000000000000000000..4a05a128eb602264d49e68e5667a55012f743d87 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message.py @@ -0,0 +1,103 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias + +from ...._models import BaseModel +from .message_content import MessageContent +from ...shared.metadata import Metadata +from ..code_interpreter_tool import CodeInterpreterTool + +__all__ = [ + "Message", + "Attachment", + "AttachmentTool", + "AttachmentToolAssistantToolsFileSearchTypeOnly", + "IncompleteDetails", +] + + +class AttachmentToolAssistantToolsFileSearchTypeOnly(BaseModel): + type: Literal["file_search"] + """The type of tool being defined: `file_search`""" + + +AttachmentTool: TypeAlias = Union[CodeInterpreterTool, AttachmentToolAssistantToolsFileSearchTypeOnly] + + +class Attachment(BaseModel): + file_id: Optional[str] = None + """The ID of the file to attach to the message.""" + + tools: Optional[List[AttachmentTool]] = None + """The tools to add this file to.""" + + +class IncompleteDetails(BaseModel): + reason: Literal["content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed"] + """The reason the message is incomplete.""" + + +class Message(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + assistant_id: Optional[str] = None + """ + If applicable, the ID of the + [assistant](https://platform.openai.com/docs/api-reference/assistants) that + authored this message. + """ + + attachments: Optional[List[Attachment]] = None + """A list of files attached to the message, and the tools they were added to.""" + + completed_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the message was completed.""" + + content: List[MessageContent] + """The content of the message in array of text and/or images.""" + + created_at: int + """The Unix timestamp (in seconds) for when the message was created.""" + + incomplete_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the message was marked as incomplete.""" + + incomplete_details: Optional[IncompleteDetails] = None + """On an incomplete message, details about why the message is incomplete.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + object: Literal["thread.message"] + """The object type, which is always `thread.message`.""" + + role: Literal["user", "assistant"] + """The entity that produced the message. One of `user` or `assistant`.""" + + run_id: Optional[str] = None + """ + The ID of the [run](https://platform.openai.com/docs/api-reference/runs) + associated with the creation of this message. Value is `null` when messages are + created manually using the create message or create thread endpoints. + """ + + status: Literal["in_progress", "incomplete", "completed"] + """ + The status of the message, which can be either `in_progress`, `incomplete`, or + `completed`. + """ + + thread_id: str + """ + The [thread](https://platform.openai.com/docs/api-reference/threads) ID that + this message belongs to. 
+ """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content.py new file mode 100644 index 0000000000000000000000000000000000000000..9523c1e1b9059f4a5af30485d5dbcb46c95d6b2e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from ...._utils import PropertyInfo +from .text_content_block import TextContentBlock +from .refusal_content_block import RefusalContentBlock +from .image_url_content_block import ImageURLContentBlock +from .image_file_content_block import ImageFileContentBlock + +__all__ = ["MessageContent"] + + +MessageContent: TypeAlias = Annotated[ + Union[ImageFileContentBlock, ImageURLContentBlock, TextContentBlock, RefusalContentBlock], + PropertyInfo(discriminator="type"), +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content_part_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content_part_param.py new file mode 100644 index 0000000000000000000000000000000000000000..dc09a01c2706324aee2cafba19004973018956e8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_content_part_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import TypeAlias + +from .text_content_block_param import TextContentBlockParam +from .image_url_content_block_param import ImageURLContentBlockParam +from .image_file_content_block_param import ImageFileContentBlockParam + +__all__ = ["MessageContentPartParam"] + +MessageContentPartParam: TypeAlias = Union[ImageFileContentBlockParam, ImageURLContentBlockParam, TextContentBlockParam] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_delta.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_delta.py new file mode 100644 index 0000000000000000000000000000000000000000..ecd0dfe319a600979c0975fd50f07ac0f6de97d0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/message_delta.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ...._models import BaseModel +from .message_content_delta import MessageContentDelta + +__all__ = ["MessageDelta"] + + +class MessageDelta(BaseModel): + content: Optional[List[MessageContentDelta]] = None + """The content of the message in array of text and/or images.""" + + role: Optional[Literal["user", "assistant"]] = None + """The entity that produced the message. One of `user` or `assistant`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/refusal_delta_block.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/refusal_delta_block.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd8e62697560c51eb76829bb478682f84b9ffe8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/refusal_delta_block.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ...._models import BaseModel + +__all__ = ["RefusalDeltaBlock"] + + +class RefusalDeltaBlock(BaseModel): + index: int + """The index of the refusal part in the message.""" + + type: Literal["refusal"] + """Always `refusal`.""" + + refusal: Optional[str] = None diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_status.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_status.py new file mode 100644 index 0000000000000000000000000000000000000000..47c7cbd0073571fd2085a6ab69dc2ed106104e39 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_status.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal, TypeAlias + +__all__ = ["RunStatus"] + +RunStatus: TypeAlias = Literal[ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "incomplete", + "expired", +] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py new file mode 100644 index 0000000000000000000000000000000000000000..147728603a2dec6b9f341f358beeecc05d6ee69b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/run_submit_tool_outputs_params.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = [ + "RunSubmitToolOutputsParamsBase", + "ToolOutput", + "RunSubmitToolOutputsParamsNonStreaming", + "RunSubmitToolOutputsParamsStreaming", +] + + +class RunSubmitToolOutputsParamsBase(TypedDict, total=False): + thread_id: Required[str] + + tool_outputs: Required[Iterable[ToolOutput]] + """A list of tools for which the outputs are being submitted.""" + + +class ToolOutput(TypedDict, total=False): + output: str + """The output of the tool call to be submitted to continue the run.""" + + tool_call_id: str + """ + The ID of the tool call in the `required_action` object within the run object + the output is being submitted for. + """ + + +class RunSubmitToolOutputsParamsNonStreaming(RunSubmitToolOutputsParamsBase, total=False): + stream: Optional[Literal[False]] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. + """ + + +class RunSubmitToolOutputsParamsStreaming(RunSubmitToolOutputsParamsBase): + stream: Required[Literal[True]] + """ + If `true`, returns a stream of events that happen during the Run as server-sent + events, terminating when the Run enters a terminal state with a `data: [DONE]` + message. 
+ """ + + +RunSubmitToolOutputsParams = Union[RunSubmitToolOutputsParamsNonStreaming, RunSubmitToolOutputsParamsStreaming] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block.py new file mode 100644 index 0000000000000000000000000000000000000000..3706d6b9d82f879dd3b3fe48f6ab2f4c8203e0a9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .text import Text +from ...._models import BaseModel + +__all__ = ["TextContentBlock"] + + +class TextContentBlock(BaseModel): + text: Text + + type: Literal["text"] + """Always `text`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block_param.py b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block_param.py new file mode 100644 index 0000000000000000000000000000000000000000..6313de32ccbcd13df936999fca7a1c34c9341d44 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/threads/text_content_block_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["TextContentBlockParam"] + + +class TextContentBlockParam(TypedDict, total=False): + text: Required[str] + """Text content to be sent to the model""" + + type: Required[Literal["text"]] + """Always `text`.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store.py b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store.py new file mode 100644 index 0000000000000000000000000000000000000000..b947dfb79db54d028554be51ffbdf0f292fc0ef1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store.py @@ -0,0 +1,82 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.metadata import Metadata + +__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] + + +class FileCounts(BaseModel): + cancelled: int + """The number of files that were cancelled.""" + + completed: int + """The number of files that have been successfully processed.""" + + failed: int + """The number of files that have failed to process.""" + + in_progress: int + """The number of files that are currently being processed.""" + + total: int + """The total number of files.""" + + +class ExpiresAfter(BaseModel): + anchor: Literal["last_active_at"] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: int + """The number of days after the anchor time that the vector store will expire.""" + + +class VectorStore(BaseModel): + id: str + """The identifier, which can be referenced in API endpoints.""" + + created_at: int + """The Unix timestamp (in seconds) for when the vector store was created.""" + + file_counts: FileCounts + + last_active_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the vector store was last active.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. 
+ + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + name: str + """The name of the vector store.""" + + object: Literal["vector_store"] + """The object type, which is always `vector_store`.""" + + status: Literal["expired", "in_progress", "completed"] + """ + The status of the vector store, which can be either `expired`, `in_progress`, or + `completed`. A status of `completed` indicates that the vector store is ready + for use. + """ + + usage_bytes: int + """The total number of bytes used by the files in the vector store.""" + + expires_after: Optional[ExpiresAfter] = None + """The expiration policy for a vector store.""" + + expires_at: Optional[int] = None + """The Unix timestamp (in seconds) for when the vector store will expire.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_create_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_create_params.py new file mode 100644 index 0000000000000000000000000000000000000000..faca6d90008435aa80e521aaa3389b56e7e717e3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_create_params.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.metadata import Metadata +from .file_chunking_strategy_param import FileChunkingStrategyParam + +__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] + + +class VectorStoreCreateParams(TypedDict, total=False): + chunking_strategy: FileChunkingStrategyParam + """The chunking strategy used to chunk the file(s). + + If not set, will use the `auto` strategy. Only applicable if `file_ids` is + non-empty. + """ + + expires_after: ExpiresAfter + """The expiration policy for a vector store.""" + + file_ids: List[str] + """ + A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that + the vector store should use. Useful for tools like `file_search` that can access + files. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + name: str + """The name of the vector store.""" + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_deleted.py b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_deleted.py new file mode 100644 index 0000000000000000000000000000000000000000..21ccda1db5a12cc8dfc0538fba6ad569ff107e80 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_deleted.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["VectorStoreDeleted"] + + +class VectorStoreDeleted(BaseModel): + id: str + + deleted: bool + + object: Literal["vector_store.deleted"] diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_list_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_list_params.py new file mode 100644 index 0000000000000000000000000000000000000000..e26ff90a85b7d8c7c98ecb50b19e86622e3e29f0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_list_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VectorStoreListParams"] + + +class VectorStoreListParams(TypedDict, total=False): + after: str + """A cursor for use in pagination. + + `after` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, ending with obj_foo, your + subsequent call can include after=obj_foo in order to fetch the next page of the + list. + """ + + before: str + """A cursor for use in pagination. + + `before` is an object ID that defines your place in the list. For instance, if + you make a list request and receive 100 objects, starting with obj_foo, your + subsequent call can include before=obj_foo in order to fetch the previous page + of the list. + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """Sort order by the `created_at` timestamp of the objects. + + `asc` for ascending order and `desc` for descending order. + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_update_params.py b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_update_params.py new file mode 100644 index 0000000000000000000000000000000000000000..e91b3ba5ade6a5d12853753e6c6d088cda1d6dcc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/beta/vector_store_update_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.metadata import Metadata + +__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] + + +class VectorStoreUpdateParams(TypedDict, total=False): + expires_after: Optional[ExpiresAfter] + """The expiration policy for a vector store.""" + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + name: Optional[str] + """The name of the vector store.""" + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire."""
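Reviewer note: a minimal sketch combining `VectorStoreCreateParams` with the chunking-strategy and `ExpiresAfter` TypedDicts above. The store name and file ID are placeholders.

from openai.types.beta.vector_store_create_params import VectorStoreCreateParams

# Hypothetical payload: an auto-chunked store that expires 30 days after
# it was last active, per the `last_active_at` anchor defined above.
params: VectorStoreCreateParams = {
    "name": "support-docs",
    "file_ids": ["file-abc123"],
    "chunking_strategy": {"type": "auto"},
    "expires_after": {"anchor": "last_active_at", "days": 30},
}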