mygitphase commited on
Commit
5b6b230
·
verified ·
1 Parent(s): 4daf03b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.12/site-packages/anyio-4.13.0.dist-info/licenses/LICENSE +20 -0
  2. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc +0 -0
  3. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc +0 -0
  4. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc +0 -0
  5. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc +0 -0
  6. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc +0 -0
  7. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc +0 -0
  8. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc +0 -0
  9. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc +0 -0
  10. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-312.pyc +0 -0
  11. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc +0 -0
  12. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc +0 -0
  13. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc +0 -0
  14. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc +0 -0
  15. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/models.cpython-312.pyc +0 -0
  16. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc +0 -0
  17. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc +0 -0
  18. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc +0 -0
  19. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc +0 -0
  20. venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc +0 -0
  21. venv/lib/python3.12/site-packages/sarvamai/__pycache__/__init__.cpython-312.pyc +0 -0
  22. venv/lib/python3.12/site-packages/sarvamai/__pycache__/client.cpython-312.pyc +0 -0
  23. venv/lib/python3.12/site-packages/sarvamai/__pycache__/environment.cpython-312.pyc +0 -0
  24. venv/lib/python3.12/site-packages/sarvamai/__pycache__/play.cpython-312.pyc +0 -0
  25. venv/lib/python3.12/site-packages/sarvamai/__pycache__/version.cpython-312.pyc +0 -0
  26. venv/lib/python3.12/site-packages/sarvamai/chat/__init__.py +4 -0
  27. venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/__init__.cpython-312.pyc +0 -0
  28. venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/client.cpython-312.pyc +0 -0
  29. venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/raw_client.cpython-312.pyc +0 -0
  30. venv/lib/python3.12/site-packages/sarvamai/chat/client.py +446 -0
  31. venv/lib/python3.12/site-packages/sarvamai/chat/raw_client.py +718 -0
  32. venv/lib/python3.12/site-packages/sarvamai/core/__init__.py +132 -0
  33. venv/lib/python3.12/site-packages/sarvamai/core/api_error.py +23 -0
  34. venv/lib/python3.12/site-packages/sarvamai/core/client_wrapper.py +111 -0
  35. venv/lib/python3.12/site-packages/sarvamai/core/datetime_utils.py +70 -0
  36. venv/lib/python3.12/site-packages/sarvamai/core/events.py +38 -0
  37. venv/lib/python3.12/site-packages/sarvamai/core/file.py +67 -0
  38. venv/lib/python3.12/site-packages/sarvamai/core/force_multipart.py +18 -0
  39. venv/lib/python3.12/site-packages/sarvamai/core/http_client.py +776 -0
  40. venv/lib/python3.12/site-packages/sarvamai/core/http_response.py +59 -0
  41. venv/lib/python3.12/site-packages/sarvamai/core/jsonable_encoder.py +108 -0
  42. venv/lib/python3.12/site-packages/sarvamai/core/logging.py +107 -0
  43. venv/lib/python3.12/site-packages/sarvamai/core/pydantic_utilities.py +577 -0
  44. venv/lib/python3.12/site-packages/sarvamai/core/query_encoder.py +58 -0
  45. venv/lib/python3.12/site-packages/sarvamai/core/remove_none_from_dict.py +11 -0
  46. venv/lib/python3.12/site-packages/sarvamai/core/request_options.py +35 -0
  47. venv/lib/python3.12/site-packages/sarvamai/core/serialization.py +276 -0
  48. venv/lib/python3.12/site-packages/sarvamai/core/websocket_compat.py +13 -0
  49. venv/lib/python3.12/site-packages/sarvamai/document_intelligence/__init__.py +4 -0
  50. venv/lib/python3.12/site-packages/sarvamai/document_intelligence/__pycache__/__init__.cpython-312.pyc +0 -0
venv/lib/python3.12/site-packages/anyio-4.13.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The MIT License (MIT)
2
+
3
+ Copyright (c) 2018 Alex Grönholm
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
6
+ this software and associated documentation files (the "Software"), to deal in
7
+ the Software without restriction, including without limitation the rights to
8
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9
+ the Software, and to permit persons to whom the Software is furnished to do so,
10
+ subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17
+ FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18
+ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_jaraco_text.cpython-312.pyc ADDED
Binary file (4.53 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/_log.cpython-312.pyc ADDED
Binary file (1.86 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-312.pyc ADDED
Binary file (2.41 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/compat.cpython-312.pyc ADDED
Binary file (2.21 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/datetime.cpython-312.pyc ADDED
Binary file (681 Bytes). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-312.pyc ADDED
Binary file (4.18 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/direct_url_helpers.cpython-312.pyc ADDED
Binary file (3.56 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/egg_link.cpython-312.pyc ADDED
Binary file (3.22 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-312.pyc ADDED
Binary file (2.16 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/entrypoints.cpython-312.pyc ADDED
Binary file (3.99 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-312.pyc ADDED
Binary file (7.46 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/filetypes.cpython-312.pyc ADDED
Binary file (1.16 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/logging.cpython-312.pyc ADDED
Binary file (13.6 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/models.cpython-312.pyc ADDED
Binary file (2.71 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/subprocess.cpython-312.pyc ADDED
Binary file (8.72 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-312.pyc ADDED
Binary file (12.1 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/unpacking.cpython-312.pyc ADDED
Binary file (11.1 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/urls.cpython-312.pyc ADDED
Binary file (2.4 kB). View file
 
venv/lib/python3.12/site-packages/pip/_internal/utils/__pycache__/wheel.cpython-312.pyc ADDED
Binary file (5.92 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (19.7 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/__pycache__/client.cpython-312.pyc ADDED
Binary file (15.9 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/__pycache__/environment.cpython-312.pyc ADDED
Binary file (789 Bytes). View file
 
venv/lib/python3.12/site-packages/sarvamai/__pycache__/play.cpython-312.pyc ADDED
Binary file (4.67 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/__pycache__/version.cpython-312.pyc ADDED
Binary file (311 Bytes). View file
 
venv/lib/python3.12/site-packages/sarvamai/chat/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # isort: skip_file
4
+
venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (186 Bytes). View file
 
venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/client.cpython-312.pyc ADDED
Binary file (19.7 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/chat/__pycache__/raw_client.cpython-312.pyc ADDED
Binary file (31.7 kB). View file
 
venv/lib/python3.12/site-packages/sarvamai/chat/client.py ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
6
+ from ..core.request_options import RequestOptions
7
+ from ..requests.chat_completion_request_message import ChatCompletionRequestMessageParams
8
+ from ..requests.chat_completion_tool import ChatCompletionToolParams
9
+ from ..requests.stop_configuration import StopConfigurationParams
10
+ from ..requests.tool_choice_option import ToolChoiceOptionParams
11
+ from ..types.chat_completion_chunk import ChatCompletionChunk
12
+ from ..types.create_chat_completion_response import CreateChatCompletionResponse
13
+ from ..types.reasoning_effort import ReasoningEffort
14
+ from ..types.sarvam_model_ids import SarvamModelIds
15
+ from .raw_client import AsyncRawChatClient, RawChatClient
16
+
17
+ # this is used as the default value for optional parameters
18
+ OMIT = typing.cast(typing.Any, ...)
19
+
20
+
21
+ class ChatClient:
22
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
23
+ self._raw_client = RawChatClient(client_wrapper=client_wrapper)
24
+
25
+ @property
26
+ def with_raw_response(self) -> RawChatClient:
27
+ """
28
+ Retrieves a raw implementation of this client that returns raw responses.
29
+
30
+ Returns
31
+ -------
32
+ RawChatClient
33
+ """
34
+ return self._raw_client
35
+
36
+ @typing.overload
37
+ def completions(
38
+ self,
39
+ *,
40
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
41
+ model: SarvamModelIds,
42
+ temperature: typing.Optional[float] = ...,
43
+ top_p: typing.Optional[float] = ...,
44
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
45
+ max_tokens: typing.Optional[int] = ...,
46
+ stream: typing.Literal[True],
47
+ stop: typing.Optional[StopConfigurationParams] = ...,
48
+ n: typing.Optional[int] = ...,
49
+ seed: typing.Optional[int] = ...,
50
+ frequency_penalty: typing.Optional[float] = ...,
51
+ presence_penalty: typing.Optional[float] = ...,
52
+ wiki_grounding: typing.Optional[bool] = ...,
53
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
54
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
55
+ request_options: typing.Optional[RequestOptions] = ...,
56
+ ) -> typing.Iterator[ChatCompletionChunk]: ...
57
+
58
+ @typing.overload
59
+ def completions(
60
+ self,
61
+ *,
62
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
63
+ model: SarvamModelIds,
64
+ temperature: typing.Optional[float] = ...,
65
+ top_p: typing.Optional[float] = ...,
66
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
67
+ max_tokens: typing.Optional[int] = ...,
68
+ stream: typing.Optional[typing.Literal[False]] = ...,
69
+ stop: typing.Optional[StopConfigurationParams] = ...,
70
+ n: typing.Optional[int] = ...,
71
+ seed: typing.Optional[int] = ...,
72
+ frequency_penalty: typing.Optional[float] = ...,
73
+ presence_penalty: typing.Optional[float] = ...,
74
+ wiki_grounding: typing.Optional[bool] = ...,
75
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
76
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
77
+ request_options: typing.Optional[RequestOptions] = ...,
78
+ ) -> CreateChatCompletionResponse: ...
79
+
80
+ def completions(
81
+ self,
82
+ *,
83
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
84
+ model: SarvamModelIds,
85
+ temperature: typing.Optional[float] = OMIT,
86
+ top_p: typing.Optional[float] = OMIT,
87
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
88
+ max_tokens: typing.Optional[int] = OMIT,
89
+ stream: typing.Optional[bool] = OMIT,
90
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
91
+ n: typing.Optional[int] = OMIT,
92
+ seed: typing.Optional[int] = OMIT,
93
+ frequency_penalty: typing.Optional[float] = OMIT,
94
+ presence_penalty: typing.Optional[float] = OMIT,
95
+ wiki_grounding: typing.Optional[bool] = OMIT,
96
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
97
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
98
+ request_options: typing.Optional[RequestOptions] = None,
99
+ ) -> typing.Union[CreateChatCompletionResponse, typing.Iterator[ChatCompletionChunk]]:
100
+ """
101
+ Parameters
102
+ ----------
103
+ messages : typing.Sequence[ChatCompletionRequestMessageParams]
104
+ A list of messages comprising the conversation so far.
105
+
106
+ model : SarvamModelIds
107
+ Model ID used to generate the response, like `sarvam-m`.
108
+
109
+ temperature : typing.Optional[float]
110
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
111
+ We generally recommend altering this or `top_p` but not both.
112
+
113
+ top_p : typing.Optional[float]
114
+ An alternative to sampling with temperature, called nucleus sampling,
115
+ where the model considers the results of the tokens with top_p probability
116
+ mass. So 0.1 means only the tokens comprising the top 10% probability mass
117
+ are considered.
118
+
119
+ We generally recommend altering this or `temperature` but not both.
120
+
121
+ reasoning_effort : typing.Optional[ReasoningEffort]
122
+ The effort to use for reasoning
123
+
124
+ max_tokens : typing.Optional[int]
125
+ The maximum number of tokens that can be generated in the chat completion.
126
+
127
+ stream : typing.Optional[bool]
128
+ If set to true, the model response data will be streamed to the client
129
+ as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
130
+ When true, returns an Iterator[ChatCompletionChunk] instead of CreateChatCompletionResponse.
131
+
132
+ stop : typing.Optional[StopConfigurationParams]
133
+
134
+ n : typing.Optional[int]
135
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
136
+
137
+ seed : typing.Optional[int]
138
+ This feature is in Beta.
139
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
140
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
141
+
142
+ frequency_penalty : typing.Optional[float]
143
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
144
+ their existing frequency in the text so far, decreasing the model's
145
+ likelihood to repeat the same line verbatim.
146
+
147
+ presence_penalty : typing.Optional[float]
148
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
149
+ whether they appear in the text so far, increasing the model's likelihood
150
+ to talk about new topics.
151
+
152
+ wiki_grounding : typing.Optional[bool]
153
+ If set to true, the model response will be wiki grounded.
154
+
155
+ tools : typing.Optional[typing.Sequence[ChatCompletionToolParams]]
156
+ A list of tools the model may call. Currently, only functions are supported as a tool.
157
+
158
+ tool_choice : typing.Optional[ToolChoiceOptionParams]
159
+ Controls which (if any) tool is called by the model.
160
+
161
+ request_options : typing.Optional[RequestOptions]
162
+ Request-specific configuration.
163
+
164
+ Returns
165
+ -------
166
+ CreateChatCompletionResponse or Iterator[ChatCompletionChunk]
167
+ When stream=False (default): CreateChatCompletionResponse.
168
+ When stream=True: Iterator yielding ChatCompletionChunk objects.
169
+
170
+ Examples
171
+ --------
172
+ from sarvamai import SarvamAI
173
+
174
+ client = SarvamAI(
175
+ api_subscription_key="YOUR_API_SUBSCRIPTION_KEY",
176
+ )
177
+
178
+ # Non-streaming
179
+ response = client.chat.completions(
180
+ messages=[{"role": "user", "content": "Hello"}],
181
+ model="sarvam-m",
182
+ )
183
+
184
+ # Streaming
185
+ for chunk in client.chat.completions(
186
+ messages=[{"role": "user", "content": "Hello"}],
187
+ model="sarvam-m",
188
+ stream=True,
189
+ ):
190
+ print(chunk)
191
+ """
192
+ if stream is True:
193
+ return self._raw_client.completions(
194
+ messages=messages,
195
+ model=model,
196
+ temperature=temperature,
197
+ top_p=top_p,
198
+ reasoning_effort=reasoning_effort,
199
+ max_tokens=max_tokens,
200
+ stream=True,
201
+ stop=stop,
202
+ n=n,
203
+ seed=seed,
204
+ frequency_penalty=frequency_penalty,
205
+ presence_penalty=presence_penalty,
206
+ wiki_grounding=wiki_grounding,
207
+ tools=tools,
208
+ tool_choice=tool_choice,
209
+ request_options=request_options,
210
+ )
211
+
212
+ _response = self._raw_client.completions(
213
+ messages=messages,
214
+ model=model,
215
+ temperature=temperature,
216
+ top_p=top_p,
217
+ reasoning_effort=reasoning_effort,
218
+ max_tokens=max_tokens,
219
+ stream=stream,
220
+ stop=stop,
221
+ n=n,
222
+ seed=seed,
223
+ frequency_penalty=frequency_penalty,
224
+ presence_penalty=presence_penalty,
225
+ wiki_grounding=wiki_grounding,
226
+ tools=tools,
227
+ tool_choice=tool_choice,
228
+ request_options=request_options,
229
+ )
230
+ return _response.data
231
+
232
+
233
+ class AsyncChatClient:
234
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
235
+ self._raw_client = AsyncRawChatClient(client_wrapper=client_wrapper)
236
+
237
+ @property
238
+ def with_raw_response(self) -> AsyncRawChatClient:
239
+ """
240
+ Retrieves a raw implementation of this client that returns raw responses.
241
+
242
+ Returns
243
+ -------
244
+ AsyncRawChatClient
245
+ """
246
+ return self._raw_client
247
+
248
+ @typing.overload
249
+ async def completions(
250
+ self,
251
+ *,
252
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
253
+ model: SarvamModelIds,
254
+ temperature: typing.Optional[float] = ...,
255
+ top_p: typing.Optional[float] = ...,
256
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
257
+ max_tokens: typing.Optional[int] = ...,
258
+ stream: typing.Literal[True],
259
+ stop: typing.Optional[StopConfigurationParams] = ...,
260
+ n: typing.Optional[int] = ...,
261
+ seed: typing.Optional[int] = ...,
262
+ frequency_penalty: typing.Optional[float] = ...,
263
+ presence_penalty: typing.Optional[float] = ...,
264
+ wiki_grounding: typing.Optional[bool] = ...,
265
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
266
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
267
+ request_options: typing.Optional[RequestOptions] = ...,
268
+ ) -> typing.AsyncIterator[ChatCompletionChunk]: ...
269
+
270
+ @typing.overload
271
+ async def completions(
272
+ self,
273
+ *,
274
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
275
+ model: SarvamModelIds,
276
+ temperature: typing.Optional[float] = ...,
277
+ top_p: typing.Optional[float] = ...,
278
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
279
+ max_tokens: typing.Optional[int] = ...,
280
+ stream: typing.Optional[typing.Literal[False]] = ...,
281
+ stop: typing.Optional[StopConfigurationParams] = ...,
282
+ n: typing.Optional[int] = ...,
283
+ seed: typing.Optional[int] = ...,
284
+ frequency_penalty: typing.Optional[float] = ...,
285
+ presence_penalty: typing.Optional[float] = ...,
286
+ wiki_grounding: typing.Optional[bool] = ...,
287
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
288
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
289
+ request_options: typing.Optional[RequestOptions] = ...,
290
+ ) -> CreateChatCompletionResponse: ...
291
+
292
+ async def completions(
293
+ self,
294
+ *,
295
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
296
+ model: SarvamModelIds,
297
+ temperature: typing.Optional[float] = OMIT,
298
+ top_p: typing.Optional[float] = OMIT,
299
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
300
+ max_tokens: typing.Optional[int] = OMIT,
301
+ stream: typing.Optional[bool] = OMIT,
302
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
303
+ n: typing.Optional[int] = OMIT,
304
+ seed: typing.Optional[int] = OMIT,
305
+ frequency_penalty: typing.Optional[float] = OMIT,
306
+ presence_penalty: typing.Optional[float] = OMIT,
307
+ wiki_grounding: typing.Optional[bool] = OMIT,
308
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
309
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
310
+ request_options: typing.Optional[RequestOptions] = None,
311
+ ) -> typing.Union[CreateChatCompletionResponse, typing.AsyncIterator[ChatCompletionChunk]]:
312
+ """
313
+ Parameters
314
+ ----------
315
+ messages : typing.Sequence[ChatCompletionRequestMessageParams]
316
+ A list of messages comprising the conversation so far.
317
+
318
+ model : SarvamModelIds
319
+ Model ID used to generate the response, like `sarvam-m`.
320
+
321
+ temperature : typing.Optional[float]
322
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
323
+ We generally recommend altering this or `top_p` but not both.
324
+
325
+ top_p : typing.Optional[float]
326
+ An alternative to sampling with temperature, called nucleus sampling,
327
+ where the model considers the results of the tokens with top_p probability
328
+ mass. So 0.1 means only the tokens comprising the top 10% probability mass
329
+ are considered.
330
+
331
+ We generally recommend altering this or `temperature` but not both.
332
+
333
+ reasoning_effort : typing.Optional[ReasoningEffort]
334
+ The effort to use for reasoning
335
+
336
+ max_tokens : typing.Optional[int]
337
+ The maximum number of tokens that can be generated in the chat completion.
338
+
339
+ stream : typing.Optional[bool]
340
+ If set to true, the model response data will be streamed to the client
341
+ as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
342
+ When true, returns an AsyncIterator[ChatCompletionChunk] instead of CreateChatCompletionResponse.
343
+
344
+ stop : typing.Optional[StopConfigurationParams]
345
+
346
+ n : typing.Optional[int]
347
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
348
+
349
+ seed : typing.Optional[int]
350
+ This feature is in Beta.
351
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
352
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
353
+
354
+ frequency_penalty : typing.Optional[float]
355
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
356
+ their existing frequency in the text so far, decreasing the model's
357
+ likelihood to repeat the same line verbatim.
358
+
359
+ presence_penalty : typing.Optional[float]
360
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
361
+ whether they appear in the text so far, increasing the model's likelihood
362
+ to talk about new topics.
363
+
364
+ wiki_grounding : typing.Optional[bool]
365
+ If set to true, the model response will be wiki grounded.
366
+
367
+ tools : typing.Optional[typing.Sequence[ChatCompletionToolParams]]
368
+ A list of tools the model may call. Currently, only functions are supported as a tool.
369
+
370
+ tool_choice : typing.Optional[ToolChoiceOptionParams]
371
+ Controls which (if any) tool is called by the model.
372
+
373
+ request_options : typing.Optional[RequestOptions]
374
+ Request-specific configuration.
375
+
376
+ Returns
377
+ -------
378
+ CreateChatCompletionResponse or AsyncIterator[ChatCompletionChunk]
379
+ When stream=False (default): CreateChatCompletionResponse.
380
+ When stream=True: AsyncIterator yielding ChatCompletionChunk objects.
381
+
382
+ Examples
383
+ --------
384
+ import asyncio
385
+ from sarvamai import AsyncSarvamAI
386
+
387
+ client = AsyncSarvamAI(
388
+ api_subscription_key="YOUR_API_SUBSCRIPTION_KEY",
389
+ )
390
+
391
+ async def main() -> None:
392
+ # Non-streaming
393
+ response = await client.chat.completions(
394
+ messages=[{"role": "user", "content": "Hello"}],
395
+ model="sarvam-m",
396
+ )
397
+
398
+ # Streaming
399
+ async for chunk in client.chat.completions(
400
+ messages=[{"role": "user", "content": "Hello"}],
401
+ model="sarvam-m",
402
+ stream=True,
403
+ ):
404
+ print(chunk)
405
+
406
+ asyncio.run(main())
407
+ """
408
+ if stream is True:
409
+ return await self._raw_client.completions(
410
+ messages=messages,
411
+ model=model,
412
+ temperature=temperature,
413
+ top_p=top_p,
414
+ reasoning_effort=reasoning_effort,
415
+ max_tokens=max_tokens,
416
+ stream=True,
417
+ stop=stop,
418
+ n=n,
419
+ seed=seed,
420
+ frequency_penalty=frequency_penalty,
421
+ presence_penalty=presence_penalty,
422
+ wiki_grounding=wiki_grounding,
423
+ tools=tools,
424
+ tool_choice=tool_choice,
425
+ request_options=request_options,
426
+ )
427
+
428
+ _response = await self._raw_client.completions(
429
+ messages=messages,
430
+ model=model,
431
+ temperature=temperature,
432
+ top_p=top_p,
433
+ reasoning_effort=reasoning_effort,
434
+ max_tokens=max_tokens,
435
+ stream=stream,
436
+ stop=stop,
437
+ n=n,
438
+ seed=seed,
439
+ frequency_penalty=frequency_penalty,
440
+ presence_penalty=presence_penalty,
441
+ wiki_grounding=wiki_grounding,
442
+ tools=tools,
443
+ tool_choice=tool_choice,
444
+ request_options=request_options,
445
+ )
446
+ return _response.data
venv/lib/python3.12/site-packages/sarvamai/chat/raw_client.py ADDED
@@ -0,0 +1,718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import json
4
+ import typing
5
+ from json.decoder import JSONDecodeError
6
+
7
+ from ..core.api_error import ApiError
8
+ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
+ from ..core.http_response import AsyncHttpResponse, HttpResponse
10
+ from ..core.pydantic_utilities import parse_obj_as
11
+ from ..core.request_options import RequestOptions
12
+ from ..core.serialization import convert_and_respect_annotation_metadata
13
+ from ..errors.bad_request_error import BadRequestError
14
+ from ..errors.forbidden_error import ForbiddenError
15
+ from ..errors.internal_server_error import InternalServerError
16
+ from ..errors.too_many_requests_error import TooManyRequestsError
17
+ from ..errors.unprocessable_entity_error import UnprocessableEntityError
18
+ from ..requests.chat_completion_request_message import ChatCompletionRequestMessageParams
19
+ from ..requests.chat_completion_tool import ChatCompletionToolParams
20
+ from ..requests.stop_configuration import StopConfigurationParams
21
+ from ..requests.tool_choice_option import ToolChoiceOptionParams
22
+ from ..types.chat_completion_chunk import ChatCompletionChunk
23
+ from ..types.create_chat_completion_response import CreateChatCompletionResponse
24
+ from ..types.reasoning_effort import ReasoningEffort
25
+ from ..types.sarvam_model_ids import SarvamModelIds
26
+
27
+ # this is used as the default value for optional parameters
28
+ OMIT = typing.cast(typing.Any, ...)
29
+
30
+
31
+ class RawChatClient:
32
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
33
+ self._client_wrapper = client_wrapper
34
+
35
+ @typing.overload
36
+ def completions(
37
+ self,
38
+ *,
39
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
40
+ model: SarvamModelIds,
41
+ temperature: typing.Optional[float] = ...,
42
+ top_p: typing.Optional[float] = ...,
43
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
44
+ max_tokens: typing.Optional[int] = ...,
45
+ stream: typing.Literal[True],
46
+ stop: typing.Optional[StopConfigurationParams] = ...,
47
+ n: typing.Optional[int] = ...,
48
+ seed: typing.Optional[int] = ...,
49
+ frequency_penalty: typing.Optional[float] = ...,
50
+ presence_penalty: typing.Optional[float] = ...,
51
+ wiki_grounding: typing.Optional[bool] = ...,
52
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
53
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
54
+ request_options: typing.Optional[RequestOptions] = ...,
55
+ ) -> typing.Iterator[ChatCompletionChunk]: ...
56
+
57
+ @typing.overload
58
+ def completions(
59
+ self,
60
+ *,
61
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
62
+ model: SarvamModelIds,
63
+ temperature: typing.Optional[float] = ...,
64
+ top_p: typing.Optional[float] = ...,
65
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
66
+ max_tokens: typing.Optional[int] = ...,
67
+ stream: typing.Optional[typing.Literal[False]] = ...,
68
+ stop: typing.Optional[StopConfigurationParams] = ...,
69
+ n: typing.Optional[int] = ...,
70
+ seed: typing.Optional[int] = ...,
71
+ frequency_penalty: typing.Optional[float] = ...,
72
+ presence_penalty: typing.Optional[float] = ...,
73
+ wiki_grounding: typing.Optional[bool] = ...,
74
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
75
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
76
+ request_options: typing.Optional[RequestOptions] = ...,
77
+ ) -> HttpResponse[CreateChatCompletionResponse]: ...
78
+
79
+ def completions(
80
+ self,
81
+ *,
82
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
83
+ model: SarvamModelIds,
84
+ temperature: typing.Optional[float] = OMIT,
85
+ top_p: typing.Optional[float] = OMIT,
86
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
87
+ max_tokens: typing.Optional[int] = OMIT,
88
+ stream: typing.Optional[bool] = OMIT,
89
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
90
+ n: typing.Optional[int] = OMIT,
91
+ seed: typing.Optional[int] = OMIT,
92
+ frequency_penalty: typing.Optional[float] = OMIT,
93
+ presence_penalty: typing.Optional[float] = OMIT,
94
+ wiki_grounding: typing.Optional[bool] = OMIT,
95
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
96
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
97
+ request_options: typing.Optional[RequestOptions] = None,
98
+ ) -> typing.Union[HttpResponse[CreateChatCompletionResponse], typing.Iterator[ChatCompletionChunk]]:
99
+ """
100
+ Parameters
101
+ ----------
102
+ messages : typing.Sequence[ChatCompletionRequestMessageParams]
103
+ A list of messages comprising the conversation so far.
104
+
105
+ model : SarvamModelIds
106
+ Model ID used to generate the response, like `sarvam-m`.
107
+
108
+ temperature : typing.Optional[float]
109
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
110
+ We generally recommend altering this or `top_p` but not both.
111
+
112
+ top_p : typing.Optional[float]
113
+ An alternative to sampling with temperature, called nucleus sampling,
114
+ where the model considers the results of the tokens with top_p probability
115
+ mass. So 0.1 means only the tokens comprising the top 10% probability mass
116
+ are considered.
117
+
118
+ We generally recommend altering this or `temperature` but not both.
119
+
120
+ reasoning_effort : typing.Optional[ReasoningEffort]
121
+ The effort to use for reasoning
122
+
123
+ max_tokens : typing.Optional[int]
124
+ The maximum number of tokens that can be generated in the chat completion.
125
+
126
+ stream : typing.Optional[bool]
127
+ If set to true, the model response data will be streamed to the client
128
+ as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
129
+ When true, returns an Iterator[ChatCompletionChunk] instead of HttpResponse.
130
+
131
+ stop : typing.Optional[StopConfigurationParams]
132
+
133
+ n : typing.Optional[int]
134
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
135
+
136
+ seed : typing.Optional[int]
137
+ This feature is in Beta.
138
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
139
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
140
+
141
+ frequency_penalty : typing.Optional[float]
142
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
143
+ their existing frequency in the text so far, decreasing the model's
144
+ likelihood to repeat the same line verbatim.
145
+
146
+ presence_penalty : typing.Optional[float]
147
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
148
+ whether they appear in the text so far, increasing the model's likelihood
149
+ to talk about new topics.
150
+
151
+ wiki_grounding : typing.Optional[bool]
152
+ If set to true, the model response will be wiki grounded.
153
+
154
+ tools : typing.Optional[typing.Sequence[ChatCompletionToolParams]]
155
+ A list of tools the model may call. Currently, only functions are supported as a tool.
156
+
157
+ tool_choice : typing.Optional[ToolChoiceOptionParams]
158
+ Controls which (if any) tool is called by the model.
159
+
160
+ request_options : typing.Optional[RequestOptions]
161
+ Request-specific configuration.
162
+
163
+ Returns
164
+ -------
165
+ HttpResponse[CreateChatCompletionResponse] or Iterator[ChatCompletionChunk]
166
+ When stream=False (default): HttpResponse wrapping CreateChatCompletionResponse.
167
+ When stream=True: Iterator yielding ChatCompletionChunk objects.
168
+ """
169
+ if stream is True:
170
+ return self._completions_stream(
171
+ messages=messages,
172
+ model=model,
173
+ temperature=temperature,
174
+ top_p=top_p,
175
+ reasoning_effort=reasoning_effort,
176
+ max_tokens=max_tokens,
177
+ stop=stop,
178
+ n=n,
179
+ seed=seed,
180
+ frequency_penalty=frequency_penalty,
181
+ presence_penalty=presence_penalty,
182
+ wiki_grounding=wiki_grounding,
183
+ tools=tools,
184
+ tool_choice=tool_choice,
185
+ request_options=request_options,
186
+ )
187
+
188
+ _response = self._client_wrapper.httpx_client.request(
189
+ "v1/chat/completions",
190
+ base_url=self._client_wrapper.get_environment().base,
191
+ method="POST",
192
+ json={
193
+ "messages": convert_and_respect_annotation_metadata(
194
+ object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessageParams], direction="write"
195
+ ),
196
+ "model": model,
197
+ "temperature": temperature,
198
+ "top_p": top_p,
199
+ "reasoning_effort": reasoning_effort,
200
+ "max_tokens": max_tokens,
201
+ "stream": stream,
202
+ "stop": convert_and_respect_annotation_metadata(
203
+ object_=stop, annotation=StopConfigurationParams, direction="write"
204
+ ),
205
+ "n": n,
206
+ "seed": seed,
207
+ "frequency_penalty": frequency_penalty,
208
+ "presence_penalty": presence_penalty,
209
+ "wiki_grounding": wiki_grounding,
210
+ "tools": convert_and_respect_annotation_metadata(
211
+ object_=tools, annotation=typing.Sequence[ChatCompletionToolParams], direction="write"
212
+ ),
213
+ "tool_choice": convert_and_respect_annotation_metadata(
214
+ object_=tool_choice, annotation=ToolChoiceOptionParams, direction="write"
215
+ ),
216
+ },
217
+ headers={
218
+ "content-type": "application/json",
219
+ },
220
+ request_options=request_options,
221
+ omit=OMIT,
222
+ )
223
+ try:
224
+ if 200 <= _response.status_code < 300:
225
+ _data = typing.cast(
226
+ CreateChatCompletionResponse,
227
+ parse_obj_as(
228
+ type_=CreateChatCompletionResponse, # type: ignore
229
+ object_=_response.json(),
230
+ ),
231
+ )
232
+ return HttpResponse(response=_response, data=_data)
233
+ if _response.status_code == 400:
234
+ raise BadRequestError(
235
+ headers=dict(_response.headers),
236
+ body=typing.cast(
237
+ typing.Optional[typing.Any],
238
+ parse_obj_as(
239
+ type_=typing.Optional[typing.Any], # type: ignore
240
+ object_=_response.json(),
241
+ ),
242
+ ),
243
+ )
244
+ if _response.status_code == 403:
245
+ raise ForbiddenError(
246
+ headers=dict(_response.headers),
247
+ body=typing.cast(
248
+ typing.Optional[typing.Any],
249
+ parse_obj_as(
250
+ type_=typing.Optional[typing.Any], # type: ignore
251
+ object_=_response.json(),
252
+ ),
253
+ ),
254
+ )
255
+ if _response.status_code == 422:
256
+ raise UnprocessableEntityError(
257
+ headers=dict(_response.headers),
258
+ body=typing.cast(
259
+ typing.Optional[typing.Any],
260
+ parse_obj_as(
261
+ type_=typing.Optional[typing.Any], # type: ignore
262
+ object_=_response.json(),
263
+ ),
264
+ ),
265
+ )
266
+ if _response.status_code == 429:
267
+ raise TooManyRequestsError(
268
+ headers=dict(_response.headers),
269
+ body=typing.cast(
270
+ typing.Optional[typing.Any],
271
+ parse_obj_as(
272
+ type_=typing.Optional[typing.Any], # type: ignore
273
+ object_=_response.json(),
274
+ ),
275
+ ),
276
+ )
277
+ if _response.status_code == 500:
278
+ raise InternalServerError(
279
+ headers=dict(_response.headers),
280
+ body=typing.cast(
281
+ typing.Optional[typing.Any],
282
+ parse_obj_as(
283
+ type_=typing.Optional[typing.Any], # type: ignore
284
+ object_=_response.json(),
285
+ ),
286
+ ),
287
+ )
288
+ _response_json = _response.json()
289
+ except JSONDecodeError:
290
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
291
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
292
+
293
+ def _completions_stream(
294
+ self,
295
+ *,
296
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
297
+ model: SarvamModelIds,
298
+ temperature: typing.Optional[float] = OMIT,
299
+ top_p: typing.Optional[float] = OMIT,
300
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
301
+ max_tokens: typing.Optional[int] = OMIT,
302
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
303
+ n: typing.Optional[int] = OMIT,
304
+ seed: typing.Optional[int] = OMIT,
305
+ frequency_penalty: typing.Optional[float] = OMIT,
306
+ presence_penalty: typing.Optional[float] = OMIT,
307
+ wiki_grounding: typing.Optional[bool] = OMIT,
308
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
309
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
310
+ request_options: typing.Optional[RequestOptions] = None,
311
+ ) -> typing.Iterator[ChatCompletionChunk]:
312
+ with self._client_wrapper.httpx_client.stream(
313
+ "v1/chat/completions",
314
+ base_url=self._client_wrapper.get_environment().base,
315
+ method="POST",
316
+ json={
317
+ "messages": convert_and_respect_annotation_metadata(
318
+ object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessageParams], direction="write"
319
+ ),
320
+ "model": model,
321
+ "temperature": temperature,
322
+ "top_p": top_p,
323
+ "reasoning_effort": reasoning_effort,
324
+ "max_tokens": max_tokens,
325
+ "stream": True,
326
+ "stop": convert_and_respect_annotation_metadata(
327
+ object_=stop, annotation=StopConfigurationParams, direction="write"
328
+ ),
329
+ "n": n,
330
+ "seed": seed,
331
+ "frequency_penalty": frequency_penalty,
332
+ "presence_penalty": presence_penalty,
333
+ "wiki_grounding": wiki_grounding,
334
+ "tools": convert_and_respect_annotation_metadata(
335
+ object_=tools, annotation=typing.Sequence[ChatCompletionToolParams], direction="write"
336
+ ),
337
+ "tool_choice": convert_and_respect_annotation_metadata(
338
+ object_=tool_choice, annotation=ToolChoiceOptionParams, direction="write"
339
+ ),
340
+ },
341
+ headers={
342
+ "content-type": "application/json",
343
+ },
344
+ request_options=request_options,
345
+ omit=OMIT,
346
+ ) as _response:
347
+ if not (200 <= _response.status_code < 300):
348
+ _response.read()
349
+ try:
350
+ _body = _response.json()
351
+ except Exception:
352
+ _body = _response.text
353
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_body)
354
+
355
+ for _line in _response.iter_lines():
356
+ if not _line:
357
+ continue
358
+ if _line.startswith("data: "):
359
+ _data_str = _line[len("data: "):]
360
+ if _data_str.strip() == "[DONE]":
361
+ return
362
+ try:
363
+ _chunk_json = json.loads(_data_str)
364
+ _chunk = typing.cast(
365
+ ChatCompletionChunk,
366
+ parse_obj_as(
367
+ type_=ChatCompletionChunk, # type: ignore
368
+ object_=_chunk_json,
369
+ ),
370
+ )
371
+ yield _chunk
372
+ except json.JSONDecodeError:
373
+ continue
374
+
375
+
376
+ class AsyncRawChatClient:
377
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
378
+ self._client_wrapper = client_wrapper
379
+
380
+ @typing.overload
381
+ async def completions(
382
+ self,
383
+ *,
384
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
385
+ model: SarvamModelIds,
386
+ temperature: typing.Optional[float] = ...,
387
+ top_p: typing.Optional[float] = ...,
388
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
389
+ max_tokens: typing.Optional[int] = ...,
390
+ stream: typing.Literal[True],
391
+ stop: typing.Optional[StopConfigurationParams] = ...,
392
+ n: typing.Optional[int] = ...,
393
+ seed: typing.Optional[int] = ...,
394
+ frequency_penalty: typing.Optional[float] = ...,
395
+ presence_penalty: typing.Optional[float] = ...,
396
+ wiki_grounding: typing.Optional[bool] = ...,
397
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
398
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
399
+ request_options: typing.Optional[RequestOptions] = ...,
400
+ ) -> typing.AsyncIterator[ChatCompletionChunk]: ...
401
+
402
+ @typing.overload
403
+ async def completions(
404
+ self,
405
+ *,
406
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
407
+ model: SarvamModelIds,
408
+ temperature: typing.Optional[float] = ...,
409
+ top_p: typing.Optional[float] = ...,
410
+ reasoning_effort: typing.Optional[ReasoningEffort] = ...,
411
+ max_tokens: typing.Optional[int] = ...,
412
+ stream: typing.Optional[typing.Literal[False]] = ...,
413
+ stop: typing.Optional[StopConfigurationParams] = ...,
414
+ n: typing.Optional[int] = ...,
415
+ seed: typing.Optional[int] = ...,
416
+ frequency_penalty: typing.Optional[float] = ...,
417
+ presence_penalty: typing.Optional[float] = ...,
418
+ wiki_grounding: typing.Optional[bool] = ...,
419
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = ...,
420
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = ...,
421
+ request_options: typing.Optional[RequestOptions] = ...,
422
+ ) -> AsyncHttpResponse[CreateChatCompletionResponse]: ...
423
+
424
+ async def completions(
425
+ self,
426
+ *,
427
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
428
+ model: SarvamModelIds,
429
+ temperature: typing.Optional[float] = OMIT,
430
+ top_p: typing.Optional[float] = OMIT,
431
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
432
+ max_tokens: typing.Optional[int] = OMIT,
433
+ stream: typing.Optional[bool] = OMIT,
434
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
435
+ n: typing.Optional[int] = OMIT,
436
+ seed: typing.Optional[int] = OMIT,
437
+ frequency_penalty: typing.Optional[float] = OMIT,
438
+ presence_penalty: typing.Optional[float] = OMIT,
439
+ wiki_grounding: typing.Optional[bool] = OMIT,
440
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
441
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
442
+ request_options: typing.Optional[RequestOptions] = None,
443
+ ) -> typing.Union[AsyncHttpResponse[CreateChatCompletionResponse], typing.AsyncIterator[ChatCompletionChunk]]:
444
+ """
445
+ Parameters
446
+ ----------
447
+ messages : typing.Sequence[ChatCompletionRequestMessageParams]
448
+ A list of messages comprising the conversation so far.
449
+
450
+ model : SarvamModelIds
451
+ Model ID used to generate the response, like `sarvam-m`.
452
+
453
+ temperature : typing.Optional[float]
454
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
455
+ We generally recommend altering this or `top_p` but not both.
456
+
457
+ top_p : typing.Optional[float]
458
+ An alternative to sampling with temperature, called nucleus sampling,
459
+ where the model considers the results of the tokens with top_p probability
460
+ mass. So 0.1 means only the tokens comprising the top 10% probability mass
461
+ are considered.
462
+
463
+ We generally recommend altering this or `temperature` but not both.
464
+
465
+ reasoning_effort : typing.Optional[ReasoningEffort]
466
+ The effort to use for reasoning
467
+
468
+ max_tokens : typing.Optional[int]
469
+ The maximum number of tokens that can be generated in the chat completion.
470
+
471
+ stream : typing.Optional[bool]
472
+ If set to true, the model response data will be streamed to the client
473
+ as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
474
+ When true, returns an AsyncIterator[ChatCompletionChunk] instead of AsyncHttpResponse.
475
+
476
+ stop : typing.Optional[StopConfigurationParams]
477
+
478
+ n : typing.Optional[int]
479
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
480
+
481
+ seed : typing.Optional[int]
482
+ This feature is in Beta.
483
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
484
+ Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
485
+
486
+ frequency_penalty : typing.Optional[float]
487
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
488
+ their existing frequency in the text so far, decreasing the model's
489
+ likelihood to repeat the same line verbatim.
490
+
491
+ presence_penalty : typing.Optional[float]
492
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
493
+ whether they appear in the text so far, increasing the model's likelihood
494
+ to talk about new topics.
495
+
496
+ wiki_grounding : typing.Optional[bool]
497
+ If set to true, the model response will be wiki grounded.
498
+
499
+ tools : typing.Optional[typing.Sequence[ChatCompletionToolParams]]
500
+ A list of tools the model may call. Currently, only functions are supported as a tool.
501
+
502
+ tool_choice : typing.Optional[ToolChoiceOptionParams]
503
+ Controls which (if any) tool is called by the model.
504
+
505
+ request_options : typing.Optional[RequestOptions]
506
+ Request-specific configuration.
507
+
508
+ Returns
509
+ -------
510
+ AsyncHttpResponse[CreateChatCompletionResponse] or AsyncIterator[ChatCompletionChunk]
511
+ When stream=False (default): AsyncHttpResponse wrapping CreateChatCompletionResponse.
512
+ When stream=True: AsyncIterator yielding ChatCompletionChunk objects.
513
+ """
514
+ if stream is True:
515
+ return self._completions_stream(
516
+ messages=messages,
517
+ model=model,
518
+ temperature=temperature,
519
+ top_p=top_p,
520
+ reasoning_effort=reasoning_effort,
521
+ max_tokens=max_tokens,
522
+ stop=stop,
523
+ n=n,
524
+ seed=seed,
525
+ frequency_penalty=frequency_penalty,
526
+ presence_penalty=presence_penalty,
527
+ wiki_grounding=wiki_grounding,
528
+ tools=tools,
529
+ tool_choice=tool_choice,
530
+ request_options=request_options,
531
+ )
532
+
533
+ _response = await self._client_wrapper.httpx_client.request(
534
+ "v1/chat/completions",
535
+ base_url=self._client_wrapper.get_environment().base,
536
+ method="POST",
537
+ json={
538
+ "messages": convert_and_respect_annotation_metadata(
539
+ object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessageParams], direction="write"
540
+ ),
541
+ "model": model,
542
+ "temperature": temperature,
543
+ "top_p": top_p,
544
+ "reasoning_effort": reasoning_effort,
545
+ "max_tokens": max_tokens,
546
+ "stream": stream,
547
+ "stop": convert_and_respect_annotation_metadata(
548
+ object_=stop, annotation=StopConfigurationParams, direction="write"
549
+ ),
550
+ "n": n,
551
+ "seed": seed,
552
+ "frequency_penalty": frequency_penalty,
553
+ "presence_penalty": presence_penalty,
554
+ "wiki_grounding": wiki_grounding,
555
+ "tools": convert_and_respect_annotation_metadata(
556
+ object_=tools, annotation=typing.Sequence[ChatCompletionToolParams], direction="write"
557
+ ),
558
+ "tool_choice": convert_and_respect_annotation_metadata(
559
+ object_=tool_choice, annotation=ToolChoiceOptionParams, direction="write"
560
+ ),
561
+ },
562
+ headers={
563
+ "content-type": "application/json",
564
+ },
565
+ request_options=request_options,
566
+ omit=OMIT,
567
+ )
568
+ try:
569
+ if 200 <= _response.status_code < 300:
570
+ _data = typing.cast(
571
+ CreateChatCompletionResponse,
572
+ parse_obj_as(
573
+ type_=CreateChatCompletionResponse, # type: ignore
574
+ object_=_response.json(),
575
+ ),
576
+ )
577
+ return AsyncHttpResponse(response=_response, data=_data)
578
+ if _response.status_code == 400:
579
+ raise BadRequestError(
580
+ headers=dict(_response.headers),
581
+ body=typing.cast(
582
+ typing.Optional[typing.Any],
583
+ parse_obj_as(
584
+ type_=typing.Optional[typing.Any], # type: ignore
585
+ object_=_response.json(),
586
+ ),
587
+ ),
588
+ )
589
+ if _response.status_code == 403:
590
+ raise ForbiddenError(
591
+ headers=dict(_response.headers),
592
+ body=typing.cast(
593
+ typing.Optional[typing.Any],
594
+ parse_obj_as(
595
+ type_=typing.Optional[typing.Any], # type: ignore
596
+ object_=_response.json(),
597
+ ),
598
+ ),
599
+ )
600
+ if _response.status_code == 422:
601
+ raise UnprocessableEntityError(
602
+ headers=dict(_response.headers),
603
+ body=typing.cast(
604
+ typing.Optional[typing.Any],
605
+ parse_obj_as(
606
+ type_=typing.Optional[typing.Any], # type: ignore
607
+ object_=_response.json(),
608
+ ),
609
+ ),
610
+ )
611
+ if _response.status_code == 429:
612
+ raise TooManyRequestsError(
613
+ headers=dict(_response.headers),
614
+ body=typing.cast(
615
+ typing.Optional[typing.Any],
616
+ parse_obj_as(
617
+ type_=typing.Optional[typing.Any], # type: ignore
618
+ object_=_response.json(),
619
+ ),
620
+ ),
621
+ )
622
+ if _response.status_code == 500:
623
+ raise InternalServerError(
624
+ headers=dict(_response.headers),
625
+ body=typing.cast(
626
+ typing.Optional[typing.Any],
627
+ parse_obj_as(
628
+ type_=typing.Optional[typing.Any], # type: ignore
629
+ object_=_response.json(),
630
+ ),
631
+ ),
632
+ )
633
+ _response_json = _response.json()
634
+ except JSONDecodeError:
635
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
636
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
637
+
638
+ async def _completions_stream(
639
+ self,
640
+ *,
641
+ messages: typing.Sequence[ChatCompletionRequestMessageParams],
642
+ model: SarvamModelIds,
643
+ temperature: typing.Optional[float] = OMIT,
644
+ top_p: typing.Optional[float] = OMIT,
645
+ reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
646
+ max_tokens: typing.Optional[int] = OMIT,
647
+ stop: typing.Optional[StopConfigurationParams] = OMIT,
648
+ n: typing.Optional[int] = OMIT,
649
+ seed: typing.Optional[int] = OMIT,
650
+ frequency_penalty: typing.Optional[float] = OMIT,
651
+ presence_penalty: typing.Optional[float] = OMIT,
652
+ wiki_grounding: typing.Optional[bool] = OMIT,
653
+ tools: typing.Optional[typing.Sequence[ChatCompletionToolParams]] = OMIT,
654
+ tool_choice: typing.Optional[ToolChoiceOptionParams] = OMIT,
655
+ request_options: typing.Optional[RequestOptions] = None,
656
+ ) -> typing.AsyncIterator[ChatCompletionChunk]:
657
+ async with self._client_wrapper.httpx_client.stream(
658
+ "v1/chat/completions",
659
+ base_url=self._client_wrapper.get_environment().base,
660
+ method="POST",
661
+ json={
662
+ "messages": convert_and_respect_annotation_metadata(
663
+ object_=messages, annotation=typing.Sequence[ChatCompletionRequestMessageParams], direction="write"
664
+ ),
665
+ "model": model,
666
+ "temperature": temperature,
667
+ "top_p": top_p,
668
+ "reasoning_effort": reasoning_effort,
669
+ "max_tokens": max_tokens,
670
+ "stream": True,
671
+ "stop": convert_and_respect_annotation_metadata(
672
+ object_=stop, annotation=StopConfigurationParams, direction="write"
673
+ ),
674
+ "n": n,
675
+ "seed": seed,
676
+ "frequency_penalty": frequency_penalty,
677
+ "presence_penalty": presence_penalty,
678
+ "wiki_grounding": wiki_grounding,
679
+ "tools": convert_and_respect_annotation_metadata(
680
+ object_=tools, annotation=typing.Sequence[ChatCompletionToolParams], direction="write"
681
+ ),
682
+ "tool_choice": convert_and_respect_annotation_metadata(
683
+ object_=tool_choice, annotation=ToolChoiceOptionParams, direction="write"
684
+ ),
685
+ },
686
+ headers={
687
+ "content-type": "application/json",
688
+ },
689
+ request_options=request_options,
690
+ omit=OMIT,
691
+ ) as _response:
692
+ if not (200 <= _response.status_code < 300):
693
+ await _response.aread()
694
+ try:
695
+ _body = _response.json()
696
+ except Exception:
697
+ _body = _response.text
698
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_body)
699
+
700
+ async for _line in _response.aiter_lines():
701
+ if not _line:
702
+ continue
703
+ if _line.startswith("data: "):
704
+ _data_str = _line[len("data: "):]
705
+ if _data_str.strip() == "[DONE]":
706
+ return
707
+ try:
708
+ _chunk_json = json.loads(_data_str)
709
+ _chunk = typing.cast(
710
+ ChatCompletionChunk,
711
+ parse_obj_as(
712
+ type_=ChatCompletionChunk, # type: ignore
713
+ object_=_chunk_json,
714
+ ),
715
+ )
716
+ yield _chunk
717
+ except json.JSONDecodeError:
718
+ continue
venv/lib/python3.12/site-packages/sarvamai/core/__init__.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # isort: skip_file
4
+
5
+ import typing
6
+ from importlib import import_module
7
+
8
+ if typing.TYPE_CHECKING:
9
+ from .api_error import ApiError
10
+ from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper
11
+ from .datetime_utils import Rfc2822DateTime, parse_rfc2822_datetime, serialize_datetime
12
+ from .events import EventEmitterMixin, EventType
13
+ from .file import File, convert_file_dict_to_httpx_tuples, with_content_type
14
+ from .http_client import AsyncHttpClient, HttpClient
15
+ from .http_response import AsyncHttpResponse, HttpResponse
16
+ from .jsonable_encoder import jsonable_encoder
17
+ from .logging import ConsoleLogger, ILogger, LogConfig, LogLevel, Logger, create_logger
18
+ from .pydantic_utilities import (
19
+ IS_PYDANTIC_V2,
20
+ UniversalBaseModel,
21
+ UniversalRootModel,
22
+ parse_obj_as,
23
+ universal_field_validator,
24
+ universal_root_validator,
25
+ update_forward_refs,
26
+ )
27
+ from .query_encoder import encode_query
28
+ from .remove_none_from_dict import remove_none_from_dict
29
+ from .request_options import RequestOptions
30
+ from .serialization import FieldMetadata, convert_and_respect_annotation_metadata
31
+ from .websocket_compat import InvalidWebSocketStatus, get_status_code
32
+ _dynamic_imports: typing.Dict[str, str] = {
33
+ "ApiError": ".api_error",
34
+ "AsyncClientWrapper": ".client_wrapper",
35
+ "AsyncHttpClient": ".http_client",
36
+ "AsyncHttpResponse": ".http_response",
37
+ "BaseClientWrapper": ".client_wrapper",
38
+ "ConsoleLogger": ".logging",
39
+ "EventEmitterMixin": ".events",
40
+ "EventType": ".events",
41
+ "FieldMetadata": ".serialization",
42
+ "File": ".file",
43
+ "HttpClient": ".http_client",
44
+ "HttpResponse": ".http_response",
45
+ "ILogger": ".logging",
46
+ "IS_PYDANTIC_V2": ".pydantic_utilities",
47
+ "InvalidWebSocketStatus": ".websocket_compat",
48
+ "LogConfig": ".logging",
49
+ "LogLevel": ".logging",
50
+ "Logger": ".logging",
51
+ "RequestOptions": ".request_options",
52
+ "Rfc2822DateTime": ".datetime_utils",
53
+ "SyncClientWrapper": ".client_wrapper",
54
+ "UniversalBaseModel": ".pydantic_utilities",
55
+ "UniversalRootModel": ".pydantic_utilities",
56
+ "convert_and_respect_annotation_metadata": ".serialization",
57
+ "convert_file_dict_to_httpx_tuples": ".file",
58
+ "create_logger": ".logging",
59
+ "encode_query": ".query_encoder",
60
+ "get_status_code": ".websocket_compat",
61
+ "jsonable_encoder": ".jsonable_encoder",
62
+ "parse_obj_as": ".pydantic_utilities",
63
+ "parse_rfc2822_datetime": ".datetime_utils",
64
+ "remove_none_from_dict": ".remove_none_from_dict",
65
+ "serialize_datetime": ".datetime_utils",
66
+ "universal_field_validator": ".pydantic_utilities",
67
+ "universal_root_validator": ".pydantic_utilities",
68
+ "update_forward_refs": ".pydantic_utilities",
69
+ "with_content_type": ".file",
70
+ }
71
+
72
+
73
+ def __getattr__(attr_name: str) -> typing.Any:
74
+ module_name = _dynamic_imports.get(attr_name)
75
+ if module_name is None:
76
+ raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
77
+ try:
78
+ module = import_module(module_name, __package__)
79
+ if module_name == f".{attr_name}":
80
+ return module
81
+ else:
82
+ return getattr(module, attr_name)
83
+ except ImportError as e:
84
+ raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e
85
+ except AttributeError as e:
86
+ raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e
87
+
88
+
89
+ def __dir__():
90
+ lazy_attrs = list(_dynamic_imports.keys())
91
+ return sorted(lazy_attrs)
92
+
93
+
94
+ __all__ = [
95
+ "ApiError",
96
+ "AsyncClientWrapper",
97
+ "AsyncHttpClient",
98
+ "AsyncHttpResponse",
99
+ "BaseClientWrapper",
100
+ "ConsoleLogger",
101
+ "EventEmitterMixin",
102
+ "EventType",
103
+ "FieldMetadata",
104
+ "File",
105
+ "HttpClient",
106
+ "HttpResponse",
107
+ "ILogger",
108
+ "IS_PYDANTIC_V2",
109
+ "InvalidWebSocketStatus",
110
+ "LogConfig",
111
+ "LogLevel",
112
+ "Logger",
113
+ "RequestOptions",
114
+ "Rfc2822DateTime",
115
+ "SyncClientWrapper",
116
+ "UniversalBaseModel",
117
+ "UniversalRootModel",
118
+ "convert_and_respect_annotation_metadata",
119
+ "convert_file_dict_to_httpx_tuples",
120
+ "create_logger",
121
+ "encode_query",
122
+ "get_status_code",
123
+ "jsonable_encoder",
124
+ "parse_obj_as",
125
+ "parse_rfc2822_datetime",
126
+ "remove_none_from_dict",
127
+ "serialize_datetime",
128
+ "universal_field_validator",
129
+ "universal_root_validator",
130
+ "update_forward_refs",
131
+ "with_content_type",
132
+ ]
venv/lib/python3.12/site-packages/sarvamai/core/api_error.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import Any, Dict, Optional
4
+
5
+
6
class ApiError(Exception):
    """Error raised for failed API calls, carrying the response metadata.

    Attributes:
        headers: Response headers, when available.
        status_code: HTTP status code of the failed response, when available.
        body: Decoded response body (any JSON-compatible value).
    """

    headers: Optional[Dict[str, str]]
    status_code: Optional[int]
    body: Any

    def __init__(
        self,
        *,
        headers: Optional[Dict[str, str]] = None,
        status_code: Optional[int] = None,
        body: Any = None,
    ) -> None:
        self.body = body
        self.status_code = status_code
        self.headers = headers

    def __str__(self) -> str:
        return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}"
venv/lib/python3.12/site-packages/sarvamai/core/client_wrapper.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ import httpx
6
+ from ..environment import SarvamAIEnvironment
7
+ from .http_client import AsyncHttpClient, HttpClient
8
+ from .logging import LogConfig, Logger
9
+
10
+
11
class BaseClientWrapper:
    """Configuration holder shared by the sync and async client wrappers."""

    def __init__(
        self,
        *,
        api_subscription_key: str,
        headers: typing.Optional[typing.Dict[str, str]] = None,
        environment: SarvamAIEnvironment,
        timeout: typing.Optional[float] = None,
        logging: typing.Optional[typing.Union[LogConfig, Logger]] = None,
    ):
        self.api_subscription_key = api_subscription_key
        self._environment = environment
        self._headers = headers
        self._logging = logging
        self._timeout = timeout

    def get_headers(self) -> typing.Dict[str, str]:
        """Build the base header set attached to every request, including auth."""
        import platform

        runtime_header = f"python/{platform.python_version()}"
        platform_header = f"{platform.system().lower()}/{platform.release()}"
        headers: typing.Dict[str, str] = {
            "User-Agent": "sarvamai/0.1.28",
            "X-Fern-Language": "Python",
            "X-Fern-Runtime": runtime_header,
            "X-Fern-Platform": platform_header,
            "X-Fern-SDK-Name": "sarvamai",
            "X-Fern-SDK-Version": "0.1.28",
            **(self.get_custom_headers() or {}),
        }
        # The subscription key always wins, even over custom headers.
        headers["api-subscription-key"] = self.api_subscription_key
        return headers

    def get_custom_headers(self) -> typing.Optional[typing.Dict[str, str]]:
        """Return any user-supplied extra headers."""
        return self._headers

    def get_environment(self) -> SarvamAIEnvironment:
        """Return the configured API environment."""
        return self._environment

    def get_timeout(self) -> typing.Optional[float]:
        """Return the default per-request timeout in seconds, if configured."""
        return self._timeout
50
+
51
+
52
class SyncClientWrapper(BaseClientWrapper):
    """Client wrapper that binds a synchronous ``httpx.Client`` to the shared config."""

    def __init__(
        self,
        *,
        api_subscription_key: str,
        headers: typing.Optional[typing.Dict[str, str]] = None,
        environment: SarvamAIEnvironment,
        timeout: typing.Optional[float] = None,
        logging: typing.Optional[typing.Union[LogConfig, Logger]] = None,
        httpx_client: httpx.Client,
    ):
        super().__init__(
            api_subscription_key=api_subscription_key,
            environment=environment,
            headers=headers,
            logging=logging,
            timeout=timeout,
        )
        # Headers and timeout are passed as bound getters so that later changes
        # to this wrapper's configuration are picked up on every request.
        self.httpx_client = HttpClient(
            httpx_client=httpx_client,
            base_headers=self.get_headers,
            base_timeout=self.get_timeout,
            logging_config=self._logging,
        )
76
+
77
+
78
class AsyncClientWrapper(BaseClientWrapper):
    """Client wrapper that binds an ``httpx.AsyncClient``, with optional async auth tokens."""

    def __init__(
        self,
        *,
        api_subscription_key: str,
        headers: typing.Optional[typing.Dict[str, str]] = None,
        environment: SarvamAIEnvironment,
        timeout: typing.Optional[float] = None,
        logging: typing.Optional[typing.Union[LogConfig, Logger]] = None,
        async_token: typing.Optional[typing.Callable[[], typing.Awaitable[str]]] = None,
        httpx_client: httpx.AsyncClient,
    ):
        super().__init__(
            api_subscription_key=api_subscription_key,
            environment=environment,
            headers=headers,
            logging=logging,
            timeout=timeout,
        )
        self._async_token = async_token
        self.httpx_client = AsyncHttpClient(
            httpx_client=httpx_client,
            base_headers=self.get_headers,
            base_timeout=self.get_timeout,
            async_base_headers=self.async_get_headers,
            logging_config=self._logging,
        )

    async def async_get_headers(self) -> typing.Dict[str, str]:
        """Return the base headers, adding a Bearer token when an async provider is set."""
        headers = self.get_headers()
        if self._async_token is None:
            return headers
        token = await self._async_token()
        headers["Authorization"] = f"Bearer {token}"
        return headers
venv/lib/python3.12/site-packages/sarvamai/core/datetime_utils.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ from email.utils import parsedate_to_datetime
5
+ from typing import Any
6
+
7
+ import pydantic
8
+
9
+ IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
10
+
11
+
12
def parse_rfc2822_datetime(v: Any) -> dt.datetime:
    """Coerce *v* into a ``datetime``.

    Accepts an existing datetime (returned unchanged), an RFC 2822 string
    (e.g. ``"Wed, 02 Oct 2002 13:00:00 GMT"``), or — as a fallback — an
    ISO 8601 string (a trailing ``Z`` is rewritten to ``+00:00`` first).

    Raises:
        ValueError: if *v* is neither a str nor a datetime.
    """
    if isinstance(v, dt.datetime):
        return v
    if not isinstance(v, str):
        raise ValueError(f"Expected str or datetime, got {type(v)}")
    try:
        return parsedate_to_datetime(v)
    except Exception:
        # Not RFC 2822 — fall back to ISO 8601 parsing.
        return dt.datetime.fromisoformat(v.replace("Z", "+00:00"))
28
+
29
+
30
class Rfc2822DateTime(dt.datetime):
    """A datetime subclass that parses RFC 2822 date strings.

    On Pydantic V1, uses __get_validators__ for pre-validation.
    On Pydantic V2, uses __get_pydantic_core_schema__ for BeforeValidator-style parsing.
    """

    @classmethod
    def __get_validators__(cls):  # type: ignore[no-untyped-def]
        # Pydantic V1 hook: yield validators applied in order.
        yield parse_rfc2822_datetime

    @classmethod
    def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> Any:  # type: ignore[override]
        # Imported lazily so this class is importable when pydantic_core
        # (a Pydantic V2 dependency) is absent.
        from pydantic_core import core_schema

        # Run the RFC 2822 parser before the standard datetime validation.
        return core_schema.no_info_before_validator_function(parse_rfc2822_datetime, core_schema.datetime_schema())
46
+
47
+
48
def serialize_datetime(v: dt.datetime) -> str:
    """Serialize *v* to ISO 8601, always including timezone information.

    Naive datetimes are first localized to the runtime's current timezone.
    UTC values end in ``"Z"``; all other zones use the ``+HH:MM`` offset form.
    """
    if v.tzinfo is None:
        # Attach the local timezone so the output is always zone-qualified.
        v = v.replace(tzinfo=dt.datetime.now().astimezone().tzinfo)
    if v.tzinfo is not None and v.tzinfo.tzname(None) == dt.timezone.utc.tzname(None):
        # UTC is a special case: "Z" instead of "+00:00".
        return v.isoformat().replace("+00:00", "Z")
    return v.isoformat()
venv/lib/python3.12/site-packages/sarvamai/core/events.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import inspect
4
+ import typing
5
+ from enum import Enum
6
+
7
+
8
class EventType(str, Enum):
    """Event names used by EventEmitterMixin; str-valued for easy comparison."""

    OPEN = "open"        # connection/stream opened
    MESSAGE = "message"  # a message payload was received
    ERROR = "error"      # an error occurred
    CLOSE = "close"      # connection/stream closed
14
+
15
class EventEmitterMixin:
    """
    Simple mixin for registering and emitting events.
    """

    def __init__(self) -> None:
        # Maps each event to the list of callbacks registered for it.
        self._callbacks: typing.Dict[EventType, typing.List[typing.Callable]] = {}

    def on(self, event_name: EventType, callback: typing.Callable[[typing.Any], typing.Any]) -> None:
        """Register *callback* to run whenever *event_name* is emitted."""
        self._callbacks.setdefault(event_name, []).append(callback)

    def _emit(self, event_name: EventType, data: typing.Any) -> None:
        """Synchronously invoke every callback registered for *event_name*."""
        for callback in self._callbacks.get(event_name, []):
            callback(data)

    async def _emit_async(self, event_name: EventType, data: typing.Any) -> None:
        """Invoke callbacks for *event_name*, awaiting any awaitable results."""
        for callback in self._callbacks.get(event_name, []):
            result = callback(data)
            if inspect.isawaitable(result):
                await result
venv/lib/python3.12/site-packages/sarvamai/core/file.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast
4
+
5
# File typing inspired by the flexibility of types within the httpx library
# https://github.com/encode/httpx/blob/master/httpx/_types.py
# Raw payload: an open binary stream, raw bytes, or text content.
FileContent = Union[IO[bytes], bytes, str]
# A file argument in any of the accepted shapes:
File = Union[
    # file (or bytes)
    FileContent,
    # (filename, file (or bytes))
    Tuple[Optional[str], FileContent],
    # (filename, file (or bytes), content_type)
    Tuple[Optional[str], FileContent, Optional[str]],
    # (filename, file (or bytes), content_type, headers)
    Tuple[
        Optional[str],
        FileContent,
        Optional[str],
        Mapping[str, str],
    ],
]
23
+
24
+
25
+ def convert_file_dict_to_httpx_tuples(
26
+ d: Dict[str, Union[File, List[File]]],
27
+ ) -> List[Tuple[str, File]]:
28
+ """
29
+ The format we use is a list of tuples, where the first element is the
30
+ name of the file and the second is the file object. Typically HTTPX wants
31
+ a dict, but to be able to send lists of files, you have to use the list
32
+ approach (which also works for non-lists)
33
+ https://github.com/encode/httpx/pull/1032
34
+ """
35
+
36
+ httpx_tuples = []
37
+ for key, file_like in d.items():
38
+ if isinstance(file_like, list):
39
+ for file_like_item in file_like:
40
+ httpx_tuples.append((key, file_like_item))
41
+ else:
42
+ httpx_tuples.append((key, file_like))
43
+ return httpx_tuples
44
+
45
+
46
+ def with_content_type(*, file: File, default_content_type: str) -> File:
47
+ """
48
+ This function resolves to the file's content type, if provided, and defaults
49
+ to the default_content_type value if not.
50
+ """
51
+ if isinstance(file, tuple):
52
+ if len(file) == 2:
53
+ filename, content = cast(Tuple[Optional[str], FileContent], file) # type: ignore
54
+ return (filename, content, default_content_type)
55
+ elif len(file) == 3:
56
+ filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file) # type: ignore
57
+ out_content_type = file_content_type or default_content_type
58
+ return (filename, content, out_content_type)
59
+ elif len(file) == 4:
60
+ filename, content, file_content_type, headers = cast( # type: ignore
61
+ Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file
62
+ )
63
+ out_content_type = file_content_type or default_content_type
64
+ return (filename, content, out_content_type, headers)
65
+ else:
66
+ raise ValueError(f"Unexpected tuple length: {len(file)}")
67
+ return (None, file, default_content_type)
venv/lib/python3.12/site-packages/sarvamai/core/force_multipart.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import Any, Dict
4
+
5
+
6
class ForceMultipartDict(Dict[str, Any]):
    """
    A dictionary subclass that always evaluates to True in boolean contexts.

    This is used to force multipart/form-data encoding in HTTP requests even when
    the dictionary is empty, which would normally evaluate to False.
    """

    def __bool__(self) -> bool:
        # Always truthy, regardless of contents.
        return True
16
+
17
+
18
# Module-level sentinel passed as `files=` to force multipart encoding
# even when there are no actual files to send.
FORCE_MULTIPART = ForceMultipartDict()
venv/lib/python3.12/site-packages/sarvamai/core/http_client.py ADDED
@@ -0,0 +1,776 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import asyncio
4
+ import email.utils
5
+ import re
6
+ import time
7
+ import typing
8
+ from contextlib import asynccontextmanager, contextmanager
9
+ from random import random
10
+
11
+ import httpx
12
+ from .file import File, convert_file_dict_to_httpx_tuples
13
+ from .force_multipart import FORCE_MULTIPART
14
+ from .jsonable_encoder import jsonable_encoder
15
+ from .logging import LogConfig, Logger, create_logger
16
+ from .query_encoder import encode_query
17
+ from .remove_none_from_dict import remove_none_from_dict as remove_none_from_dict
18
+ from .request_options import RequestOptions
19
+ from httpx._types import RequestFiles
20
+
21
# Retry/backoff tuning knobs used by _retry_timeout below.
INITIAL_RETRY_DELAY_SECONDS = 1.0  # base delay of the exponential backoff
MAX_RETRY_DELAY_SECONDS = 60.0  # hard cap on any computed retry delay
JITTER_FACTOR = 0.2  # 20% random jitter
24
+
25
+
26
def _parse_retry_after(response_headers: httpx.Headers) -> typing.Optional[float]:
    """
    This function parses the `Retry-After` header in a HTTP response and returns the number of seconds to wait.

    Checks the non-standard `retry-after-ms` header first, then `Retry-After`
    in both its integer-seconds and HTTP-date forms. Returns None when neither
    header is present or parseable.

    Inspired by the urllib3 retry implementation.
    """
    retry_after_ms = response_headers.get("retry-after-ms")
    if retry_after_ms is not None:
        try:
            # BUGFIX: the header value is a string; convert before comparing.
            # Previously `retry_after_ms > 0` raised TypeError (str vs int),
            # which was swallowed and silently disabled this branch.
            ms = int(retry_after_ms)
            return ms / 1000 if ms > 0 else 0
        except ValueError:
            pass

    retry_after = response_headers.get("retry-after")
    if retry_after is None:
        return None

    # Attempt to parse the header as an int (plain number of seconds).
    if re.match(r"^\s*[0-9]+\s*$", retry_after):
        seconds = float(retry_after)
    # Fallback to parsing it as a date.
    else:
        retry_date_tuple = email.utils.parsedate_tz(retry_after)
        if retry_date_tuple is None:
            return None
        if retry_date_tuple[9] is None:
            # Assume UTC if no timezone was specified: parsedate_tz returns
            # None for a missing offset, but mktime_tz would treat None as
            # local time.
            retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]

        retry_date = email.utils.mktime_tz(retry_date_tuple)
        seconds = retry_date - time.time()

    # Dates in the past mean "retry now".
    if seconds < 0:
        seconds = 0

    return seconds
65
+
66
+
67
def _add_positive_jitter(delay: float) -> float:
    """Scale *delay* up by a random 0-20% to prevent thundering-herd retries."""
    return delay * (1 + random() * JITTER_FACTOR)
71
+
72
+
73
def _add_symmetric_jitter(delay: float) -> float:
    """Scale *delay* by a random ±10% (half of JITTER_FACTOR each way) for backoff."""
    return delay * (1 + (random() - 0.5) * JITTER_FACTOR)
77
+
78
+
79
def _parse_x_ratelimit_reset(response_headers: httpx.Headers) -> typing.Optional[float]:
    """
    Parse the X-RateLimit-Reset header (Unix timestamp in seconds).
    Returns seconds to wait, or None if header is missing/invalid or already past.
    """
    raw_reset = response_headers.get("x-ratelimit-reset")
    if raw_reset is None:
        return None
    try:
        remaining = int(raw_reset) - time.time()
    except (ValueError, TypeError):
        return None
    return remaining if remaining > 0 else None
97
+
98
+
99
def _retry_timeout(response: httpx.Response, retries: int) -> float:
    """
    Determine how long (seconds) to wait before retrying a request.

    Preference order: the Retry-After header, then X-RateLimit-Reset (with
    positive jitter), then exponential backoff with symmetric jitter. Every
    result is capped at MAX_RETRY_DELAY_SECONDS.
    """
    # 1. Honor an explicit Retry-After if present and positive.
    retry_after = _parse_retry_after(response.headers)
    if retry_after is not None and retry_after > 0:
        return min(retry_after, MAX_RETRY_DELAY_SECONDS)

    # 2. Honor X-RateLimit-Reset, jittered upward to spread out clients.
    ratelimit_reset = _parse_x_ratelimit_reset(response.headers)
    if ratelimit_reset is not None:
        return _add_positive_jitter(min(ratelimit_reset, MAX_RETRY_DELAY_SECONDS))

    # 3. Exponential backoff with symmetric jitter.
    exponential = INITIAL_RETRY_DELAY_SECONDS * (2.0 ** retries)
    return _add_symmetric_jitter(min(exponential, MAX_RETRY_DELAY_SECONDS))
119
+
120
+
121
def _should_retry(response: httpx.Response) -> bool:
    """Return True for status codes worth retrying: any 5xx, plus 408/409/429."""
    return response.status_code >= 500 or response.status_code in (408, 409, 429)
124
+
125
+
126
+ _SENSITIVE_HEADERS = frozenset(
127
+ {
128
+ "authorization",
129
+ "www-authenticate",
130
+ "x-api-key",
131
+ "api-key",
132
+ "apikey",
133
+ "x-api-token",
134
+ "x-auth-token",
135
+ "auth-token",
136
+ "cookie",
137
+ "set-cookie",
138
+ "proxy-authorization",
139
+ "proxy-authenticate",
140
+ "x-csrf-token",
141
+ "x-xsrf-token",
142
+ "x-session-token",
143
+ "x-access-token",
144
+ }
145
+ )
146
+
147
+
148
+ def _redact_headers(headers: typing.Dict[str, str]) -> typing.Dict[str, str]:
149
+ return {k: ("[REDACTED]" if k.lower() in _SENSITIVE_HEADERS else v) for k, v in headers.items()}
150
+
151
+
152
+ def _build_url(base_url: str, path: typing.Optional[str]) -> str:
153
+ """
154
+ Build a full URL by joining a base URL with a path.
155
+
156
+ This function correctly handles base URLs that contain path prefixes (e.g., tenant-based URLs)
157
+ by using string concatenation instead of urllib.parse.urljoin(), which would incorrectly
158
+ strip path components when the path starts with '/'.
159
+
160
+ Example:
161
+ >>> _build_url("https://cloud.example.com/org/tenant/api", "/users")
162
+ 'https://cloud.example.com/org/tenant/api/users'
163
+
164
+ Args:
165
+ base_url: The base URL, which may contain path prefixes.
166
+ path: The path to append. Can be None or empty string.
167
+
168
+ Returns:
169
+ The full URL with base_url and path properly joined.
170
+ """
171
+ if not path:
172
+ return base_url
173
+ return f"{base_url.rstrip('/')}/{path.lstrip('/')}"
174
+
175
+
176
def _maybe_filter_none_from_multipart_data(
    data: typing.Optional[typing.Any],
    request_files: typing.Optional[RequestFiles],
    force_multipart: typing.Optional[bool],
) -> typing.Optional[typing.Any]:
    """
    Filter None values from data body for multipart/form requests.
    This prevents httpx from converting None to empty strings in multipart encoding.
    Only applies when files are present or force_multipart is True.
    """
    is_multipart = bool(request_files) or bool(force_multipart)
    if is_multipart and isinstance(data, typing.Mapping):
        return remove_none_from_dict(data)
    return data
189
+
190
+
191
def remove_omit_from_dict(
    original: typing.Dict[str, typing.Optional[typing.Any]],
    omit: typing.Optional[typing.Any],
) -> typing.Dict[str, typing.Any]:
    """Return *original* without entries whose value is the *omit* sentinel.

    When no sentinel is given, the original mapping is returned unchanged
    (same object, not a copy).
    """
    if omit is None:
        return original
    # Identity comparison on purpose: omit is a sentinel object, not a value.
    return {key: value for key, value in original.items() if value is not omit}
202
+
203
+
204
def maybe_filter_request_body(
    data: typing.Optional[typing.Any],
    request_options: typing.Optional[RequestOptions],
    omit: typing.Optional[typing.Any],
) -> typing.Optional[typing.Any]:
    """Encode a request body and merge in any additional body parameters.

    Behavior:
      - data is None: return just the encoded additional body parameters
        (or None when there are no request options at all).
      - data is not a mapping (list, primitive, ...): encode it as-is; extra
        parameters cannot be merged into a non-mapping body.
      - data is a mapping: strip omit-sentinel values, encode, and overlay the
        encoded additional body parameters.
    """
    if data is None:
        if request_options is None:
            return None
        return jsonable_encoder(request_options.get("additional_body_parameters", {})) or {}
    if not isinstance(data, typing.Mapping):
        return jsonable_encoder(data)
    extra = (
        jsonable_encoder(request_options.get("additional_body_parameters", {})) or {}
        if request_options is not None
        else {}
    )
    return {
        **(jsonable_encoder(remove_omit_from_dict(data, omit))),  # type: ignore
        **extra,
    }
227
+
228
+
229
# Abstracted out for testing purposes
def get_request_body(
    *,
    json: typing.Optional[typing.Any],
    data: typing.Optional[typing.Any],
    request_options: typing.Optional[RequestOptions],
    omit: typing.Optional[typing.Any],
) -> typing.Tuple[typing.Optional[typing.Any], typing.Optional[typing.Any]]:
    """Build the ``(json_body, data_body)`` pair for an outgoing request.

    Exactly one side is populated: ``data`` wins when provided; otherwise the
    json body is used (this also carries any additional body parameters from
    request_options when both are None). An empty-dict body collapses to None
    unless the caller explicitly supplied a body or additional body parameters
    exist — preserving deliberate empty bodies.
    """
    json_body: typing.Optional[typing.Any] = None
    data_body: typing.Optional[typing.Any] = None
    if data is not None:
        data_body = maybe_filter_request_body(data, request_options, omit)
    else:
        # If both data and json are None, we still send json in case extra
        # body parameters were specified via request_options.
        json_body = maybe_filter_request_body(json, request_options, omit)

    has_additional_body_parameters = bool(
        request_options is not None and request_options.get("additional_body_parameters")
    )

    if not has_additional_body_parameters:
        # Collapse implicit empty bodies only; explicit empty bodies survive.
        if json_body == {} and json is None:
            json_body = None
        if data_body == {} and data is None:
            data_body = None

    return json_body, data_body
258
+
259
+
260
class HttpClient:
    """Synchronous HTTP client: wraps an ``httpx.Client`` with header/timeout
    resolution, query/body encoding, redacted debug logging, and retry with
    backoff for retryable status codes."""

    def __init__(
        self,
        *,
        httpx_client: httpx.Client,
        base_timeout: typing.Callable[[], typing.Optional[float]],
        base_headers: typing.Callable[[], typing.Dict[str, str]],
        base_url: typing.Optional[typing.Callable[[], str]] = None,
        logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None,
    ):
        # Getters (not values) so configuration is re-resolved per request.
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.httpx_client = httpx_client
        self.logger = create_logger(logging_config)

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Resolve the base URL from the per-request override or the configured default.

        Raises:
            ValueError: if neither an override nor a default is available.
        """
        base_url = maybe_base_url
        if self.base_url is not None and base_url is None:
            base_url = self.base_url()

        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url

    def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[
            typing.Union[
                typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
                typing.List[typing.Tuple[str, File]],
            ]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
        force_multipart: typing.Optional[bool] = None,
    ) -> httpx.Response:
        """Send an HTTP request and return the response.

        Retries responses that _should_retry() deems retryable, up to
        ``max_retries`` (from request_options, default 2), sleeping per
        _retry_timeout() between attempts.
        """
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        request_files: typing.Optional[RequestFiles] = (
            convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
            if (files is not None and files is not omit and isinstance(files, dict))
            else None
        )

        if (request_files is None or len(request_files) == 0) and force_multipart:
            # Truthy-but-empty dict forces httpx into multipart encoding.
            request_files = FORCE_MULTIPART

        data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)

        # Compute encoded params separately to avoid passing empty list to httpx
        # (httpx strips existing query params from URL when params=[] is passed)
        _encoded_params = encode_query(
            jsonable_encoder(
                remove_none_from_dict(
                    remove_omit_from_dict(
                        {
                            **(params if params is not None else {}),
                            **(
                                request_options.get("additional_query_parameters", {}) or {}
                                if request_options is not None
                                else {}
                            ),
                        },
                        omit,
                    )
                )
            )
        )

        _request_url = _build_url(base_url, path)
        _request_headers = jsonable_encoder(
            remove_none_from_dict(
                {
                    **self.base_headers(),
                    **(headers if headers is not None else {}),
                    **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                }
            )
        )

        if self.logger.is_debug():
            self.logger.debug(
                "Making HTTP request",
                method=method,
                url=_request_url,
                headers=_redact_headers(_request_headers),
                has_body=json_body is not None or data_body is not None,
            )

        response = self.httpx_client.request(
            method=method,
            url=_request_url,
            headers=_request_headers,
            params=_encoded_params if _encoded_params else None,
            json=json_body,
            data=data_body,
            content=content,
            files=request_files,
            timeout=timeout,
        )

        max_retries: int = request_options.get("max_retries", 2) if request_options is not None else 2
        if _should_retry(response=response):
            if retries < max_retries:
                time.sleep(_retry_timeout(response=response, retries=retries))
                # NOTE: retries with an already-consumed `content` iterator will
                # send an empty body; callers that need retries should pass bytes.
                return self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    # BUGFIX: previously `data` and `force_multipart` were not
                    # forwarded, so retried form-data requests lost their body
                    # and their multipart encoding.
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                    force_multipart=force_multipart,
                )

        if self.logger.is_debug():
            if 200 <= response.status_code < 400:
                self.logger.debug(
                    "HTTP request succeeded",
                    method=method,
                    url=_request_url,
                    status_code=response.status_code,
                )

        if self.logger.is_error():
            if response.status_code >= 400:
                self.logger.error(
                    "HTTP request failed with error status",
                    method=method,
                    url=_request_url,
                    status_code=response.status_code,
                )

        return response

    @contextmanager
    def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[
            typing.Union[
                typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
                typing.List[typing.Tuple[str, File]],
            ]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
        force_multipart: typing.Optional[bool] = None,
    ) -> typing.Iterator[httpx.Response]:
        """Context manager yielding a streaming response (no retries)."""
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        request_files: typing.Optional[RequestFiles] = (
            convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
            if (files is not None and files is not omit and isinstance(files, dict))
            else None
        )

        if (request_files is None or len(request_files) == 0) and force_multipart:
            request_files = FORCE_MULTIPART

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)

        # Compute encoded params separately to avoid passing empty list to httpx
        # (httpx strips existing query params from URL when params=[] is passed)
        _encoded_params = encode_query(
            jsonable_encoder(
                remove_none_from_dict(
                    remove_omit_from_dict(
                        {
                            **(params if params is not None else {}),
                            **(
                                # CONSISTENCY FIX: `or {}` guards a None value for
                                # the key, matching request() above.
                                request_options.get("additional_query_parameters", {}) or {}
                                if request_options is not None
                                else {}
                            ),
                        },
                        omit,
                    )
                )
            )
        )

        _request_url = _build_url(base_url, path)
        _request_headers = jsonable_encoder(
            remove_none_from_dict(
                {
                    **self.base_headers(),
                    **(headers if headers is not None else {}),
                    # CONSISTENCY FIX: `or {}` guard, matching request() above.
                    **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                }
            )
        )

        if self.logger.is_debug():
            self.logger.debug(
                "Making streaming HTTP request",
                method=method,
                url=_request_url,
                headers=_redact_headers(_request_headers),
            )

        with self.httpx_client.stream(
            method=method,
            url=_request_url,
            headers=_request_headers,
            params=_encoded_params if _encoded_params else None,
            json=json_body,
            data=data_body,
            content=content,
            files=request_files,
            timeout=timeout,
        ) as stream:
            yield stream
511
+
512
+
513
class AsyncHttpClient:
    """Async HTTP client wrapper around ``httpx.AsyncClient``.

    Responsibilities:
      - merge base / per-call / request-option headers and query params
      - encode bodies and multipart files
      - retry retryable responses with backoff (``request`` only)
      - emit debug/error logs through the configured ``Logger``
    """

    def __init__(
        self,
        *,
        httpx_client: httpx.AsyncClient,
        base_timeout: typing.Callable[[], typing.Optional[float]],
        base_headers: typing.Callable[[], typing.Dict[str, str]],
        base_url: typing.Optional[typing.Callable[[], str]] = None,
        async_base_headers: typing.Optional[typing.Callable[[], typing.Awaitable[typing.Dict[str, str]]]] = None,
        logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None,
    ):
        self.base_url = base_url
        self.base_timeout = base_timeout
        self.base_headers = base_headers
        self.async_base_headers = async_base_headers
        self.httpx_client = httpx_client
        self.logger = create_logger(logging_config)

    async def _get_headers(self) -> typing.Dict[str, str]:
        # Prefer the async header provider (supports async token refresh) when configured.
        if self.async_base_headers is not None:
            return await self.async_base_headers()
        return self.base_headers()

    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
        """Resolve the effective base URL (per-call override wins), raising if none is available."""
        base_url = maybe_base_url
        if self.base_url is not None and base_url is None:
            base_url = self.base_url()

        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url

    async def request(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[
            typing.Union[
                typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
                typing.List[typing.Tuple[str, File]],
            ]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
        force_multipart: typing.Optional[bool] = None,
    ) -> httpx.Response:
        """Send one request and return the response, retrying retryable responses with backoff.

        ``retries`` is the internal recursion counter; callers should leave it at 0.
        """
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        request_files: typing.Optional[RequestFiles] = (
            convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
            if (files is not None and files is not omit and isinstance(files, dict))
            else None
        )

        if (request_files is None or len(request_files) == 0) and force_multipart:
            request_files = FORCE_MULTIPART

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)

        # Get headers (supports async token providers)
        _headers = await self._get_headers()

        # Compute encoded params separately to avoid passing empty list to httpx
        # (httpx strips existing query params from URL when params=[] is passed)
        _encoded_params = encode_query(
            jsonable_encoder(
                remove_none_from_dict(
                    remove_omit_from_dict(
                        {
                            **(params if params is not None else {}),
                            **(
                                # `or {}` guards against an explicit None in request_options
                                request_options.get("additional_query_parameters", {}) or {}
                                if request_options is not None
                                else {}
                            ),
                        },
                        omit,
                    )
                )
            )
        )

        _request_url = _build_url(base_url, path)
        _request_headers = jsonable_encoder(
            remove_none_from_dict(
                {
                    **_headers,
                    **(headers if headers is not None else {}),
                    **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                }
            )
        )

        if self.logger.is_debug():
            self.logger.debug(
                "Making HTTP request",
                method=method,
                url=_request_url,
                headers=_redact_headers(_request_headers),
                has_body=json_body is not None or data_body is not None,
            )

        response = await self.httpx_client.request(
            method=method,
            url=_request_url,
            headers=_request_headers,
            params=_encoded_params if _encoded_params else None,
            json=json_body,
            data=data_body,
            content=content,
            files=request_files,
            timeout=timeout,
        )

        max_retries: int = request_options.get("max_retries", 2) if request_options is not None else 2
        if _should_retry(response=response):
            if retries < max_retries:
                await asyncio.sleep(_retry_timeout(response=response, retries=retries))
                # BUGFIX: the retry previously dropped `data` and `force_multipart`,
                # so retried requests lost their form body / multipart forcing.
                return await self.request(
                    path=path,
                    method=method,
                    base_url=base_url,
                    params=params,
                    json=json,
                    data=data,
                    content=content,
                    files=files,
                    headers=headers,
                    request_options=request_options,
                    retries=retries + 1,
                    omit=omit,
                    force_multipart=force_multipart,
                )

        if self.logger.is_debug():
            if 200 <= response.status_code < 400:
                self.logger.debug(
                    "HTTP request succeeded",
                    method=method,
                    url=_request_url,
                    status_code=response.status_code,
                )

        if self.logger.is_error():
            if response.status_code >= 400:
                self.logger.error(
                    "HTTP request failed with error status",
                    method=method,
                    url=_request_url,
                    status_code=response.status_code,
                )

        return response

    @asynccontextmanager
    async def stream(
        self,
        path: typing.Optional[str] = None,
        *,
        method: str,
        base_url: typing.Optional[str] = None,
        params: typing.Optional[typing.Dict[str, typing.Any]] = None,
        json: typing.Optional[typing.Any] = None,
        data: typing.Optional[typing.Any] = None,
        content: typing.Optional[typing.Union[bytes, typing.Iterator[bytes], typing.AsyncIterator[bytes]]] = None,
        files: typing.Optional[
            typing.Union[
                typing.Dict[str, typing.Optional[typing.Union[File, typing.List[File]]]],
                typing.List[typing.Tuple[str, File]],
            ]
        ] = None,
        headers: typing.Optional[typing.Dict[str, typing.Any]] = None,
        request_options: typing.Optional[RequestOptions] = None,
        retries: int = 0,
        omit: typing.Optional[typing.Any] = None,
        force_multipart: typing.Optional[bool] = None,
    ) -> typing.AsyncIterator[httpx.Response]:
        """Open a streaming request and yield the live ``httpx.Response``.

        Unlike ``request``, streaming has no retry logic; the response is closed
        when the ``async with`` block exits.
        """
        base_url = self.get_base_url(base_url)
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
            else self.base_timeout()
        )

        request_files: typing.Optional[RequestFiles] = (
            convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
            if (files is not None and files is not omit and isinstance(files, dict))
            else None
        )

        if (request_files is None or len(request_files) == 0) and force_multipart:
            request_files = FORCE_MULTIPART

        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)

        data_body = _maybe_filter_none_from_multipart_data(data_body, request_files, force_multipart)

        # Get headers (supports async token providers)
        _headers = await self._get_headers()

        # Compute encoded params separately to avoid passing empty list to httpx
        # (httpx strips existing query params from URL when params=[] is passed)
        _encoded_params = encode_query(
            jsonable_encoder(
                remove_none_from_dict(
                    remove_omit_from_dict(
                        {
                            **(params if params is not None else {}),
                            **(
                                # CONSISTENCY FIX: `or {}` guard added to match request();
                                # an explicit None in request_options would crash `**None`.
                                request_options.get("additional_query_parameters", {}) or {}
                                if request_options is not None
                                else {}
                            ),
                        },
                        omit,
                    )
                )
            )
        )

        _request_url = _build_url(base_url, path)
        _request_headers = jsonable_encoder(
            remove_none_from_dict(
                {
                    **_headers,
                    **(headers if headers is not None else {}),
                    **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                }
            )
        )

        if self.logger.is_debug():
            self.logger.debug(
                "Making streaming HTTP request",
                method=method,
                url=_request_url,
                headers=_redact_headers(_request_headers),
            )

        async with self.httpx_client.stream(
            method=method,
            url=_request_url,
            headers=_request_headers,
            params=_encoded_params if _encoded_params else None,
            json=json_body,
            data=data_body,
            content=content,
            files=request_files,
            timeout=timeout,
        ) as stream:
            yield stream
venv/lib/python3.12/site-packages/sarvamai/core/http_response.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import Dict, Generic, TypeVar
4
+
5
+ import httpx
6
+
7
+ # Generic to represent the underlying type of the data wrapped by the HTTP response.
8
+ T = TypeVar("T")
9
+
10
+
11
class BaseHttpResponse:
    """Thin wrapper over an httpx response exposing only status code and headers."""

    _response: httpx.Response

    def __init__(self, response: httpx.Response):
        self._response = response

    @property
    def status_code(self) -> int:
        """HTTP status code of the wrapped response."""
        return self._response.status_code

    @property
    def headers(self) -> Dict[str, str]:
        """Response headers copied into a plain dict."""
        raw_headers = self._response.headers
        return dict(raw_headers)
26
+
27
+
28
class HttpResponse(Generic[T], BaseHttpResponse):
    """Synchronous HTTP response wrapper pairing the raw response with typed data."""

    _data: T

    def __init__(self, response: httpx.Response, data: T):
        super().__init__(response)
        self._data = data

    @property
    def data(self) -> T:
        """Deserialized payload associated with this response."""
        return self._data

    def close(self) -> None:
        """Release the underlying response's resources."""
        self._response.close()
43
+
44
+
45
class AsyncHttpResponse(Generic[T], BaseHttpResponse):
    """Async HTTP response wrapper pairing the raw response with typed data."""

    _data: T

    def __init__(self, response: httpx.Response, data: T):
        super().__init__(response)
        self._data = data

    @property
    def data(self) -> T:
        """Deserialized payload associated with this response."""
        return self._data

    async def close(self) -> None:
        """Asynchronously release the underlying response's resources."""
        await self._response.aclose()
venv/lib/python3.12/site-packages/sarvamai/core/jsonable_encoder.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ """
4
+ jsonable_encoder converts a Python object to a JSON-friendly dict
5
+ (e.g. datetimes to strings, Pydantic models to dicts).
6
+
7
+ Taken from FastAPI, and made a bit simpler
8
+ https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py
9
+ """
10
+
11
+ import base64
12
+ import dataclasses
13
+ import datetime as dt
14
+ from enum import Enum
15
+ from pathlib import PurePath
16
+ from types import GeneratorType
17
+ from typing import Any, Callable, Dict, List, Optional, Set, Union
18
+
19
+ import pydantic
20
+ from .datetime_utils import serialize_datetime
21
+ from .pydantic_utilities import (
22
+ IS_PYDANTIC_V2,
23
+ encode_by_type,
24
+ to_jsonable_with_fallback,
25
+ )
26
+
27
+ SetIntStr = Set[Union[int, str]]
28
+ DictIntStrAny = Dict[Union[int, str], Any]
29
+
30
+
31
def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:
    """Convert *obj* into a JSON-friendly structure.

    Pydantic models become dicts, datetimes become strings, bytes become
    base64, enums become their values, and containers are encoded recursively.
    ``custom_encoder`` maps types to encoding callables and takes precedence.
    """
    custom_encoder = custom_encoder or {}
    # Generated SDKs use Ellipsis (`...`) as the sentinel value for "OMIT".
    # OMIT values should be excluded from serialized payloads.
    if obj is Ellipsis:
        return None
    if custom_encoder:
        if type(obj) in custom_encoder:
            return custom_encoder[type(obj)](obj)
        else:
            for encoder_type, encoder_instance in custom_encoder.items():
                if isinstance(obj, encoder_type):
                    return encoder_instance(obj)
    if isinstance(obj, pydantic.BaseModel):
        if IS_PYDANTIC_V2:
            encoder = getattr(obj.model_config, "json_encoders", {})  # type: ignore # Pydantic v2
        else:
            encoder = getattr(obj.__config__, "json_encoders", {})  # type: ignore # Pydantic v1
        if custom_encoder:
            # BUGFIX: merge into a copy. The previous in-place `encoder.update(...)`
            # mutated the model class's shared json_encoders config, leaking
            # custom encoders into every later serialization of that model type.
            encoder = {**encoder, **custom_encoder}
        obj_dict = obj.dict(by_alias=True)
        # Unwrap root models (Pydantic v1 `__root__` / v2 `root`).
        if "__root__" in obj_dict:
            obj_dict = obj_dict["__root__"]
        if "root" in obj_dict:
            obj_dict = obj_dict["root"]
        return jsonable_encoder(obj_dict, custom_encoder=encoder)
    if dataclasses.is_dataclass(obj):
        obj_dict = dataclasses.asdict(obj)  # type: ignore
        return jsonable_encoder(obj_dict, custom_encoder=custom_encoder)
    if isinstance(obj, bytes):
        return base64.b64encode(obj).decode("utf-8")
    if isinstance(obj, Enum):
        return obj.value
    if isinstance(obj, PurePath):
        return str(obj)
    if isinstance(obj, (str, int, float, type(None))):
        return obj
    if isinstance(obj, dt.datetime):
        return serialize_datetime(obj)
    if isinstance(obj, dt.date):
        return str(obj)
    if isinstance(obj, dict):
        # Note: the old `allowed_keys` membership filter was a full copy of the
        # dict's own keys, so it was always true and has been removed.
        encoded_dict = {}
        for key, value in obj.items():
            if value is Ellipsis:
                # Drop OMIT sentinel values entirely.
                continue
            encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder)
            encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder)
            encoded_dict[encoded_key] = encoded_value
        return encoded_dict
    if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
        encoded_list = []
        for item in obj:
            if item is Ellipsis:
                continue
            encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder))
        return encoded_list

    def fallback_serializer(o: Any) -> Any:
        # Last resort: project-specific type table, then dict()/vars() coercion.
        attempt_encode = encode_by_type(o)
        if attempt_encode is not None:
            return attempt_encode

        try:
            data = dict(o)
        except Exception as e:
            errors: List[Exception] = []
            errors.append(e)
            try:
                data = vars(o)
            except Exception as e:
                errors.append(e)
                raise ValueError(errors) from e
        return jsonable_encoder(data, custom_encoder=custom_encoder)

    return to_jsonable_with_fallback(obj, fallback_serializer)
venv/lib/python3.12/site-packages/sarvamai/core/logging.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import logging
4
+ import typing
5
+
6
# Accepted log level names, most verbose first.
LogLevel = typing.Literal["debug", "info", "warn", "error"]

# Numeric severity ranks used for threshold comparison in Logger._should_log.
_LOG_LEVEL_MAP: typing.Dict[LogLevel, int] = {
    "debug": 1,
    "info": 2,
    "warn": 3,
    "error": 4,
}
14
+
15
+
16
class ILogger(typing.Protocol):
    """Structural (duck-typed) interface any user-supplied logger must satisfy."""

    def debug(self, message: str, **kwargs: typing.Any) -> None: ...
    def info(self, message: str, **kwargs: typing.Any) -> None: ...
    def warn(self, message: str, **kwargs: typing.Any) -> None: ...
    def error(self, message: str, **kwargs: typing.Any) -> None: ...
21
+
22
+
23
class ConsoleLogger:
    """Default ILogger implementation backed by the stdlib ``logging`` module.

    Uses the shared ``"fern"`` logger; a stream handler is attached only once,
    so repeated construction does not duplicate output.
    """

    _logger: logging.Logger

    def __init__(self) -> None:
        target = logging.getLogger("fern")
        if not target.handlers:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s"))
            target.addHandler(stream_handler)
        target.setLevel(logging.DEBUG)
        self._logger = target

    def debug(self, message: str, **kwargs: typing.Any) -> None:
        self._logger.debug(message, extra=kwargs)

    def info(self, message: str, **kwargs: typing.Any) -> None:
        self._logger.info(message, extra=kwargs)

    def warn(self, message: str, **kwargs: typing.Any) -> None:
        # stdlib spells this method "warning"
        self._logger.warning(message, extra=kwargs)

    def error(self, message: str, **kwargs: typing.Any) -> None:
        self._logger.error(message, extra=kwargs)
45
+
46
+
47
class LogConfig(typing.TypedDict, total=False):
    """Optional logging configuration accepted by create_logger (all keys optional)."""

    level: LogLevel
    logger: ILogger
    silent: bool
51
+
52
+
53
class Logger:
    """Level-filtering facade over an ILogger.

    Messages below the configured level are dropped; ``silent=True``
    suppresses everything regardless of level.
    """

    _level: int
    _logger: ILogger
    _silent: bool

    def __init__(self, *, level: LogLevel, logger: ILogger, silent: bool) -> None:
        self._level = _LOG_LEVEL_MAP[level]
        self._logger = logger
        self._silent = silent

    def _should_log(self, level: LogLevel) -> bool:
        if self._silent:
            return False
        return self._level <= _LOG_LEVEL_MAP[level]

    def is_debug(self) -> bool:
        return self._should_log("debug")

    def is_info(self) -> bool:
        return self._should_log("info")

    def is_warn(self) -> bool:
        return self._should_log("warn")

    def is_error(self) -> bool:
        return self._should_log("error")

    def debug(self, message: str, **kwargs: typing.Any) -> None:
        if not self.is_debug():
            return
        self._logger.debug(message, **kwargs)

    def info(self, message: str, **kwargs: typing.Any) -> None:
        if not self.is_info():
            return
        self._logger.info(message, **kwargs)

    def warn(self, message: str, **kwargs: typing.Any) -> None:
        if not self.is_warn():
            return
        self._logger.warn(message, **kwargs)

    def error(self, message: str, **kwargs: typing.Any) -> None:
        if not self.is_error():
            return
        self._logger.error(message, **kwargs)
93
+
94
+
95
# Module-wide fallback used when no logging config is supplied; silent by default.
_default_logger: Logger = Logger(level="info", logger=ConsoleLogger(), silent=True)
96
+
97
+
98
def create_logger(config: typing.Optional[typing.Union[LogConfig, Logger]] = None) -> Logger:
    """Normalize any accepted logging configuration into a ``Logger``.

    ``None`` yields the shared silent default; an existing ``Logger`` passes
    through unchanged; a ``LogConfig`` dict is filled in with defaults.
    """
    if config is None:
        return _default_logger
    if isinstance(config, Logger):
        return config
    level = config.get("level", "info")
    sink = config.get("logger", ConsoleLogger())
    silent = config.get("silent", True)
    return Logger(level=level, logger=sink, silent=silent)
venv/lib/python3.12/site-packages/sarvamai/core/pydantic_utilities.py ADDED
@@ -0,0 +1,577 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # nopycln: file
4
+ import datetime as dt
5
+ import inspect
6
+ import json
7
+ import logging
8
+ from collections import defaultdict
9
+ from dataclasses import asdict
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Any,
13
+ Callable,
14
+ ClassVar,
15
+ Dict,
16
+ List,
17
+ Mapping,
18
+ Optional,
19
+ Set,
20
+ Tuple,
21
+ Type,
22
+ TypeVar,
23
+ Union,
24
+ cast,
25
+ )
26
+
27
+ import pydantic
28
+ import typing_extensions
29
+
30
+ _logger = logging.getLogger(__name__)
31
+
32
+ if TYPE_CHECKING:
33
+ from .http_sse._models import ServerSentEvent
34
+
35
+ IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
36
+
37
+ if IS_PYDANTIC_V2:
38
+ import warnings
39
+
40
+ _datetime_adapter = pydantic.TypeAdapter(dt.datetime) # type: ignore[attr-defined]
41
+ _date_adapter = pydantic.TypeAdapter(dt.date) # type: ignore[attr-defined]
42
+
43
+ def parse_datetime(value: Any) -> dt.datetime: # type: ignore[misc]
44
+ if isinstance(value, dt.datetime):
45
+ return value
46
+ return _datetime_adapter.validate_python(value)
47
+
48
+ def parse_date(value: Any) -> dt.date: # type: ignore[misc]
49
+ if isinstance(value, dt.datetime):
50
+ return value.date()
51
+ if isinstance(value, dt.date):
52
+ return value
53
+ return _date_adapter.validate_python(value)
54
+
55
+ with warnings.catch_warnings():
56
+ warnings.simplefilter("ignore", UserWarning)
57
+ from pydantic.v1.fields import ModelField as ModelField
58
+ from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[attr-defined]
59
+ from pydantic.v1.typing import get_args as get_args
60
+ from pydantic.v1.typing import get_origin as get_origin
61
+ from pydantic.v1.typing import is_literal_type as is_literal_type
62
+ from pydantic.v1.typing import is_union as is_union
63
+ else:
64
+ from pydantic.datetime_parse import parse_date as parse_date # type: ignore[no-redef]
65
+ from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore[no-redef]
66
+ from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef]
67
+ from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[no-redef]
68
+ from pydantic.typing import get_args as get_args # type: ignore[no-redef]
69
+ from pydantic.typing import get_origin as get_origin # type: ignore[no-redef]
70
+ from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef]
71
+ from pydantic.typing import is_union as is_union # type: ignore[no-redef]
72
+
73
+ from .datetime_utils import serialize_datetime
74
+ from .serialization import convert_and_respect_annotation_metadata
75
+ from typing_extensions import TypeAlias
76
+
77
+ T = TypeVar("T")
78
+ Model = TypeVar("Model", bound=pydantic.BaseModel)
79
+
80
+
81
def _get_discriminator_and_variants(type_: Type[Any]) -> Tuple[Optional[str], Optional[List[Type[Any]]]]:
    """
    Extract the discriminator field name and union variants from a discriminated union type.

    Supports Annotated[Union[...], Field(discriminator=...)] patterns.
    Returns (discriminator, variants) or (None, None) if not a discriminated union.
    """
    if typing_extensions.get_origin(type_) is not typing_extensions.Annotated:
        return None, None

    annotated_args = typing_extensions.get_args(type_)
    if len(annotated_args) < 2:
        return None, None

    inner_type = annotated_args[0]
    # First metadata item carrying a `discriminator` attribute wins.
    discriminator = None
    for meta in annotated_args[1:]:
        if hasattr(meta, "discriminator"):
            discriminator = getattr(meta, "discriminator", None)
            break

    if not discriminator:
        return None, None

    if typing_extensions.get_origin(inner_type) is Union:
        return discriminator, list(typing_extensions.get_args(inner_type))
    return None, None
106
+
107
+
108
def _get_field_annotation(model: Type[Any], field_name: str) -> Optional[Type[Any]]:
    """Get the type annotation of a field from a Pydantic model (v1 or v2)."""
    if IS_PYDANTIC_V2:
        field_info = getattr(model, "model_fields", {}).get(field_name)
        if field_info:
            return cast(Optional[Type[Any]], field_info.annotation)
    else:
        field_info = getattr(model, "__fields__", {}).get(field_name)
        if field_info:
            return cast(Optional[Type[Any]], field_info.outer_type_)
    return None
121
+
122
+
123
def _find_variant_by_discriminator(
    variants: List[Type[Any]],
    discriminator: str,
    discriminator_value: Any,
) -> Optional[Type[Any]]:
    """Find the union variant whose Literal discriminator field matches the given value."""
    for variant in variants:
        # Only concrete Pydantic model variants can carry a discriminator field.
        if not inspect.isclass(variant) or not issubclass(variant, pydantic.BaseModel):
            continue

        annotation = _get_field_annotation(variant, discriminator)
        if not annotation or not is_literal_type(annotation):
            continue

        literal_values = get_args(annotation)
        if literal_values and literal_values[0] == discriminator_value:
            return variant
    return None
139
+
140
+
141
def _is_string_type(type_: Type[Any]) -> bool:
    """Check if a type is str or Optional[str]."""
    if type_ is str:
        return True

    if typing_extensions.get_origin(type_) is not Union:
        return False

    # Optional[str] == Union[str, None]: exactly one non-None member, and it is str.
    members = [arg for arg in typing_extensions.get_args(type_) if arg is not type(None)]
    return len(members) == 1 and members[0] is str
155
+
156
+
157
def parse_sse_obj(sse: "ServerSentEvent", type_: Type[T]) -> T:
    """
    Parse a ServerSentEvent into the appropriate type.

    Handles two scenarios based on where the discriminator field is located:

    1. Data-level discrimination: The discriminator (e.g., 'type') is inside the 'data' payload.
       The union describes the data content, not the SSE envelope.
       -> Returns: json.loads(data) parsed into the type

       Example: ChatStreamResponse with discriminator='type'
       Input: ServerSentEvent(event="message", data='{"type": "content-delta", ...}', id="")
       Output: ContentDeltaEvent (parsed from data, SSE envelope stripped)

    2. Event-level discrimination: The discriminator (e.g., 'event') is at the SSE event level.
       The union describes the full SSE event structure.
       -> Returns: SSE envelope with 'data' field JSON-parsed only if the variant expects non-string

       Example: JobStreamResponse with discriminator='event'
       Input: ServerSentEvent(event="ERROR", data='{"code": "FAILED", ...}', id="123")
       Output: JobStreamResponse_Error with data as ErrorData object

       But for variants where data is str (like STATUS_UPDATE):
       Input: ServerSentEvent(event="STATUS_UPDATE", data='{"status": "processing"}', id="1")
       Output: JobStreamResponse_StatusUpdate with data as string (not parsed)

    Args:
        sse: The ServerSentEvent object to parse
        type_: The target discriminated union type

    Returns:
        The parsed object of type T

    Note:
        This function is only available in SDK contexts where http_sse module exists.
    """
    # The dataclass SSE event becomes a plain dict ("envelope") for inspection.
    sse_event = asdict(sse)
    discriminator, variants = _get_discriminator_and_variants(type_)

    if discriminator is None or variants is None:
        # Not a discriminated union - parse the data field as JSON
        data_value = sse_event.get("data")
        if isinstance(data_value, str) and data_value:
            try:
                parsed_data = json.loads(data_value)
                return parse_obj_as(type_, parsed_data)
            except json.JSONDecodeError as e:
                # Fall through to envelope parsing below; log a truncated preview.
                _logger.warning(
                    "Failed to parse SSE data field as JSON: %s, data: %s",
                    e,
                    data_value[:100] if len(data_value) > 100 else data_value,
                )
        return parse_obj_as(type_, sse_event)

    data_value = sse_event.get("data")

    # Check if discriminator is at the top level (event-level discrimination)
    if discriminator in sse_event:
        # Case 2: Event-level discrimination
        # Find the matching variant to check if 'data' field needs JSON parsing
        disc_value = sse_event.get(discriminator)
        matching_variant = _find_variant_by_discriminator(variants, discriminator, disc_value)

        if matching_variant is not None:
            # Check what type the variant expects for 'data'
            data_type = _get_field_annotation(matching_variant, "data")
            if data_type is not None and not _is_string_type(data_type):
                # Variant expects non-string data - parse JSON
                if isinstance(data_value, str) and data_value:
                    try:
                        parsed_data = json.loads(data_value)
                        new_object = dict(sse_event)
                        new_object["data"] = parsed_data
                        return parse_obj_as(type_, new_object)
                    except json.JSONDecodeError as e:
                        _logger.warning(
                            "Failed to parse SSE data field as JSON for event-level discrimination: %s, data: %s",
                            e,
                            data_value[:100] if len(data_value) > 100 else data_value,
                        )
        # Either no matching variant, data is string type, or JSON parse failed
        return parse_obj_as(type_, sse_event)

    else:
        # Case 1: Data-level discrimination
        # The discriminator is inside the data payload - extract and parse data only
        if isinstance(data_value, str) and data_value:
            try:
                parsed_data = json.loads(data_value)
                return parse_obj_as(type_, parsed_data)
            except json.JSONDecodeError as e:
                _logger.warning(
                    "Failed to parse SSE data field as JSON for data-level discrimination: %s, data: %s",
                    e,
                    data_value[:100] if len(data_value) > 100 else data_value,
                )
        return parse_obj_as(type_, sse_event)
254
+
255
+
256
def parse_obj_as(type_: Type[T], object_: Any) -> T:
    """Validate *object_* as *type_* across Pydantic v1/v2, pre-dealiasing wire keys when needed."""
    # convert_and_respect_annotation_metadata is required for TypedDict aliasing.
    #
    # For Pydantic models, whether we should pre-dealias depends on how the model encodes aliasing:
    # - If the model uses real Pydantic aliases (pydantic.Field(alias=...)), then we must pass wire keys through
    #   unchanged so Pydantic can validate them.
    # - If the model encodes aliasing only via FieldMetadata annotations, then we MUST pre-dealias because Pydantic
    #   will not recognize those aliases during validation.
    if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
        # Probe the model's fields for any real Pydantic alias (v2: model_fields, v1: __fields__).
        has_pydantic_aliases = False
        if IS_PYDANTIC_V2:
            for field_name, field_info in getattr(type_, "model_fields", {}).items():  # type: ignore[attr-defined]
                alias = getattr(field_info, "alias", None)
                if alias is not None and alias != field_name:
                    has_pydantic_aliases = True
                    break
        else:
            for field in getattr(type_, "__fields__", {}).values():
                alias = getattr(field, "alias", None)
                name = getattr(field, "name", None)
                if alias is not None and name is not None and alias != name:
                    has_pydantic_aliases = True
                    break

        dealiased_object = (
            object_
            if has_pydantic_aliases
            else convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
        )
    else:
        dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
    if IS_PYDANTIC_V2:
        adapter = pydantic.TypeAdapter(type_)  # type: ignore[attr-defined]
        return adapter.validate_python(dealiased_object)
    return pydantic.parse_obj_as(type_, dealiased_object)
291
+
292
+
293
def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[Any], Any]) -> Any:
    """Serialize via pydantic-core when running on Pydantic v2, else call the fallback directly."""
    if not IS_PYDANTIC_V2:
        return fallback_serializer(obj)

    from pydantic_core import to_jsonable_python

    return to_jsonable_python(obj, fallback=fallback_serializer)
299
+
300
+
301
class UniversalBaseModel(pydantic.BaseModel):
    """
    Shared base for all SDK models.

    Papers over pydantic v1/v2 differences (config, validators, serialization)
    and additionally accepts Python field names on input by rewriting them to
    their wire aliases before validation.
    """

    if IS_PYDANTIC_V2:
        model_config: ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(  # type: ignore[typeddict-unknown-key]
            # Allow fields beginning with `model_` to be used in the model
            protected_namespaces=(),
        )

        @pydantic.model_validator(mode="before")  # type: ignore[attr-defined]
        @classmethod
        def _coerce_field_names_to_aliases(cls, data: Any) -> Any:
            """
            Accept Python field names in input by rewriting them to their Pydantic aliases,
            while avoiding silent collisions when a key could refer to multiple fields.
            """
            # Non-mapping payloads (e.g. already-built models) pass through untouched.
            if not isinstance(data, Mapping):
                return data

            fields = getattr(cls, "model_fields", {})  # type: ignore[attr-defined]
            name_to_alias: Dict[str, str] = {}
            alias_to_name: Dict[str, str] = {}

            for name, field_info in fields.items():
                # A field with no alias maps to itself.
                alias = getattr(field_info, "alias", None) or name
                name_to_alias[name] = alias
                if alias != name:
                    alias_to_name[alias] = name

            # Detect ambiguous keys: a key that is an alias for one field and a name for another.
            ambiguous_keys = set(alias_to_name.keys()).intersection(set(name_to_alias.keys()))
            for key in ambiguous_keys:
                # Only ambiguous when the alias key is present without the
                # disambiguating alias of the same-named field.
                if key in data and name_to_alias[key] not in data:
                    raise ValueError(
                        f"Ambiguous input key '{key}': it is both a field name and an alias. "
                        "Provide the explicit alias key to disambiguate."
                    )

            original_keys = set(data.keys())
            rewritten: Dict[str, Any] = dict(data)
            for name, alias in name_to_alias.items():
                # Rewrite field-name keys to alias keys; an explicitly supplied
                # alias key always wins over the field-name spelling.
                if alias != name and name in original_keys and alias not in rewritten:
                    rewritten[alias] = rewritten.pop(name)

            return rewritten

        @pydantic.model_serializer(mode="plain", when_used="json")  # type: ignore[attr-defined]
        def serialize_model(self) -> Any:  # type: ignore[name-defined]
            # JSON serialization funnels through our dict() override, then
            # formats top-level datetimes with the custom serializer.
            serialized = self.dict()  # type: ignore[attr-defined]
            data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
            return data

    else:

        class Config:
            # v1 equivalents of the v2 behavior above.
            smart_union = True
            json_encoders = {dt.datetime: serialize_datetime}

        @pydantic.root_validator(pre=True)
        def _coerce_field_names_to_aliases(cls, values: Any) -> Any:
            """
            Pydantic v1 equivalent of _coerce_field_names_to_aliases.
            """
            if not isinstance(values, Mapping):
                return values

            fields = getattr(cls, "__fields__", {})
            name_to_alias: Dict[str, str] = {}
            alias_to_name: Dict[str, str] = {}

            for name, field in fields.items():
                alias = getattr(field, "alias", None) or name
                name_to_alias[name] = alias
                if alias != name:
                    alias_to_name[alias] = name

            ambiguous_keys = set(alias_to_name.keys()).intersection(set(name_to_alias.keys()))
            for key in ambiguous_keys:
                if key in values and name_to_alias[key] not in values:
                    raise ValueError(
                        f"Ambiguous input key '{key}': it is both a field name and an alias. "
                        "Provide the explicit alias key to disambiguate."
                    )

            original_keys = set(values.keys())
            rewritten: Dict[str, Any] = dict(values)
            for name, alias in name_to_alias.items():
                if alias != name and name in original_keys and alias not in rewritten:
                    rewritten[alias] = rewritten.pop(name)

            return rewritten

    @classmethod
    def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
        # Dealias TypedDict-style keys before constructing without validation.
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        return cls.construct(_fields_set, **dealiased_object)

    @classmethod
    def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
        # Version-agnostic validation-free construction.
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        if IS_PYDANTIC_V2:
            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore[misc]
        return super().construct(_fields_set, **dealiased_object)

    def json(self, **kwargs: Any) -> str:
        # Default to alias keys and omit unset fields; callers may override.
        kwargs_with_defaults = {
            "by_alias": True,
            "exclude_unset": True,
            **kwargs,
        }
        if IS_PYDANTIC_V2:
            return super().model_dump_json(**kwargs_with_defaults)  # type: ignore[misc]
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: Any) -> Dict[str, Any]:
        """
        Override the default dict method to `exclude_unset` by default. This function patches
        `exclude_unset` to work include fields within non-None default values.
        """
        # Note: the logic here is multiplexed given the levers exposed in Pydantic V1 vs V2
        # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
        #
        # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
        # that we have less control over, and this is less intrusive than custom serializers for now.
        if IS_PYDANTIC_V2:
            kwargs_with_defaults_exclude_unset = {
                **kwargs,
                "by_alias": True,
                "exclude_unset": True,
                "exclude_none": False,
            }
            kwargs_with_defaults_exclude_none = {
                **kwargs,
                "by_alias": True,
                "exclude_none": True,
                "exclude_unset": False,
            }
            # Union of the two dumps: keeps explicitly-set None values while
            # still surfacing non-None defaults.
            dict_dump = deep_union_pydantic_dicts(
                super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore[misc]
                super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore[misc]
            )

        else:
            _fields_set = self.__fields_set__.copy()

            fields = _get_model_fields(self.__class__)
            for name, field in fields.items():
                if name not in _fields_set:
                    default = _get_field_default(field)

                    # If the default values are non-null act like they've been set
                    # This effectively allows exclude_unset to work like exclude_none where
                    # the latter passes through intentionally set none values.
                    if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
                        _fields_set.add(name)

                    if default is not None:
                        self.__fields_set__.add(name)

            kwargs_with_defaults_exclude_unset_include_fields = {
                "by_alias": True,
                "exclude_unset": True,
                "include": _fields_set,
                **kwargs,
            }

            dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)

        # Re-apply FieldMetadata aliases on the way out (write direction).
        return cast(
            Dict[str, Any],
            convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write"),
        )
471
+
472
+
473
+ def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
474
+ converted_list: List[Any] = []
475
+ for i, item in enumerate(source):
476
+ destination_value = destination[i]
477
+ if isinstance(item, dict):
478
+ converted_list.append(deep_union_pydantic_dicts(item, destination_value))
479
+ elif isinstance(item, list):
480
+ converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
481
+ else:
482
+ converted_list.append(item)
483
+ return converted_list
484
+
485
+
486
def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
    """
    Recursively merge *source* into *destination* (mutating it in place) and
    return *destination*. Dict values are merged key-by-key, list values are
    merged element-wise, and all other values from *source* overwrite.
    """
    for key, source_value in source.items():
        existing = destination.setdefault(key, {})
        if isinstance(source_value, dict):
            deep_union_pydantic_dicts(source_value, existing)
        # Note: we do not do this same processing for sets given we do not have sets of models
        # and given the sets are unordered, the processing of the set and matching objects would
        # be non-trivial.
        elif isinstance(source_value, list):
            destination[key] = _union_list_of_pydantic_dicts(source_value, existing)
        else:
            destination[key] = source_value

    return destination
500
+
501
+
502
if IS_PYDANTIC_V2:

    class V2RootModel(UniversalBaseModel, pydantic.RootModel):  # type: ignore[misc, name-defined, type-arg]
        """Pydantic-v2 root model carrying the universal base behavior."""

        pass

    # Version-agnostic alias: v2 uses RootModel, v1 uses the plain base.
    UniversalRootModel: TypeAlias = V2RootModel  # type: ignore[misc]
else:
    UniversalRootModel: TypeAlias = UniversalBaseModel  # type: ignore[misc, no-redef]
510
+
511
+
512
def encode_by_type(o: Any) -> Any:
    """
    Serialize *o* with the encoder registered for its exact type, falling back
    to the first encoder whose registered types match via isinstance.
    Returns None when no encoder applies.
    """
    grouped: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
    for registered_type, encode in encoders_by_type.items():
        grouped[encode] += (registered_type,)

    exact_type = type(o)
    if exact_type in encoders_by_type:
        return encoders_by_type[exact_type](o)
    for encode, type_group in grouped.items():
        if isinstance(o, type_group):
            return encode(o)
522
+
523
+
524
def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
    """Resolve forward references on *model*, abstracting over pydantic versions."""
    if not IS_PYDANTIC_V2:
        model.update_forward_refs(**localns)
        return
    # v2: rebuild lazily; unresolved references are tolerated for now.
    model.model_rebuild(raise_errors=False)  # type: ignore[attr-defined]
529
+
530
+
531
# Mirrors Pydantic's internal typing: the loose callable type used for validators.
AnyCallable = Callable[..., Any]
533
+
534
+
535
def universal_root_validator(
    pre: bool = False,
) -> Callable[[AnyCallable], AnyCallable]:
    """
    Decorator factory wrapping pydantic's root validator in a version-agnostic way.
    *pre* only affects the v1 path; see the note below for v2.
    """

    def decorator(func: AnyCallable) -> AnyCallable:
        if not IS_PYDANTIC_V2:
            return cast(AnyCallable, pydantic.root_validator(pre=pre)(func))  # type: ignore[call-overload]
        # In Pydantic v2, for RootModel we always use "before" mode
        # The custom validators transform the input value before the model is created
        return cast(AnyCallable, pydantic.model_validator(mode="before")(func))  # type: ignore[attr-defined]

    return decorator
546
+
547
+
548
def universal_field_validator(field_name: str, pre: bool = False) -> Callable[[AnyCallable], AnyCallable]:
    """
    Decorator factory wrapping pydantic's per-field validator in a
    version-agnostic way; *pre* maps to v2's "before"/"after" modes.
    """

    def decorator(func: AnyCallable) -> AnyCallable:
        if not IS_PYDANTIC_V2:
            return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func))
        mode = "before" if pre else "after"
        return cast(AnyCallable, pydantic.field_validator(field_name, mode=mode)(func))  # type: ignore[attr-defined]

    return decorator
555
+
556
+
557
# A field descriptor from either pydantic major version (v1 ModelField, v2 FieldInfo).
PydanticField = Union[ModelField, pydantic.fields.FieldInfo]
558
+
559
+
560
def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
    """Return the model's field map, abstracting over the v1/v2 attribute name."""
    fields_attr = "model_fields" if IS_PYDANTIC_V2 else "__fields__"
    return cast(Mapping[str, PydanticField], getattr(model, fields_attr))
564
+
565
+
566
def _get_field_default(field: PydanticField) -> Any:
    """
    Return the field's default value, or None when the field has no default.

    Works for both pydantic versions: v2 FieldInfo exposes ``get_default()``,
    while the v1 ModelField path falls back to the ``.default`` attribute.
    """
    try:
        value = field.get_default()  # type: ignore[union-attr]
    except Exception:
        # Bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # Exception is sufficient to cover a missing/failing get_default().
        value = field.default
    if IS_PYDANTIC_V2:
        from pydantic_core import PydanticUndefined

        # PydanticUndefined is a singleton sentinel — identity check is the
        # correct comparison and avoids invoking a custom __eq__ on `value`.
        if value is PydanticUndefined:
            return None
    return value
venv/lib/python3.12/site-packages/sarvamai/core/query_encoder.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import Any, Dict, List, Optional, Tuple
4
+
5
+ import pydantic
6
+
7
+
8
+ # Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict
9
+ def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> List[Tuple[str, Any]]:
10
+ result = []
11
+ for k, v in dict_flat.items():
12
+ key = f"{key_prefix}[{k}]" if key_prefix is not None else k
13
+ if isinstance(v, dict):
14
+ result.extend(traverse_query_dict(v, key))
15
+ elif isinstance(v, list):
16
+ for arr_v in v:
17
+ if isinstance(arr_v, dict):
18
+ result.extend(traverse_query_dict(arr_v, key))
19
+ else:
20
+ result.append((key, arr_v))
21
+ else:
22
+ result.append((key, v))
23
+ return result
24
+
25
+
26
def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]:
    """
    Encode one query parameter into (key, value) pairs, expanding pydantic
    models and dicts into bracketed sub-keys and flattening list values.
    """
    if isinstance(query_value, (pydantic.BaseModel, dict)):
        if isinstance(query_value, pydantic.BaseModel):
            as_dict = query_value.dict(by_alias=True)
        else:
            as_dict = query_value
        return traverse_query_dict(as_dict, query_key)

    if isinstance(query_value, list):
        encoded: List[Tuple[str, Any]] = []
        for element in query_value:
            if isinstance(element, (pydantic.BaseModel, dict)):
                if isinstance(element, pydantic.BaseModel):
                    as_dict = element.dict(by_alias=True)
                else:
                    as_dict = element
                encoded.extend(single_query_encoder(query_key, as_dict))
            else:
                encoded.append((query_key, element))
        return encoded

    return [(query_key, query_value)]
49
+
50
+
51
def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]:
    """Encode a query dict into a flat list of (key, value) pairs; None passes through."""
    if query is None:
        return None

    pairs: List[Tuple[str, Any]] = []
    for key, value in query.items():
        pairs.extend(single_query_encoder(key, value))
    return pairs
venv/lib/python3.12/site-packages/sarvamai/core/remove_none_from_dict.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from typing import Any, Dict, Mapping, Optional
4
+
5
+
6
def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]:
    """Return a new dict with every None-valued entry of *original* dropped."""
    return {key: value for key, value in original.items() if value is not None}
venv/lib/python3.12/site-packages/sarvamai/core/request_options.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ try:
6
+ from typing import NotRequired # type: ignore
7
+ except ImportError:
8
+ from typing_extensions import NotRequired
9
+
10
+
11
# total=False + NotRequired: every option is optional; callers pass only what they need.
class RequestOptions(typing.TypedDict, total=False):
    """
    Additional options for request-specific configuration when calling APIs via the SDK.
    This is used primarily as an optional final parameter for service functions.

    Attributes:
        - timeout_in_seconds: int. The number of seconds to await an API call before timing out.

        - max_retries: int. The max number of retries to attempt if the API call fails.

        - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict

        - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict

        - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict

        - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads.
    """

    timeout_in_seconds: NotRequired[int]
    max_retries: NotRequired[int]
    additional_headers: NotRequired[typing.Dict[str, typing.Any]]
    additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]]
    additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]]
    chunk_size: NotRequired[int]
venv/lib/python3.12/site-packages/sarvamai/core/serialization.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import collections
4
+ import inspect
5
+ import typing
6
+
7
+ import pydantic
8
+ import typing_extensions
9
+
10
+
11
class FieldMetadata:
    """
    Annotation metadata attaching a serialization alias to a field.

    Intended for typing.Annotated on TypedDict members, which cannot carry
    aliases natively:

        class MyDict(TypedDict):
            field: typing.Annotated[str, FieldMetadata(alias="field_name")]

    serializes `{"field": "value"}` as `{"field_name": "value"}`.
    """

    alias: str

    def __init__(self, *, alias: str) -> None:
        self.alias = alias
27
+
28
+
29
def convert_and_respect_annotation_metadata(
    *,
    object_: typing.Any,
    annotation: typing.Any,
    inner_type: typing.Optional[typing.Any] = None,
    direction: typing.Literal["read", "write"],
) -> typing.Any:
    """
    Respect the metadata annotations on a field, such as aliasing. This function effectively
    manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for
    TypedDicts, which cannot support aliasing out of the box, and can be extended for additional
    utilities, such as defaults.

    Parameters
    ----------
    object_ : typing.Any
        The runtime value to convert (dict/list/set/scalar).

    annotation : type
        The type we're looking to apply typing annotations from

    inner_type : typing.Optional[type]
        Element type when recursing into containers; defaults to *annotation*.

    direction : typing.Literal["read", "write"]
        "read" converts alias keys to field names; "write" does the reverse.

    Returns
    -------
    typing.Any
    """

    if object_ is None:
        return None
    if inner_type is None:
        inner_type = annotation

    # Strip NotRequired/Annotated wrappers so origin checks see the bare type.
    clean_type = _remove_annotations(inner_type)
    # Pydantic models
    if (
        inspect.isclass(clean_type)
        and issubclass(clean_type, pydantic.BaseModel)
        and isinstance(object_, typing.Mapping)
    ):
        return _convert_mapping(object_, clean_type, direction)
    # TypedDicts
    if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping):
        return _convert_mapping(object_, clean_type, direction)

    if (
        typing_extensions.get_origin(clean_type) == typing.Dict
        or typing_extensions.get_origin(clean_type) == dict
        or clean_type == typing.Dict
    ) and isinstance(object_, typing.Dict):
        # NOTE(review): key_type is extracted but unused — only values recurse.
        key_type = typing_extensions.get_args(clean_type)[0]
        value_type = typing_extensions.get_args(clean_type)[1]

        return {
            key: convert_and_respect_annotation_metadata(
                object_=value,
                annotation=annotation,
                inner_type=value_type,
                direction=direction,
            )
            for key, value in object_.items()
        }

    # If you're iterating on a string, do not bother to coerce it to a sequence.
    if not isinstance(object_, str):
        if (
            typing_extensions.get_origin(clean_type) == typing.Set
            or typing_extensions.get_origin(clean_type) == set
            or clean_type == typing.Set
        ) and isinstance(object_, typing.Set):
            inner_type = typing_extensions.get_args(clean_type)[0]
            return {
                convert_and_respect_annotation_metadata(
                    object_=item,
                    annotation=annotation,
                    inner_type=inner_type,
                    direction=direction,
                )
                for item in object_
            }
        elif (
            (
                typing_extensions.get_origin(clean_type) == typing.List
                or typing_extensions.get_origin(clean_type) == list
                or clean_type == typing.List
            )
            and isinstance(object_, typing.List)
        ) or (
            (
                typing_extensions.get_origin(clean_type) == typing.Sequence
                or typing_extensions.get_origin(clean_type) == collections.abc.Sequence
                or clean_type == typing.Sequence
            )
            and isinstance(object_, typing.Sequence)
        ):
            inner_type = typing_extensions.get_args(clean_type)[0]
            return [
                convert_and_respect_annotation_metadata(
                    object_=item,
                    annotation=annotation,
                    inner_type=inner_type,
                    direction=direction,
                )
                for item in object_
            ]

    if typing_extensions.get_origin(clean_type) == typing.Union:
        # We should be able to ~relatively~ safely try to convert keys against all
        # member types in the union, the edge case here is if one member aliases a field
        # of the same name to a different name from another member
        # Or if another member aliases a field of the same name that another member does not.
        for member in typing_extensions.get_args(clean_type):
            object_ = convert_and_respect_annotation_metadata(
                object_=object_,
                annotation=annotation,
                inner_type=member,
                direction=direction,
            )
        return object_

    annotated_type = _get_annotation(annotation)
    if annotated_type is None:
        return object_

    # If the object is not a TypedDict, a Union, or other container (list, set, sequence, etc.)
    # Then we can safely call it on the recursive conversion.
    return object_
155
+
156
+
157
def _convert_mapping(
    object_: typing.Mapping[str, object],
    expected_type: typing.Any,
    direction: typing.Literal["read", "write"],
) -> typing.Mapping[str, object]:
    """
    Convert one mapping's keys/values against *expected_type*'s annotations,
    renaming keys between aliases and field names according to *direction*.
    """
    converted_object: typing.Dict[str, object] = {}
    try:
        annotations = typing_extensions.get_type_hints(expected_type, include_extras=True)
    except NameError:
        # The TypedDict contains a circular reference, so
        # we use the __annotations__ attribute directly.
        annotations = getattr(expected_type, "__annotations__", {})
    aliases_to_field_names = _get_alias_to_field_name(annotations)
    for key, value in object_.items():
        if direction == "read" and key in aliases_to_field_names:
            # Reading: look the annotation up under the de-aliased field name.
            dealiased_key = aliases_to_field_names.get(key)
            if dealiased_key is not None:
                type_ = annotations.get(dealiased_key)
        else:
            type_ = annotations.get(key)
        # Note you can't get the annotation by the field name if you're in read mode, so you must check the aliases map
        #
        # So this is effectively saying if we're in write mode, and we don't have a type, or if we're in read mode and we don't have an alias
        # then we can just pass the value through as is
        if type_ is None:
            converted_object[key] = value
        elif direction == "read" and key not in aliases_to_field_names:
            # Read mode, key already a field name: recurse but keep the key.
            converted_object[key] = convert_and_respect_annotation_metadata(
                object_=value, annotation=type_, direction=direction
            )
        else:
            converted_object[_alias_key(key, type_, direction, aliases_to_field_names)] = (
                convert_and_respect_annotation_metadata(object_=value, annotation=type_, direction=direction)
            )
    return converted_object
192
+
193
+
194
def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]:
    """
    Return *type_* if it is an Annotated[...] type (unwrapping one level of
    NotRequired first); otherwise return None.
    """
    origin = typing_extensions.get_origin(type_)
    if origin is None:
        return None

    if origin == typing_extensions.NotRequired:
        # Unwrap NotRequired[...] and inspect what it wraps.
        type_ = typing_extensions.get_args(type_)[0]
        origin = typing_extensions.get_origin(type_)

    return type_ if origin == typing_extensions.Annotated else None
207
+
208
+
209
def _remove_annotations(type_: typing.Any) -> typing.Any:
    """Strip NotRequired[...] and Annotated[...] wrappers down to the bare type."""
    origin = typing_extensions.get_origin(type_)

    # Recurse through one wrapper at a time until a bare type remains.
    if origin == typing_extensions.NotRequired or origin == typing_extensions.Annotated:
        return _remove_annotations(typing_extensions.get_args(type_)[0])

    return type_
221
+
222
+
223
def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]:
    """Map serialization aliases to field names for the given annotated type."""
    hints = typing_extensions.get_type_hints(type_, include_extras=True)
    return _get_alias_to_field_name(hints)
226
+
227
+
228
def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]:
    """Map field names to their serialization aliases for the given annotated type."""
    hints = typing_extensions.get_type_hints(type_, include_extras=True)
    return _get_field_to_alias_name(hints)
231
+
232
+
233
def _get_alias_to_field_name(
    field_to_hint: typing.Dict[str, typing.Any],
) -> typing.Dict[str, str]:
    """Invert field->hint into alias->field for hints carrying a FieldMetadata alias."""
    return {
        alias: field_name
        for field_name, hint in field_to_hint.items()
        if (alias := _get_alias_from_type(hint)) is not None
    }
242
+
243
+
244
def _get_field_to_alias_name(
    field_to_hint: typing.Dict[str, typing.Any],
) -> typing.Dict[str, str]:
    """Collect field->alias pairs for hints carrying a FieldMetadata alias."""
    return {
        field_name: alias
        for field_name, hint in field_to_hint.items()
        if (alias := _get_alias_from_type(hint)) is not None
    }
253
+
254
+
255
def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]:
    """Return the FieldMetadata alias attached to an Annotated type, if any."""
    annotated = _get_annotation(type_)

    if annotated is None:
        return None

    # The actual annotations are 1 onward, the first is the annotated type
    for metadata in typing_extensions.get_args(annotated)[1:]:
        if isinstance(metadata, FieldMetadata) and metadata.alias is not None:
            return metadata.alias
    return None
266
+
267
+
268
def _alias_key(
    key: str,
    type_: typing.Any,
    direction: typing.Literal["read", "write"],
    aliases_to_field_names: typing.Dict[str, str],
) -> str:
    """Translate *key* between alias and field name according to *direction*."""
    if direction == "write":
        # Writing: field name -> declared alias (fall back to the key itself).
        return _get_alias_from_type(type_=type_) or key
    # Reading: alias -> field name (identity when the key has no alias entry).
    return aliases_to_field_names.get(key, key)
venv/lib/python3.12/site-packages/sarvamai/core/websocket_compat.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
# Compatibility shim for the `websockets` library: older releases expose
# InvalidStatusCode (status directly on the exception), while newer releases
# replaced it with InvalidStatus (status nested on a response object).
try:
    from websockets.exceptions import InvalidStatusCode as InvalidWebSocketStatus  # type: ignore[attr-defined]

    def get_status_code(exc: InvalidWebSocketStatus) -> int:
        """Return the HTTP status from the (older-style) handshake exception."""
        return exc.status_code  # type: ignore[attr-defined, union-attr]

except ImportError:
    from websockets.exceptions import InvalidStatus as InvalidWebSocketStatus  # type: ignore[assignment, no-redef]

    def get_status_code(exc: InvalidWebSocketStatus) -> int:  # type: ignore[no-redef]
        """Return the HTTP status from the (newer-style) handshake exception."""
        return exc.response.status_code  # type: ignore[attr-defined, union-attr]
venv/lib/python3.12/site-packages/sarvamai/document_intelligence/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # isort: skip_file
4
+
venv/lib/python3.12/site-packages/sarvamai/document_intelligence/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (203 Bytes). View file