koichi12 commited on
Commit
8ea62b1
·
verified ·
1 Parent(s): bd38811

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/INSTALLER +1 -0
  2. .venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/LICENSE +201 -0
  3. .venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/RECORD +243 -0
  4. .venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/WHEEL +5 -0
  5. .venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/entry_points.txt +6 -0
  6. .venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/INSTALLER +1 -0
  7. .venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/License.txt +1568 -0
  8. .venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/RECORD +53 -0
  9. .venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/WHEEL +5 -0
  10. .venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/top_level.txt +1 -0
  11. .venv/lib/python3.11/site-packages/openai/_extras/__init__.py +2 -0
  12. .venv/lib/python3.11/site-packages/openai/_extras/__pycache__/__init__.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/openai/_extras/__pycache__/_common.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/openai/_extras/__pycache__/numpy_proxy.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/openai/_extras/__pycache__/pandas_proxy.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/openai/_extras/_common.py +21 -0
  17. .venv/lib/python3.11/site-packages/openai/_extras/numpy_proxy.py +37 -0
  18. .venv/lib/python3.11/site-packages/openai/_extras/pandas_proxy.py +28 -0
  19. .venv/lib/python3.11/site-packages/openai/lib/__pycache__/__init__.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/openai/lib/__pycache__/_old_api.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/openai/lib/__pycache__/_pydantic.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/openai/lib/__pycache__/_tools.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/openai/lib/__pycache__/_validators.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/openai/lib/streaming/chat/__pycache__/__init__.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/openai/lib/streaming/chat/__pycache__/_types.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/__init__.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/batches.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/completions.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/embeddings.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/files.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/images.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/models.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/openai/resources/__pycache__/moderations.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/__init__.py +11 -0
  35. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/__init__.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/chat.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/completions.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/chat.py +21 -0
  39. .venv/lib/python3.11/site-packages/openai/resources/beta/chat/completions.py +630 -0
  40. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__init__.py +33 -0
  41. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/__init__.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/realtime.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/sessions.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/realtime.py +966 -0
  45. .venv/lib/python3.11/site-packages/openai/resources/beta/realtime/sessions.py +351 -0
  46. .venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__init__.py +47 -0
  47. .venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/__init__.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/file_batches.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/files.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/vector_stores.cpython-311.pyc +0 -0
.venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
.venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
.venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/RECORD ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/huggingface-cli,sha256=ZGrAah4v9DA6vnRHkgQM6FxpauvKRggIimICk3X8Mew,257
2
+ huggingface_hub-0.28.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ huggingface_hub-0.28.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
4
+ huggingface_hub-0.28.1.dist-info/METADATA,sha256=8vU6rq7_kzBwtVh5TY-h1UodfzCADDoHmsak4HCh7sA,13480
5
+ huggingface_hub-0.28.1.dist-info/RECORD,,
6
+ huggingface_hub-0.28.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
7
+ huggingface_hub-0.28.1.dist-info/entry_points.txt,sha256=Y3Z2L02rBG7va_iE6RPXolIgwOdwUFONyRN3kXMxZ0g,131
8
+ huggingface_hub-0.28.1.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
9
+ huggingface_hub/__init__.py,sha256=Jb4vfjjqH66CyTLCLUQJAW-yosi8Cz5CSfMtynsXJFI,48868
10
+ huggingface_hub/__pycache__/__init__.cpython-311.pyc,,
11
+ huggingface_hub/__pycache__/_commit_api.cpython-311.pyc,,
12
+ huggingface_hub/__pycache__/_commit_scheduler.cpython-311.pyc,,
13
+ huggingface_hub/__pycache__/_inference_endpoints.cpython-311.pyc,,
14
+ huggingface_hub/__pycache__/_local_folder.cpython-311.pyc,,
15
+ huggingface_hub/__pycache__/_login.cpython-311.pyc,,
16
+ huggingface_hub/__pycache__/_snapshot_download.cpython-311.pyc,,
17
+ huggingface_hub/__pycache__/_space_api.cpython-311.pyc,,
18
+ huggingface_hub/__pycache__/_tensorboard_logger.cpython-311.pyc,,
19
+ huggingface_hub/__pycache__/_upload_large_folder.cpython-311.pyc,,
20
+ huggingface_hub/__pycache__/_webhooks_payload.cpython-311.pyc,,
21
+ huggingface_hub/__pycache__/_webhooks_server.cpython-311.pyc,,
22
+ huggingface_hub/__pycache__/community.cpython-311.pyc,,
23
+ huggingface_hub/__pycache__/constants.cpython-311.pyc,,
24
+ huggingface_hub/__pycache__/errors.cpython-311.pyc,,
25
+ huggingface_hub/__pycache__/fastai_utils.cpython-311.pyc,,
26
+ huggingface_hub/__pycache__/file_download.cpython-311.pyc,,
27
+ huggingface_hub/__pycache__/hf_api.cpython-311.pyc,,
28
+ huggingface_hub/__pycache__/hf_file_system.cpython-311.pyc,,
29
+ huggingface_hub/__pycache__/hub_mixin.cpython-311.pyc,,
30
+ huggingface_hub/__pycache__/inference_api.cpython-311.pyc,,
31
+ huggingface_hub/__pycache__/keras_mixin.cpython-311.pyc,,
32
+ huggingface_hub/__pycache__/lfs.cpython-311.pyc,,
33
+ huggingface_hub/__pycache__/repocard.cpython-311.pyc,,
34
+ huggingface_hub/__pycache__/repocard_data.cpython-311.pyc,,
35
+ huggingface_hub/__pycache__/repository.cpython-311.pyc,,
36
+ huggingface_hub/_commit_api.py,sha256=TqXmu5moVAhBa7iuyJdsqsfRTxTpGMnvsPkb4GgC3dc,32636
37
+ huggingface_hub/_commit_scheduler.py,sha256=tfIoO1xWHjTJ6qy6VS6HIoymDycFPg0d6pBSZprrU2U,14679
38
+ huggingface_hub/_inference_endpoints.py,sha256=PmW6IdIq9SAvlniPuyh3alKfahAmm4cyArQ_vrIcgQE,16992
39
+ huggingface_hub/_local_folder.py,sha256=ScpCJUITFC0LMkiebyaGiBhAU6fvQK8w7pVV6L8rhmc,16575
40
+ huggingface_hub/_login.py,sha256=ssf4viT5BhHI2ZidnSuAZcrwSxzaLOrf8xgRVKuvu_A,20298
41
+ huggingface_hub/_snapshot_download.py,sha256=zZDaPBb4CfMCU7DgxjbaFmdoISCY425RaH7wXwFijEM,14992
42
+ huggingface_hub/_space_api.py,sha256=QVOUNty2T4RxPoxf9FzUjXmjHiGXP0mqXJzqQ7GmoJo,5363
43
+ huggingface_hub/_tensorboard_logger.py,sha256=ZkYcAUiRC8RGL214QUYtp58O8G5tn-HF6DCWha9imcA,8358
44
+ huggingface_hub/_upload_large_folder.py,sha256=g9P2-pQkbOd9UF_unXd9SpCX8p_U0gLyjqXZpkUl4p0,23484
45
+ huggingface_hub/_webhooks_payload.py,sha256=Xm3KaK7tCOGBlXkuZvbym6zjHXrT1XCrbUFWuXiBmNY,3617
46
+ huggingface_hub/_webhooks_server.py,sha256=oCvpFrYjrhJjClAMw26SQfvN4DUItgK2IhFp1OVh2bU,15623
47
+ huggingface_hub/commands/__init__.py,sha256=AkbM2a-iGh0Vq_xAWhK3mu3uZ44km8-X5uWjKcvcrUQ,928
48
+ huggingface_hub/commands/__pycache__/__init__.cpython-311.pyc,,
49
+ huggingface_hub/commands/__pycache__/_cli_utils.cpython-311.pyc,,
50
+ huggingface_hub/commands/__pycache__/delete_cache.cpython-311.pyc,,
51
+ huggingface_hub/commands/__pycache__/download.cpython-311.pyc,,
52
+ huggingface_hub/commands/__pycache__/env.cpython-311.pyc,,
53
+ huggingface_hub/commands/__pycache__/huggingface_cli.cpython-311.pyc,,
54
+ huggingface_hub/commands/__pycache__/lfs.cpython-311.pyc,,
55
+ huggingface_hub/commands/__pycache__/repo_files.cpython-311.pyc,,
56
+ huggingface_hub/commands/__pycache__/scan_cache.cpython-311.pyc,,
57
+ huggingface_hub/commands/__pycache__/tag.cpython-311.pyc,,
58
+ huggingface_hub/commands/__pycache__/upload.cpython-311.pyc,,
59
+ huggingface_hub/commands/__pycache__/upload_large_folder.cpython-311.pyc,,
60
+ huggingface_hub/commands/__pycache__/user.cpython-311.pyc,,
61
+ huggingface_hub/commands/__pycache__/version.cpython-311.pyc,,
62
+ huggingface_hub/commands/_cli_utils.py,sha256=Nt6CjbkYqQQRuh70bUXVA6rZpbZt_Sa1WqBUxjQLu6g,2095
63
+ huggingface_hub/commands/delete_cache.py,sha256=Rb1BtIltJPnQ-th7tcK_L4mFqfk785t3KXV77xXKBP4,16131
64
+ huggingface_hub/commands/download.py,sha256=1YXKttB8YBX7SJ0Jxg0t1n8yp2BUZXtY0ck6DhCg-XE,8183
65
+ huggingface_hub/commands/env.py,sha256=yYl4DSS14V8t244nAi0t77Izx5LIdgS_dy6xiV5VQME,1226
66
+ huggingface_hub/commands/huggingface_cli.py,sha256=ZwW_nwgppyj-GA6iM3mgmbXMZ63bgtpGl_yIQDyWS4A,2414
67
+ huggingface_hub/commands/lfs.py,sha256=xdbnNRO04UuQemEhUGT809jFgQn9Rj-SnyT_0Ph-VYg,7342
68
+ huggingface_hub/commands/repo_files.py,sha256=Nfv8TjuaZVOrj7TZjrojtjdD8Wf54aZvYPDEOevh7tA,4923
69
+ huggingface_hub/commands/scan_cache.py,sha256=xdD_zRKd49hRuATyptG-zaY08h1f9CAjB5zZBKe0YEo,8563
70
+ huggingface_hub/commands/tag.py,sha256=0LNQZyK-WKi0VIL9i1xWzKxJ1ILw1jxMF_E6t2weJss,6288
71
+ huggingface_hub/commands/upload.py,sha256=xMExm68YcR8R_dDRi3bcIC1qVCvRFRW7aP_AGxGZ1rc,13656
72
+ huggingface_hub/commands/upload_large_folder.py,sha256=P-EO44JWVl39Ax4b0E0Z873d0a6S38Qas8P6DaL1EwI,6129
73
+ huggingface_hub/commands/user.py,sha256=M6Ef045YcyV4mFCbLaTRPciQDC6xtV9MMheeen69D0E,11168
74
+ huggingface_hub/commands/version.py,sha256=vfCJn7GO1m-DtDmbdsty8_RTVtnZ7lX6MJsx0Bf4e-s,1266
75
+ huggingface_hub/community.py,sha256=4MtcoxEI9_0lmmilBEnvUEi8_O1Ivfa8p6eKxYU5-ts,12198
76
+ huggingface_hub/constants.py,sha256=PrJE6b8einTDbID6IHkgm28tNA8uwp3dt32NDoSPrys,8218
77
+ huggingface_hub/errors.py,sha256=zble0j94ai8zwyM0a2DovwcF372zQohwDsgajTsaxqI,9703
78
+ huggingface_hub/fastai_utils.py,sha256=DpeH9d-6ut2k_nCAAwglM51XmRmgfbRe2SPifpVL5Yk,16745
79
+ huggingface_hub/file_download.py,sha256=h3egIfo07tAC7fbXFfVj1GRCmKn6qTXyMUoSYhu3bSo,70364
80
+ huggingface_hub/hf_api.py,sha256=cvGLQroZ1wT1H3s_vKA1Q2TrogCKDQiONkuitVw2TeU,422283
81
+ huggingface_hub/hf_file_system.py,sha256=m_g7uYLGxTdsBnhvR5835jvYMAuEBsUSFvEbzZKzzoo,47500
82
+ huggingface_hub/hub_mixin.py,sha256=-oTnuB3b-0WeutZ1iBkAy1YuWrBKvHBVBpmd3-7oGB4,37419
83
+ huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
84
+ huggingface_hub/inference/__pycache__/__init__.cpython-311.pyc,,
85
+ huggingface_hub/inference/__pycache__/_client.cpython-311.pyc,,
86
+ huggingface_hub/inference/__pycache__/_common.cpython-311.pyc,,
87
+ huggingface_hub/inference/_client.py,sha256=l9-scOnCJD3qDawdmu2mW6OtHewigM79Njgh_HEC-_4,159438
88
+ huggingface_hub/inference/_common.py,sha256=PfdkPHtpoXP3DP70XBOkXq_GJPKhh6mFsdZ1kGf1pw0,15168
89
+ huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
90
+ huggingface_hub/inference/_generated/__pycache__/__init__.cpython-311.pyc,,
91
+ huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-311.pyc,,
92
+ huggingface_hub/inference/_generated/_async_client.py,sha256=CcV_Pe44suV8NIkGcuqn-0M5icuyh-sJ-vCdDY0sVXE,165975
93
+ huggingface_hub/inference/_generated/types/__init__.py,sha256=GFq7Yz0cen-VCxcf7AxWjRV_hsWN3jf67FyWzDUm0DA,6297
94
+ huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-311.pyc,,
95
+ huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-311.pyc,,
96
+ huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-311.pyc,,
97
+ huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-311.pyc,,
98
+ huggingface_hub/inference/_generated/types/__pycache__/base.cpython-311.pyc,,
99
+ huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-311.pyc,,
100
+ huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-311.pyc,,
101
+ huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-311.pyc,,
102
+ huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-311.pyc,,
103
+ huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-311.pyc,,
104
+ huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-311.pyc,,
105
+ huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-311.pyc,,
106
+ huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-311.pyc,,
107
+ huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-311.pyc,,
108
+ huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-311.pyc,,
109
+ huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-311.pyc,,
110
+ huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-311.pyc,,
111
+ huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-311.pyc,,
112
+ huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-311.pyc,,
113
+ huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-311.pyc,,
114
+ huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-311.pyc,,
115
+ huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-311.pyc,,
116
+ huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-311.pyc,,
117
+ huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-311.pyc,,
118
+ huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-311.pyc,,
119
+ huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-311.pyc,,
120
+ huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-311.pyc,,
121
+ huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-311.pyc,,
122
+ huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-311.pyc,,
123
+ huggingface_hub/inference/_generated/types/__pycache__/visual_question_answering.cpython-311.pyc,,
124
+ huggingface_hub/inference/_generated/types/__pycache__/zero_shot_classification.cpython-311.pyc,,
125
+ huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-311.pyc,,
126
+ huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-311.pyc,,
127
+ huggingface_hub/inference/_generated/types/audio_classification.py,sha256=U340ccLtMjqRMtD3hviXCQaWk_DWJgQWor8vLgW_GKM,1552
128
+ huggingface_hub/inference/_generated/types/audio_to_audio.py,sha256=n7GeCepzt254yoSLsdjrI1j4fzYgjWzxoaKE5gZJc48,881
129
+ huggingface_hub/inference/_generated/types/automatic_speech_recognition.py,sha256=g5L2LasOLPW49ik69SlUu-EVdNn7IWWpmGhaP9b-en0,5582
130
+ huggingface_hub/inference/_generated/types/base.py,sha256=dQ-ej4weVueTJQXaDFLYFhKvlCUYVH2k9r-Ck0uhFIU,5870
131
+ huggingface_hub/inference/_generated/types/chat_completion.py,sha256=6QKg0upzaNTv5esWfRZbtYixNrcsvGNrUpPw1mNezQ8,9589
132
+ huggingface_hub/inference/_generated/types/depth_estimation.py,sha256=3ZUI2w3J-WgDDxlRQ_444r46TlFRhHBt2LN2LLVJB-w,919
133
+ huggingface_hub/inference/_generated/types/document_question_answering.py,sha256=NtEF_MRPbj4iaNIFSBC5sPf92HKeOc3B7ZZE1RooLFU,3170
134
+ huggingface_hub/inference/_generated/types/feature_extraction.py,sha256=-lCiN_i4ChGz4Dtw75muArKuvVfpquy-XRfC7OS7r0Q,1490
135
+ huggingface_hub/inference/_generated/types/fill_mask.py,sha256=fxYjJgERCwKS-LUc3FPfKEqo0x-BemwmC_MbgUZwdkQ,1687
136
+ huggingface_hub/inference/_generated/types/image_classification.py,sha256=hCvz1MpMVXY3KlVENmq1Ygn4SsbE9UngPH9Y7Nj_kgg,1564
137
+ huggingface_hub/inference/_generated/types/image_segmentation.py,sha256=4T0rj-wGJnmqvbcj7kVt_L_mhWjEpjFH9o0VhHXaGRs,1929
138
+ huggingface_hub/inference/_generated/types/image_to_image.py,sha256=iJvTWi3MxbT5nwVB-CDYG9rardMPcHcGJBaxN6thLiE,1922
139
+ huggingface_hub/inference/_generated/types/image_to_text.py,sha256=kocfj5jn_sPDk5FSR_PGl4Xq72BNHOwHEKCP3wv2poY,4887
140
+ huggingface_hub/inference/_generated/types/object_detection.py,sha256=eS3bbkpdtjLu2k2TjlEvLjVtMI5OdHJK1jq3kTzVZdQ,1968
141
+ huggingface_hub/inference/_generated/types/question_answering.py,sha256=Dzpwz3KWq3uQuzIwMRbUxSHscpLsVOPjwrKVqLJR5vA,2866
142
+ huggingface_hub/inference/_generated/types/sentence_similarity.py,sha256=y3KHkCYxn1tOEPrOQ6DNqQtCEhNNtGEN5aMstiQ6hVA,1042
143
+ huggingface_hub/inference/_generated/types/summarization.py,sha256=uwW7QdEXhWSkDIqlz_EEZiaaQscgjzTocR1bu7LRaxU,1466
144
+ huggingface_hub/inference/_generated/types/table_question_answering.py,sha256=YLMT7FY_nx6CSv146p6MyFMzwSZLsYSmOQVG39aiKUQ,2261
145
+ huggingface_hub/inference/_generated/types/text2text_generation.py,sha256=dqwTt-i-k6jo9DdGUS5ZRtrkLTVWoje3L5C3-pMv45Q,1588
146
+ huggingface_hub/inference/_generated/types/text_classification.py,sha256=j-0g3Ajaxg4e5TQlNs3UqVryvp5delYm8znrgzvkW-8,1424
147
+ huggingface_hub/inference/_generated/types/text_generation.py,sha256=TKJb26T03Gyw3As0IXmRzAritNeKUpturKfO_2OenzA,5798
148
+ huggingface_hub/inference/_generated/types/text_to_audio.py,sha256=AHVRQ-TIWnuxa3AvnromoiIYYPawaQDvVcSwlNj0Gpg,4818
149
+ huggingface_hub/inference/_generated/types/text_to_image.py,sha256=yEQ_aZ2fJ1P2PXkExrUdoXXzm4LDE_58pGVAjhbzMqY,1961
150
+ huggingface_hub/inference/_generated/types/text_to_speech.py,sha256=OpKQ8XDNIT---FPS8er94ZUh59JJnCLCBRn9Z_sk7v4,4837
151
+ huggingface_hub/inference/_generated/types/text_to_video.py,sha256=wsMz_K3FTuNP78hsonx9jh8XhwVRn5Sn-ETka8SnRI4,1769
152
+ huggingface_hub/inference/_generated/types/token_classification.py,sha256=C0K5OukrNTyvZE-AdIbblacTtyc1Ita374bwfNw2Y-8,1894
153
+ huggingface_hub/inference/_generated/types/translation.py,sha256=AYfr0n-o0HsK21QNs0HMufI1wCMbtRhAZvNK40hjAYk,1742
154
+ huggingface_hub/inference/_generated/types/video_classification.py,sha256=e6J5BNqQ5gtyHAuVB-UPo4RYy4AIT6XSShhGA13CB_E,1659
155
+ huggingface_hub/inference/_generated/types/visual_question_answering.py,sha256=bSFakQ9eOrD1oNiMNfO4_ukQYSJIcukiAf9k7c5tsrY,1641
156
+ huggingface_hub/inference/_generated/types/zero_shot_classification.py,sha256=XysOHfPIx6V3jBuUM1P0QM4UjpkGQV816awIN_W5Oqk,1717
157
+ huggingface_hub/inference/_generated/types/zero_shot_image_classification.py,sha256=sXIYvmrX0DreSfjERDKeY9iTFV8Zg_UEo6m5xsOsInE,1466
158
+ huggingface_hub/inference/_generated/types/zero_shot_object_detection.py,sha256=LFtdjPxuEMsEcCqqrJETV05t9bz4CpdYMfU2a-eBvl4,1598
159
+ huggingface_hub/inference/_providers/__init__.py,sha256=3a81s3o50LzYMB1ePy6JU3z3imZ6TUUfnEsunScbwYk,4061
160
+ huggingface_hub/inference/_providers/__pycache__/__init__.cpython-311.pyc,,
161
+ huggingface_hub/inference/_providers/__pycache__/fal_ai.cpython-311.pyc,,
162
+ huggingface_hub/inference/_providers/__pycache__/hf_inference.cpython-311.pyc,,
163
+ huggingface_hub/inference/_providers/__pycache__/replicate.cpython-311.pyc,,
164
+ huggingface_hub/inference/_providers/__pycache__/sambanova.cpython-311.pyc,,
165
+ huggingface_hub/inference/_providers/__pycache__/together.cpython-311.pyc,,
166
+ huggingface_hub/inference/_providers/fal_ai.py,sha256=tS2sUzpw08cLKta2vSBablgUI7GiMO8APeJXd6wPo5A,5870
167
+ huggingface_hub/inference/_providers/hf_inference.py,sha256=lh52LekKRi43VX18tLGi2LE2iOC3dNfdP8GsfqUfTMQ,7210
168
+ huggingface_hub/inference/_providers/replicate.py,sha256=hStDlm789VaEjKUVlUFoxvQ9SnczLCeS1wL_NBkWGFs,5682
169
+ huggingface_hub/inference/_providers/sambanova.py,sha256=KenHXBitAueQhSd6_ZsRXtCHQNi4s5g0R4BLVa8SC64,3429
170
+ huggingface_hub/inference/_providers/together.py,sha256=ZJGcfzZkJ0AVUJX-qiuWIrL2YCMBy-AAmdZaVrmYvDU,6909
171
+ huggingface_hub/inference_api.py,sha256=b4-NhPSn9b44nYKV8tDKXodmE4JVdEymMWL4CVGkzlE,8323
172
+ huggingface_hub/keras_mixin.py,sha256=3d2oW35SALXHq-WHoLD_tbq0UrcabGKj3HidtPRx51U,19574
173
+ huggingface_hub/lfs.py,sha256=n-TIjK7J7aXG3zi__0nkd6aNkE4djOf9CD6dYQOQ5P8,16649
174
+ huggingface_hub/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
175
+ huggingface_hub/repocard.py,sha256=ihFBKYqPNaWw9rWMUvcaRKxrooL32NA4fAlrwzXk9LY,34733
176
+ huggingface_hub/repocard_data.py,sha256=EqJ-54QF0qngitsZwCkPQjPwzrkLpxt_qU4lxekMWs8,33247
177
+ huggingface_hub/repository.py,sha256=xVQR-MRKNDfJ_Z_99DwtXZB3xNO06eYG_GvRM4fLiTU,54557
178
+ huggingface_hub/serialization/__init__.py,sha256=kn-Fa-m4FzMnN8lNsF-SwFcfzug4CucexybGKyvZ8S0,1041
179
+ huggingface_hub/serialization/__pycache__/__init__.cpython-311.pyc,,
180
+ huggingface_hub/serialization/__pycache__/_base.cpython-311.pyc,,
181
+ huggingface_hub/serialization/__pycache__/_dduf.cpython-311.pyc,,
182
+ huggingface_hub/serialization/__pycache__/_tensorflow.cpython-311.pyc,,
183
+ huggingface_hub/serialization/__pycache__/_torch.cpython-311.pyc,,
184
+ huggingface_hub/serialization/_base.py,sha256=Df3GwGR9NzeK_SD75prXLucJAzPiNPgHbgXSw-_LTk8,8126
185
+ huggingface_hub/serialization/_dduf.py,sha256=s42239rLiHwaJE36QDEmS5GH7DSmQ__BffiHJO5RjIg,15424
186
+ huggingface_hub/serialization/_tensorflow.py,sha256=zHOvEMg-JHC55Fm4roDT3LUCDO5zB9qtXZffG065RAM,3625
187
+ huggingface_hub/serialization/_torch.py,sha256=WoNV_17x99Agx68mNMbi2g8T5CAVIkSb3_OaZx9KrX4,44714
188
+ huggingface_hub/templates/datasetcard_template.md,sha256=W-EMqR6wndbrnZorkVv56URWPG49l7MATGeI015kTvs,5503
189
+ huggingface_hub/templates/modelcard_template.md,sha256=4AqArS3cqdtbit5Bo-DhjcnDFR-pza5hErLLTPM4Yuc,6870
190
+ huggingface_hub/utils/__init__.py,sha256=aMEsiXGi93z-dXz1W7FFma71tAMeKw0SoKVZSQUeE_4,3525
191
+ huggingface_hub/utils/__pycache__/__init__.cpython-311.pyc,,
192
+ huggingface_hub/utils/__pycache__/_auth.cpython-311.pyc,,
193
+ huggingface_hub/utils/__pycache__/_cache_assets.cpython-311.pyc,,
194
+ huggingface_hub/utils/__pycache__/_cache_manager.cpython-311.pyc,,
195
+ huggingface_hub/utils/__pycache__/_chunk_utils.cpython-311.pyc,,
196
+ huggingface_hub/utils/__pycache__/_datetime.cpython-311.pyc,,
197
+ huggingface_hub/utils/__pycache__/_deprecation.cpython-311.pyc,,
198
+ huggingface_hub/utils/__pycache__/_experimental.cpython-311.pyc,,
199
+ huggingface_hub/utils/__pycache__/_fixes.cpython-311.pyc,,
200
+ huggingface_hub/utils/__pycache__/_git_credential.cpython-311.pyc,,
201
+ huggingface_hub/utils/__pycache__/_headers.cpython-311.pyc,,
202
+ huggingface_hub/utils/__pycache__/_hf_folder.cpython-311.pyc,,
203
+ huggingface_hub/utils/__pycache__/_http.cpython-311.pyc,,
204
+ huggingface_hub/utils/__pycache__/_lfs.cpython-311.pyc,,
205
+ huggingface_hub/utils/__pycache__/_pagination.cpython-311.pyc,,
206
+ huggingface_hub/utils/__pycache__/_paths.cpython-311.pyc,,
207
+ huggingface_hub/utils/__pycache__/_runtime.cpython-311.pyc,,
208
+ huggingface_hub/utils/__pycache__/_safetensors.cpython-311.pyc,,
209
+ huggingface_hub/utils/__pycache__/_subprocess.cpython-311.pyc,,
210
+ huggingface_hub/utils/__pycache__/_telemetry.cpython-311.pyc,,
211
+ huggingface_hub/utils/__pycache__/_typing.cpython-311.pyc,,
212
+ huggingface_hub/utils/__pycache__/_validators.cpython-311.pyc,,
213
+ huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-311.pyc,,
214
+ huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-311.pyc,,
215
+ huggingface_hub/utils/__pycache__/logging.cpython-311.pyc,,
216
+ huggingface_hub/utils/__pycache__/sha.cpython-311.pyc,,
217
+ huggingface_hub/utils/__pycache__/tqdm.cpython-311.pyc,,
218
+ huggingface_hub/utils/_auth.py,sha256=-9p3SSOtWKMMCDKlsM_-ebsIGX0sSgKTSnC-_O4kTxg,8294
219
+ huggingface_hub/utils/_cache_assets.py,sha256=kai77HPQMfYpROouMBQCr_gdBCaeTm996Sqj0dExbNg,5728
220
+ huggingface_hub/utils/_cache_manager.py,sha256=GhiuVQsEkWU55uYkkgiGJV1_naeciyk8u4qb4WTIVyw,34531
221
+ huggingface_hub/utils/_chunk_utils.py,sha256=kRCaj5228_vKcyLWspd8Xq01f17Jz6ds5Sr9ed5d_RU,2130
222
+ huggingface_hub/utils/_datetime.py,sha256=kCS5jaKV25kOncX1xujbXsz5iDLcjLcLw85semGNzxQ,2770
223
+ huggingface_hub/utils/_deprecation.py,sha256=HZhRGGUX_QMKBBBwHHlffLtmCSK01TOpeXHefZbPfwI,4872
224
+ huggingface_hub/utils/_experimental.py,sha256=crCPH6k6-11wwH2GZuZzZzZbjUotay49ywV1SSJhMHM,2395
225
+ huggingface_hub/utils/_fixes.py,sha256=xQV1QkUn2WpLqLjtXNiyn9gh-454K6AF-Q3kwkYAQD8,4437
226
+ huggingface_hub/utils/_git_credential.py,sha256=SDdsiREr1TcAR2Ze2TB0E5cYzVJgvDZrs60od9lAsMc,4596
227
+ huggingface_hub/utils/_headers.py,sha256=A-OYkrVG8LQGgeNq79rIbPD0ovHBibnoASoxJESzeuY,8698
228
+ huggingface_hub/utils/_hf_folder.py,sha256=cffdBqvvk0E_DW9rkaaVXLYOTFsOREDB_Y98evp_nWI,3612
229
+ huggingface_hub/utils/_http.py,sha256=L5mw-SXZ_uJvC9hZLB9yy-2u_vqTtVCFNUNN--OdDQU,22700
230
+ huggingface_hub/utils/_lfs.py,sha256=EC0Oz6Wiwl8foRNkUOzrETXzAWlbgpnpxo5a410ovFY,3957
231
+ huggingface_hub/utils/_pagination.py,sha256=hzLFLd8i_DKkPRVYzOx2CxLt5lcocEiAxDJriQUjAjY,1841
232
+ huggingface_hub/utils/_paths.py,sha256=w1ZhFmmD5ykWjp_hAvhjtOoa2ZUcOXJrF4a6O3QpAWo,5042
233
+ huggingface_hub/utils/_runtime.py,sha256=tUyWylDgqaOXnMg39rvyusiruVN5ulcqiSwUEkQ9jjg,11195
234
+ huggingface_hub/utils/_safetensors.py,sha256=GW3nyv7xQcuwObKYeYoT9VhURVzG1DZTbKBKho8Bbos,4458
235
+ huggingface_hub/utils/_subprocess.py,sha256=6GpGD4qE9-Z1-Ocs3JuCLjR4NcRlknA-hAuQlqiprYY,4595
236
+ huggingface_hub/utils/_telemetry.py,sha256=54LXeIJU5pEGghPAh06gqNAR-UoxOjVLvKqAQscwqZs,4890
237
+ huggingface_hub/utils/_typing.py,sha256=UO0-GeTbiKFV9GqDh4YNRyScQSRAAZRoUeEYQX4P0rE,2882
238
+ huggingface_hub/utils/_validators.py,sha256=dDsVG31iooTYrIyi5Vwr1DukL0fEmJwu3ceVNduhsuE,9204
239
+ huggingface_hub/utils/endpoint_helpers.py,sha256=9VtIAlxQ5H_4y30sjCAgbu7XCqAtNLC7aRYxaNn0hLI,2366
240
+ huggingface_hub/utils/insecure_hashlib.py,sha256=OjxlvtSQHpbLp9PWSrXBDJ0wHjxCBU-SQJgucEEXDbU,1058
241
+ huggingface_hub/utils/logging.py,sha256=rkY4bb4aoXiM9MFtA2jGq9Mjh2rKZ0dYdwFM-x5odWc,4726
242
+ huggingface_hub/utils/sha.py,sha256=OFnNGCba0sNcT2gUwaVCJnldxlltrHHe0DS_PCpV3C4,2134
243
+ huggingface_hub/utils/tqdm.py,sha256=ZgdphuTnwAIaUKnnD2P7qVvNHpzHAyrYoItkiV0aEjQ,9835
.venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.45.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
.venv/lib/python3.11/site-packages/huggingface_hub-0.28.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [console_scripts]
2
+ huggingface-cli = huggingface_hub.commands.huggingface_cli:main
3
+
4
+ [fsspec.specs]
5
+ hf=huggingface_hub.HfFileSystem
6
+
.venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
.venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/License.txt ADDED
@@ -0,0 +1,1568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ End User License Agreement
2
+ --------------------------
3
+
4
+
5
+ Preface
6
+ -------
7
+
8
+ The Software License Agreement in Chapter 1 and the Supplement
9
+ in Chapter 2 contain license terms and conditions that govern
10
+ the use of NVIDIA software. By accepting this agreement, you
11
+ agree to comply with all the terms and conditions applicable
12
+ to the product(s) included herein.
13
+
14
+
15
+ NVIDIA Driver
16
+
17
+
18
+ Description
19
+
20
+ This package contains the operating system driver and
21
+ fundamental system software components for NVIDIA GPUs.
22
+
23
+
24
+ NVIDIA CUDA Toolkit
25
+
26
+
27
+ Description
28
+
29
+ The NVIDIA CUDA Toolkit provides command-line and graphical
30
+ tools for building, debugging and optimizing the performance
31
+ of applications accelerated by NVIDIA GPUs, runtime and math
32
+ libraries, and documentation including programming guides,
33
+ user manuals, and API references.
34
+
35
+
36
+ Default Install Location of CUDA Toolkit
37
+
38
+ Windows platform:
39
+
40
+ %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
41
+
42
+ Linux platform:
43
+
44
+ /usr/local/cuda-#.#
45
+
46
+ Mac platform:
47
+
48
+ /Developer/NVIDIA/CUDA-#.#
49
+
50
+
51
+ NVIDIA CUDA Samples
52
+
53
+
54
+ Description
55
+
56
+ This package includes over 100+ CUDA examples that demonstrate
57
+ various CUDA programming principles, and efficient CUDA
58
+ implementation of algorithms in specific application domains.
59
+
60
+
61
+ Default Install Location of CUDA Samples
62
+
63
+ Windows platform:
64
+
65
+ %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
66
+
67
+ Linux platform:
68
+
69
+ /usr/local/cuda-#.#/samples
70
+
71
+ and
72
+
73
+ $HOME/NVIDIA_CUDA-#.#_Samples
74
+
75
+ Mac platform:
76
+
77
+ /Developer/NVIDIA/CUDA-#.#/samples
78
+
79
+
80
+ NVIDIA Nsight Visual Studio Edition (Windows only)
81
+
82
+
83
+ Description
84
+
85
+ NVIDIA Nsight Development Platform, Visual Studio Edition is a
86
+ development environment integrated into Microsoft Visual
87
+ Studio that provides tools for debugging, profiling, analyzing
88
+ and optimizing your GPU computing and graphics applications.
89
+
90
+
91
+ Default Install Location of Nsight Visual Studio Edition
92
+
93
+ Windows platform:
94
+
95
+ %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
96
+
97
+
98
+ 1. License Agreement for NVIDIA Software Development Kits
99
+ ---------------------------------------------------------
100
+
101
+
102
+ Release Date: July 26, 2018
103
+ ---------------------------
104
+
105
+
106
+ Important NoticeRead before downloading, installing,
107
+ copying or using the licensed software:
108
+ -------------------------------------------------------
109
+
110
+ This license agreement, including exhibits attached
111
+ ("Agreement”) is a legal agreement between you and NVIDIA
112
+ Corporation ("NVIDIA") and governs your use of a NVIDIA
113
+ software development kit (“SDK”).
114
+
115
+ Each SDK has its own set of software and materials, but here
116
+ is a description of the types of items that may be included in
117
+ a SDK: source code, header files, APIs, data sets and assets
118
+ (examples include images, textures, models, scenes, videos,
119
+ native API input/output files), binary software, sample code,
120
+ libraries, utility programs, programming code and
121
+ documentation.
122
+
123
+ This Agreement can be accepted only by an adult of legal age
124
+ of majority in the country in which the SDK is used.
125
+
126
+ If you are entering into this Agreement on behalf of a company
127
+ or other legal entity, you represent that you have the legal
128
+ authority to bind the entity to this Agreement, in which case
129
+ “you” will mean the entity you represent.
130
+
131
+ If you don’t have the required age or authority to accept
132
+ this Agreement, or if you don’t accept all the terms and
133
+ conditions of this Agreement, do not download, install or use
134
+ the SDK.
135
+
136
+ You agree to use the SDK only for purposes that are permitted
137
+ by (a) this Agreement, and (b) any applicable law, regulation
138
+ or generally accepted practices or guidelines in the relevant
139
+ jurisdictions.
140
+
141
+
142
+ 1.1. License
143
+
144
+
145
+ 1.1.1. License Grant
146
+
147
+ Subject to the terms of this Agreement, NVIDIA hereby grants
148
+ you a non-exclusive, non-transferable license, without the
149
+ right to sublicense (except as expressly provided in this
150
+ Agreement) to:
151
+
152
+ 1. Install and use the SDK,
153
+
154
+ 2. Modify and create derivative works of sample source code
155
+ delivered in the SDK, and
156
+
157
+ 3. Distribute those portions of the SDK that are identified
158
+ in this Agreement as distributable, as incorporated in
159
+ object code format into a software application that meets
160
+ the distribution requirements indicated in this Agreement.
161
+
162
+
163
+ 1.1.2. Distribution Requirements
164
+
165
+ These are the distribution requirements for you to exercise
166
+ the distribution grant:
167
+
168
+ 1. Your application must have material additional
169
+ functionality, beyond the included portions of the SDK.
170
+
171
+ 2. The distributable portions of the SDK shall only be
172
+ accessed by your application.
173
+
174
+ 3. The following notice shall be included in modifications
175
+ and derivative works of sample source code distributed:
176
+ “This software contains source code provided by NVIDIA
177
+ Corporation.”
178
+
179
+ 4. Unless a developer tool is identified in this Agreement
180
+ as distributable, it is delivered for your internal use
181
+ only.
182
+
183
+ 5. The terms under which you distribute your application
184
+ must be consistent with the terms of this Agreement,
185
+ including (without limitation) terms relating to the
186
+ license grant and license restrictions and protection of
187
+ NVIDIA’s intellectual property rights. Additionally, you
188
+ agree that you will protect the privacy, security and
189
+ legal rights of your application users.
190
+
191
+ 6. You agree to notify NVIDIA in writing of any known or
192
+ suspected distribution or use of the SDK not in compliance
193
+ with the requirements of this Agreement, and to enforce
194
+ the terms of your agreements with respect to distributed
195
+ SDK.
196
+
197
+
198
+ 1.1.3. Authorized Users
199
+
200
+ You may allow employees and contractors of your entity or of
201
+ your subsidiary(ies) to access and use the SDK from your
202
+ secure network to perform work on your behalf.
203
+
204
+ If you are an academic institution you may allow users
205
+ enrolled or employed by the academic institution to access and
206
+ use the SDK from your secure network.
207
+
208
+ You are responsible for the compliance with the terms of this
209
+ Agreement by your authorized users. If you become aware that
210
+ your authorized users didn’t follow the terms of this
211
+ Agreement, you agree to take reasonable steps to resolve the
212
+ non-compliance and prevent new occurrences.
213
+
214
+
215
+ 1.1.4. Pre-Release SDK
216
+
217
+ The SDK versions identified as alpha, beta, preview or
218
+ otherwise as pre-release, may not be fully functional, may
219
+ contain errors or design flaws, and may have reduced or
220
+ different security, privacy, accessibility, availability, and
221
+ reliability standards relative to commercial versions of
222
+ NVIDIA software and materials. Use of a pre-release SDK may
223
+ result in unexpected results, loss of data, project delays or
224
+ other unpredictable damage or loss.
225
+
226
+ You may use a pre-release SDK at your own risk, understanding
227
+ that pre-release SDKs are not intended for use in production
228
+ or business-critical systems.
229
+
230
+ NVIDIA may choose not to make available a commercial version
231
+ of any pre-release SDK. NVIDIA may also choose to abandon
232
+ development and terminate the availability of a pre-release
233
+ SDK at any time without liability.
234
+
235
+
236
+ 1.1.5. Updates
237
+
238
+ NVIDIA may, at its option, make available patches, workarounds
239
+ or other updates to this SDK. Unless the updates are provided
240
+ with their separate governing terms, they are deemed part of
241
+ the SDK licensed to you as provided in this Agreement. You
242
+ agree that the form and content of the SDK that NVIDIA
243
+ provides may change without prior notice to you. While NVIDIA
244
+ generally maintains compatibility between versions, NVIDIA may
245
+ in some cases make changes that introduce incompatibilities in
246
+ future versions of the SDK.
247
+
248
+
249
+ 1.1.6. Third Party Licenses
250
+
251
+ The SDK may come bundled with, or otherwise include or be
252
+ distributed with, third party software licensed by a NVIDIA
253
+ supplier and/or open source software provided under an open
254
+ source license. Use of third party software is subject to the
255
+ third-party license terms, or in the absence of third party
256
+ terms, the terms of this Agreement. Copyright to third party
257
+ software is held by the copyright holders indicated in the
258
+ third-party software or license.
259
+
260
+
261
+ 1.1.7. Reservation of Rights
262
+
263
+ NVIDIA reserves all rights, title, and interest in and to the
264
+ SDK, not expressly granted to you under this Agreement.
265
+
266
+
267
+ 1.2. Limitations
268
+
269
+ The following license limitations apply to your use of the
270
+ SDK:
271
+
272
+ 1. You may not reverse engineer, decompile or disassemble,
273
+ or remove copyright or other proprietary notices from any
274
+ portion of the SDK or copies of the SDK.
275
+
276
+ 2. Except as expressly provided in this Agreement, you may
277
+ not copy, sell, rent, sublicense, transfer, distribute,
278
+ modify, or create derivative works of any portion of the
279
+ SDK. For clarity, you may not distribute or sublicense the
280
+ SDK as a stand-alone product.
281
+
282
+ 3. Unless you have an agreement with NVIDIA for this
283
+ purpose, you may not indicate that an application created
284
+ with the SDK is sponsored or endorsed by NVIDIA.
285
+
286
+ 4. You may not bypass, disable, or circumvent any
287
+ encryption, security, digital rights management or
288
+ authentication mechanism in the SDK.
289
+
290
+ 5. You may not use the SDK in any manner that would cause it
291
+ to become subject to an open source software license. As
292
+ examples, licenses that require as a condition of use,
293
+ modification, and/or distribution that the SDK be:
294
+
295
+ a. Disclosed or distributed in source code form;
296
+
297
+ b. Licensed for the purpose of making derivative works;
298
+ or
299
+
300
+ c. Redistributable at no charge.
301
+
302
+ 6. Unless you have an agreement with NVIDIA for this
303
+ purpose, you may not use the SDK with any system or
304
+ application where the use or failure of the system or
305
+ application can reasonably be expected to threaten or
306
+ result in personal injury, death, or catastrophic loss.
307
+ Examples include use in avionics, navigation, military,
308
+ medical, life support or other life critical applications.
309
+ NVIDIA does not design, test or manufacture the SDK for
310
+ these critical uses and NVIDIA shall not be liable to you
311
+ or any third party, in whole or in part, for any claims or
312
+ damages arising from such uses.
313
+
314
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
315
+ and its affiliates, and their respective employees,
316
+ contractors, agents, officers and directors, from and
317
+ against any and all claims, damages, obligations, losses,
318
+ liabilities, costs or debt, fines, restitutions and
319
+ expenses (including but not limited to attorney’s fees
320
+ and costs incident to establishing the right of
321
+ indemnification) arising out of or related to your use of
322
+ the SDK outside of the scope of this Agreement, or not in
323
+ compliance with its terms.
324
+
325
+
326
+ 1.3. Ownership
327
+
328
+ 1. NVIDIA or its licensors hold all rights, title and
329
+ interest in and to the SDK and its modifications and
330
+ derivative works, including their respective intellectual
331
+ property rights, subject to your rights described in this
332
+ section. This SDK may include software and materials from
333
+ NVIDIA’s licensors, and these licensors are intended
334
+ third party beneficiaries that may enforce this Agreement
335
+ with respect to their intellectual property rights.
336
+
337
+ 2. You hold all rights, title and interest in and to your
338
+ applications and your derivative works of the sample
339
+ source code delivered in the SDK, including their
340
+ respective intellectual property rights, subject to
341
+ NVIDIA’s rights described in this section.
342
+
343
+ 3. You may, but don’t have to, provide to NVIDIA
344
+ suggestions, feature requests or other feedback regarding
345
+ the SDK, including possible enhancements or modifications
346
+ to the SDK. For any feedback that you voluntarily provide,
347
+ you hereby grant NVIDIA and its affiliates a perpetual,
348
+ non-exclusive, worldwide, irrevocable license to use,
349
+ reproduce, modify, license, sublicense (through multiple
350
+ tiers of sublicensees), and distribute (through multiple
351
+ tiers of distributors) it without the payment of any
352
+ royalties or fees to you. NVIDIA will use feedback at its
353
+ choice. NVIDIA is constantly looking for ways to improve
354
+ its products, so you may send feedback to NVIDIA through
355
+ the developer portal at https://developer.nvidia.com.
356
+
357
+
358
+ 1.4. No Warranties
359
+
360
+ THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
361
+ FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
362
+ ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
363
+ OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
364
+ BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
365
+ FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
366
+ ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
367
+ WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
368
+ DEALING OR COURSE OF TRADE.
369
+
370
+
371
+ 1.5. Limitation of Liability
372
+
373
+ TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
374
+ AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
375
+ PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
376
+ OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
377
+ PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
378
+ WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
379
+ WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
380
+ OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
381
+ PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
382
+ LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
383
+ TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
384
+ AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
385
+ NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
386
+ LIMIT.
387
+
388
+ These exclusions and limitations of liability shall apply
389
+ regardless if NVIDIA or its affiliates have been advised of
390
+ the possibility of such damages, and regardless of whether a
391
+ remedy fails its essential purpose. These exclusions and
392
+ limitations of liability form an essential basis of the
393
+ bargain between the parties, and, absent any of these
394
+ exclusions or limitations of liability, the provisions of this
395
+ Agreement, including, without limitation, the economic terms,
396
+ would be substantially different.
397
+
398
+
399
+ 1.6. Termination
400
+
401
+ 1. This Agreement will continue to apply until terminated by
402
+ either you or NVIDIA as described below.
403
+
404
+ 2. If you want to terminate this Agreement, you may do so by
405
+ stopping to use the SDK.
406
+
407
+ 3. NVIDIA may, at any time, terminate this Agreement if:
408
+
409
+ a. (i) you fail to comply with any term of this
410
+ Agreement and the non-compliance is not fixed within
411
+ thirty (30) days following notice from NVIDIA (or
412
+ immediately if you violate NVIDIA’s intellectual
413
+ property rights);
414
+
415
+ b. (ii) you commence or participate in any legal
416
+ proceeding against NVIDIA with respect to the SDK; or
417
+
418
+ c. (iii) NVIDIA decides to no longer provide the SDK in
419
+ a country or, in NVIDIA’s sole discretion, the
420
+ continued use of it is no longer commercially viable.
421
+
422
+ 4. Upon any termination of this Agreement, you agree to
423
+ promptly discontinue use of the SDK and destroy all copies
424
+ in your possession or control. Your prior distributions in
425
+ accordance with this Agreement are not affected by the
426
+ termination of this Agreement. Upon written request, you
427
+ will certify in writing that you have complied with your
428
+ commitments under this section. Upon any termination of
429
+ this Agreement all provisions survive except for the
430
+ license grant provisions.
431
+
432
+
433
+ 1.7. General
434
+
435
+ If you wish to assign this Agreement or your rights and
436
+ obligations, including by merger, consolidation, dissolution
437
+ or operation of law, contact NVIDIA to ask for permission. Any
438
+ attempted assignment not approved by NVIDIA in writing shall
439
+ be void and of no effect. NVIDIA may assign, delegate or
440
+ transfer this Agreement and its rights and obligations, and if
441
+ to a non-affiliate you will be notified.
442
+
443
+ You agree to cooperate with NVIDIA and provide reasonably
444
+ requested information to verify your compliance with this
445
+ Agreement.
446
+
447
+ This Agreement will be governed in all respects by the laws of
448
+ the United States and of the State of Delaware as those laws
449
+ are applied to contracts entered into and performed entirely
450
+ within Delaware by Delaware residents, without regard to the
451
+ conflicts of laws principles. The United Nations Convention on
452
+ Contracts for the International Sale of Goods is specifically
453
+ disclaimed. You agree to all terms of this Agreement in the
454
+ English language.
455
+
456
+ The state or federal courts residing in Santa Clara County,
457
+ California shall have exclusive jurisdiction over any dispute
458
+ or claim arising out of this Agreement. Notwithstanding this,
459
+ you agree that NVIDIA shall still be allowed to apply for
460
+ injunctive remedies or an equivalent type of urgent legal
461
+ relief in any jurisdiction.
462
+
463
+ If any court of competent jurisdiction determines that any
464
+ provision of this Agreement is illegal, invalid or
465
+ unenforceable, such provision will be construed as limited to
466
+ the extent necessary to be consistent with and fully
467
+ enforceable under the law and the remaining provisions will
468
+ remain in full force and effect. Unless otherwise specified,
469
+ remedies are cumulative.
470
+
471
+ Each party acknowledges and agrees that the other is an
472
+ independent contractor in the performance of this Agreement.
473
+
474
+ The SDK has been developed entirely at private expense and is
475
+ “commercial items” consisting of “commercial computer
476
+ software” and “commercial computer software
477
+ documentation” provided with RESTRICTED RIGHTS. Use,
478
+ duplication or disclosure by the U.S. Government or a U.S.
479
+ Government subcontractor is subject to the restrictions in
480
+ this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
481
+ in subparagraphs (c)(1) and (2) of the Commercial Computer
482
+ Software - Restricted Rights clause at FAR 52.227-19, as
483
+ applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
484
+ Expressway, Santa Clara, CA 95051.
485
+
486
+ The SDK is subject to United States export laws and
487
+ regulations. You agree that you will not ship, transfer or
488
+ export the SDK into any country, or use the SDK in any manner,
489
+ prohibited by the United States Bureau of Industry and
490
+ Security or economic sanctions regulations administered by the
491
+ U.S. Department of Treasury’s Office of Foreign Assets
492
+ Control (OFAC), or any applicable export laws, restrictions or
493
+ regulations. These laws include restrictions on destinations,
494
+ end users and end use. By accepting this Agreement, you
495
+ confirm that you are not a resident or citizen of any country
496
+ currently embargoed by the U.S. and that you are not otherwise
497
+ prohibited from receiving the SDK.
498
+
499
+ Any notice delivered by NVIDIA to you under this Agreement
500
+ will be delivered via mail, email or fax. You agree that any
501
+ notices that NVIDIA sends you electronically will satisfy any
502
+ legal communication requirements. Please direct your legal
503
+ notices or other correspondence to NVIDIA Corporation, 2788
504
+ San Tomas Expressway, Santa Clara, California 95051, United
505
+ States of America, Attention: Legal Department.
506
+
507
+ This Agreement and any exhibits incorporated into this
508
+ Agreement constitute the entire agreement of the parties with
509
+ respect to the subject matter of this Agreement and supersede
510
+ all prior negotiations or documentation exchanged between the
511
+ parties relating to this SDK license. Any additional and/or
512
+ conflicting terms on documents issued by you are null, void,
513
+ and invalid. Any amendment or waiver under this Agreement
514
+ shall be in writing and signed by representatives of both
515
+ parties.
516
+
517
+
518
+ 2. CUDA Toolkit Supplement to Software License Agreement for
519
+ NVIDIA Software Development Kits
520
+ ------------------------------------------------------------
521
+
522
+
523
+ Release date: August 16, 2018
524
+ -----------------------------
525
+
526
+ The terms in this supplement govern your use of the NVIDIA
527
+ CUDA Toolkit SDK under the terms of your license agreement
528
+ (“Agreement”) as modified by this supplement. Capitalized
529
+ terms used but not defined below have the meaning assigned to
530
+ them in the Agreement.
531
+
532
+ This supplement is an exhibit to the Agreement and is
533
+ incorporated as an integral part of the Agreement. In the
534
+ event of conflict between the terms in this supplement and the
535
+ terms in the Agreement, the terms in this supplement govern.
536
+
537
+
538
+ 2.1. License Scope
539
+
540
+ The SDK is licensed for you to develop applications only for
541
+ use in systems with NVIDIA GPUs.
542
+
543
+
544
+ 2.2. Distribution
545
+
546
+ The portions of the SDK that are distributable under the
547
+ Agreement are listed in Attachment A.
548
+
549
+
550
+ 2.3. Operating Systems
551
+
552
+ Those portions of the SDK designed exclusively for use on the
553
+ Linux or FreeBSD operating systems, or other operating systems
554
+ derived from the source code to these operating systems, may
555
+ be copied and redistributed for use in accordance with this
556
+ Agreement, provided that the object code files are not
557
+ modified in any way (except for unzipping of compressed
558
+ files).
559
+
560
+
561
+ 2.4. Audio and Video Encoders and Decoders
562
+
563
+ You acknowledge and agree that it is your sole responsibility
564
+ to obtain any additional third-party licenses required to
565
+ make, have made, use, have used, sell, import, and offer for
566
+ sale your products or services that include or incorporate any
567
+ third-party software and content relating to audio and/or
568
+ video encoders and decoders from, including but not limited
569
+ to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
570
+ MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
571
+ under this Agreement any necessary patent or other rights with
572
+ respect to any audio and/or video encoders and decoders.
573
+
574
+
575
+ 2.5. Licensing
576
+
577
+ If the distribution terms in this Agreement are not suitable
578
+ for your organization, or for any questions regarding this
579
+ Agreement, please contact NVIDIA at
580
+ nvidia-compute-license-questions@nvidia.com.
581
+
582
+
583
+ 2.6. Attachment A
584
+
585
+ The following portions of the SDK are distributable under the
586
+ Agreement:
587
+
588
+ Component
589
+
590
+ CUDA Runtime
591
+
592
+ Windows
593
+
594
+ cudart.dll, cudart_static.lib, cudadevrt.lib
595
+
596
+ Mac OSX
597
+
598
+ libcudart.dylib, libcudart_static.a, libcudadevrt.a
599
+
600
+ Linux
601
+
602
+ libcudart.so, libcudart_static.a, libcudadevrt.a
603
+
604
+ Android
605
+
606
+ libcudart.so, libcudart_static.a, libcudadevrt.a
607
+
608
+ Component
609
+
610
+ CUDA FFT Library
611
+
612
+ Windows
613
+
614
+ cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
615
+
616
+ Mac OSX
617
+
618
+ libcufft.dylib, libcufft_static.a, libcufftw.dylib,
619
+ libcufftw_static.a
620
+
621
+ Linux
622
+
623
+ libcufft.so, libcufft_static.a, libcufftw.so,
624
+ libcufftw_static.a
625
+
626
+ Android
627
+
628
+ libcufft.so, libcufft_static.a, libcufftw.so,
629
+ libcufftw_static.a
630
+
631
+ Component
632
+
633
+ CUDA BLAS Library
634
+
635
+ Windows
636
+
637
+ cublas.dll, cublasLt.dll
638
+
639
+ Mac OSX
640
+
641
+ libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
642
+ libcublasLt_static.a
643
+
644
+ Linux
645
+
646
+ libcublas.so, libcublasLt.so, libcublas_static.a,
647
+ libcublasLt_static.a
648
+
649
+ Android
650
+
651
+ libcublas.so, libcublasLt.so, libcublas_static.a,
652
+ libcublasLt_static.a
653
+
654
+ Component
655
+
656
+ NVIDIA "Drop-in" BLAS Library
657
+
658
+ Windows
659
+
660
+ nvblas.dll
661
+
662
+ Mac OSX
663
+
664
+ libnvblas.dylib
665
+
666
+ Linux
667
+
668
+ libnvblas.so
669
+
670
+ Component
671
+
672
+ CUDA Sparse Matrix Library
673
+
674
+ Windows
675
+
676
+ cusparse.dll, cusparse.lib
677
+
678
+ Mac OSX
679
+
680
+ libcusparse.dylib, libcusparse_static.a
681
+
682
+ Linux
683
+
684
+ libcusparse.so, libcusparse_static.a
685
+
686
+ Android
687
+
688
+ libcusparse.so, libcusparse_static.a
689
+
690
+ Component
691
+
692
+ CUDA Linear Solver Library
693
+
694
+ Windows
695
+
696
+ cusolver.dll, cusolver.lib
697
+
698
+ Mac OSX
699
+
700
+ libcusolver.dylib, libcusolver_static.a
701
+
702
+ Linux
703
+
704
+ libcusolver.so, libcusolver_static.a
705
+
706
+ Android
707
+
708
+ libcusolver.so, libcusolver_static.a
709
+
710
+ Component
711
+
712
+ CUDA Random Number Generation Library
713
+
714
+ Windows
715
+
716
+ curand.dll, curand.lib
717
+
718
+ Mac OSX
719
+
720
+ libcurand.dylib, libcurand_static.a
721
+
722
+ Linux
723
+
724
+ libcurand.so, libcurand_static.a
725
+
726
+ Android
727
+
728
+ libcurand.so, libcurand_static.a
729
+
730
+ Component
731
+
732
+ CUDA Accelerated Graph Library
733
+
734
+ Component
735
+
736
+ NVIDIA Performance Primitives Library
737
+
738
+ Windows
739
+
740
+ nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
741
+ nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
742
+ nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
743
+ nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
744
+ nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
745
+
746
+ Mac OSX
747
+
748
+ libnppc.dylib, libnppc_static.a, libnppial.dylib,
749
+ libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
750
+ libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
751
+ libnppidei_static.a, libnppif.dylib, libnppif_static.a,
752
+ libnppig.dylib, libnppig_static.a, libnppim.dylib,
753
+ libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
754
+ libnpps.dylib, libnpps_static.a
755
+
756
+ Linux
757
+
758
+ libnppc.so, libnppc_static.a, libnppial.so,
759
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
760
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
761
+ libnppidei_static.a, libnppif.so, libnppif_static.a
762
+ libnppig.so, libnppig_static.a, libnppim.so,
763
+ libnppim_static.a, libnppist.so, libnppist_static.a,
764
+ libnppisu.so, libnppisu_static.a, libnppitc.so
765
+ libnppitc_static.a, libnpps.so, libnpps_static.a
766
+
767
+ Android
768
+
769
+ libnppc.so, libnppc_static.a, libnppial.so,
770
+ libnppial_static.a, libnppicc.so, libnppicc_static.a,
771
+ libnppicom.so, libnppicom_static.a, libnppidei.so,
772
+ libnppidei_static.a, libnppif.so, libnppif_static.a
773
+ libnppig.so, libnppig_static.a, libnppim.so,
774
+ libnppim_static.a, libnppist.so, libnppist_static.a,
775
+ libnppisu.so, libnppisu_static.a, libnppitc.so
776
+ libnppitc_static.a, libnpps.so, libnpps_static.a
777
+
778
+ Component
779
+
780
+ NVIDIA JPEG Library
781
+
782
+ Linux
783
+
784
+ libnvjpeg.so, libnvjpeg_static.a
785
+
786
+ Component
787
+
788
+ Internal common library required for statically linking to
789
+ cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
790
+
791
+ Mac OSX
792
+
793
+ libculibos.a
794
+
795
+ Linux
796
+
797
+ libculibos.a
798
+
799
+ Component
800
+
801
+ NVIDIA Runtime Compilation Library and Header
802
+
803
+ All
804
+
805
+ nvrtc.h
806
+
807
+ Windows
808
+
809
+ nvrtc.dll, nvrtc-builtins.dll
810
+
811
+ Mac OSX
812
+
813
+ libnvrtc.dylib, libnvrtc-builtins.dylib
814
+
815
+ Linux
816
+
817
+ libnvrtc.so, libnvrtc-builtins.so
818
+
819
+ Component
820
+
821
+ NVIDIA Optimizing Compiler Library
822
+
823
+ Windows
824
+
825
+ nvvm.dll
826
+
827
+ Mac OSX
828
+
829
+ libnvvm.dylib
830
+
831
+ Linux
832
+
833
+ libnvvm.so
834
+
835
+ Component
836
+
837
+ NVIDIA Common Device Math Functions Library
838
+
839
+ Windows
840
+
841
+ libdevice.10.bc
842
+
843
+ Mac OSX
844
+
845
+ libdevice.10.bc
846
+
847
+ Linux
848
+
849
+ libdevice.10.bc
850
+
851
+ Component
852
+
853
+ CUDA Occupancy Calculation Header Library
854
+
855
+ All
856
+
857
+ cuda_occupancy.h
858
+
859
+ Component
860
+
861
+ CUDA Half Precision Headers
862
+
863
+ All
864
+
865
+ cuda_fp16.h, cuda_fp16.hpp
866
+
867
+ Component
868
+
869
+ CUDA Profiling Tools Interface (CUPTI) Library
870
+
871
+ Windows
872
+
873
+ cupti.dll
874
+
875
+ Mac OSX
876
+
877
+ libcupti.dylib
878
+
879
+ Linux
880
+
881
+ libcupti.so
882
+
883
+ Component
884
+
885
+ NVIDIA Tools Extension Library
886
+
887
+ Windows
888
+
889
+ nvToolsExt.dll, nvToolsExt.lib
890
+
891
+ Mac OSX
892
+
893
+ libnvToolsExt.dylib
894
+
895
+ Linux
896
+
897
+ libnvToolsExt.so
898
+
899
+ Component
900
+
901
+ NVIDIA CUDA Driver Libraries
902
+
903
+ Linux
904
+
905
+ libcuda.so, libnvidia-fatbinaryloader.so,
906
+ libnvidia-ptxjitcompiler.so
907
+
908
+ The NVIDIA CUDA Driver Libraries are only distributable in
909
+ applications that meet this criteria:
910
+
911
+ 1. The application was developed starting from a NVIDIA CUDA
912
+ container obtained from Docker Hub or the NVIDIA GPU
913
+ Cloud, and
914
+
915
+ 2. The resulting application is packaged as a Docker
916
+ container and distributed to users on Docker Hub or the
917
+ NVIDIA GPU Cloud only.
918
+
919
+
920
+ 2.7. Attachment B
921
+
922
+
923
+ Additional Licensing Obligations
924
+
925
+ The following third party components included in the SOFTWARE
926
+ are licensed to Licensee pursuant to the following terms and
927
+ conditions:
928
+
929
+ 1. Licensee's use of the GDB third party component is
930
+ subject to the terms and conditions of GNU GPL v3:
931
+
932
+ This product includes copyrighted third-party software licensed
933
+ under the terms of the GNU General Public License v3 ("GPL v3").
934
+ All third-party software packages are copyright by their respective
935
+ authors. GPL v3 terms and conditions are hereby incorporated into
936
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
937
+
938
+ Consistent with these licensing requirements, the software
939
+ listed below is provided under the terms of the specified
940
+ open source software licenses. To obtain source code for
941
+ software provided under licenses that require
942
+ redistribution of source code, including the GNU General
943
+ Public License (GPL) and GNU Lesser General Public License
944
+ (LGPL), contact oss-requests@nvidia.com. This offer is
945
+ valid for a period of three (3) years from the date of the
946
+ distribution of this product by NVIDIA CORPORATION.
947
+
948
+ Component License
949
+ CUDA-GDB GPL v3
950
+
951
+ 2. Licensee represents and warrants that any and all third
952
+ party licensing and/or royalty payment obligations in
953
+ connection with Licensee's use of the H.264 video codecs
954
+ are solely the responsibility of Licensee.
955
+
956
+ 3. Licensee's use of the Thrust library is subject to the
957
+ terms and conditions of the Apache License Version 2.0.
958
+ All third-party software packages are copyright by their
959
+ respective authors. Apache License Version 2.0 terms and
960
+ conditions are hereby incorporated into the Agreement by
961
+ this reference.
962
+ http://www.apache.org/licenses/LICENSE-2.0.html
963
+
964
+ In addition, Licensee acknowledges the following notice:
965
+ Thrust includes source code from the Boost Iterator,
966
+ Tuple, System, and Random Number libraries.
967
+
968
+ Boost Software License - Version 1.0 - August 17th, 2003
969
+ . . . .
970
+
971
+ Permission is hereby granted, free of charge, to any person or
972
+ organization obtaining a copy of the software and accompanying
973
+ documentation covered by this license (the "Software") to use,
974
+ reproduce, display, distribute, execute, and transmit the Software,
975
+ and to prepare derivative works of the Software, and to permit
976
+ third-parties to whom the Software is furnished to do so, all
977
+ subject to the following:
978
+
979
+ The copyright notices in the Software and this entire statement,
980
+ including the above license grant, this restriction and the following
981
+ disclaimer, must be included in all copies of the Software, in whole
982
+ or in part, and all derivative works of the Software, unless such
983
+ copies or derivative works are solely in the form of machine-executable
984
+ object code generated by a source language processor.
985
+
986
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
987
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
988
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
989
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
990
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
991
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
992
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
993
+ OTHER DEALINGS IN THE SOFTWARE.
994
+
995
+ 4. Licensee's use of the LLVM third party component is
996
+ subject to the following terms and conditions:
997
+
998
+ ======================================================
999
+ LLVM Release License
1000
+ ======================================================
1001
+ University of Illinois/NCSA
1002
+ Open Source License
1003
+
1004
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
1005
+ All rights reserved.
1006
+
1007
+ Developed by:
1008
+
1009
+ LLVM Team
1010
+
1011
+ University of Illinois at Urbana-Champaign
1012
+
1013
+ http://llvm.org
1014
+
1015
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1016
+ of this software and associated documentation files (the "Software"), to
1017
+ deal with the Software without restriction, including without limitation the
1018
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1019
+ sell copies of the Software, and to permit persons to whom the Software is
1020
+ furnished to do so, subject to the following conditions:
1021
+
1022
+ * Redistributions of source code must retain the above copyright notice,
1023
+ this list of conditions and the following disclaimers.
1024
+
1025
+ * Redistributions in binary form must reproduce the above copyright
1026
+ notice, this list of conditions and the following disclaimers in the
1027
+ documentation and/or other materials provided with the distribution.
1028
+
1029
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
1030
+ Champaign, nor the names of its contributors may be used to endorse or
1031
+ promote products derived from this Software without specific prior
1032
+ written permission.
1033
+
1034
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1035
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1036
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1037
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
1038
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
1039
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1040
+ DEALINGS WITH THE SOFTWARE.
1041
+
1042
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
1043
+ component is subject to the following terms and
1044
+ conditions:
1045
+
1046
+ ------------
1047
+ PCRE LICENCE
1048
+ ------------
1049
+ PCRE is a library of functions to support regular expressions whose syntax
1050
+ and semantics are as close as possible to those of the Perl 5 language.
1051
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
1052
+ specified below. The documentation for PCRE, supplied in the "doc"
1053
+ directory, is distributed under the same terms as the software itself. The
1054
+ basic library functions are written in C and are freestanding. Also
1055
+ included in the distribution is a set of C++ wrapper functions, and a just-
1056
+ in-time compiler that can be used to optimize pattern matching. These are
1057
+ both optional features that can be omitted when the library is built.
1058
+
1059
+ THE BASIC LIBRARY FUNCTIONS
1060
+ ---------------------------
1061
+ Written by: Philip Hazel
1062
+ Email local part: ph10
1063
+ Email domain: cam.ac.uk
1064
+ University of Cambridge Computing Service,
1065
+ Cambridge, England.
1066
+ Copyright (c) 1997-2012 University of Cambridge
1067
+ All rights reserved.
1068
+
1069
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
1070
+ -------------------------------------
1071
+ Written by: Zoltan Herczeg
1072
+ Email local part: hzmester
1073
+ Emain domain: freemail.hu
1074
+ Copyright(c) 2010-2012 Zoltan Herczeg
1075
+ All rights reserved.
1076
+
1077
+ STACK-LESS JUST-IN-TIME COMPILER
1078
+ --------------------------------
1079
+ Written by: Zoltan Herczeg
1080
+ Email local part: hzmester
1081
+ Emain domain: freemail.hu
1082
+ Copyright(c) 2009-2012 Zoltan Herczeg
1083
+ All rights reserved.
1084
+
1085
+ THE C++ WRAPPER FUNCTIONS
1086
+ -------------------------
1087
+ Contributed by: Google Inc.
1088
+ Copyright (c) 2007-2012, Google Inc.
1089
+ All rights reserved.
1090
+
1091
+ THE "BSD" LICENCE
1092
+ -----------------
1093
+ Redistribution and use in source and binary forms, with or without
1094
+ modification, are permitted provided that the following conditions are met:
1095
+
1096
+ * Redistributions of source code must retain the above copyright notice,
1097
+ this list of conditions and the following disclaimer.
1098
+
1099
+ * Redistributions in binary form must reproduce the above copyright
1100
+ notice, this list of conditions and the following disclaimer in the
1101
+ documentation and/or other materials provided with the distribution.
1102
+
1103
+ * Neither the name of the University of Cambridge nor the name of Google
1104
+ Inc. nor the names of their contributors may be used to endorse or
1105
+ promote products derived from this software without specific prior
1106
+ written permission.
1107
+
1108
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1109
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1110
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1111
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
1112
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1113
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1114
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1115
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1116
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1117
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1118
+ POSSIBILITY OF SUCH DAMAGE.
1119
+
1120
+ 6. Some of the cuBLAS library routines were written by or
1121
+ derived from code written by Vasily Volkov and are subject
1122
+ to the Modified Berkeley Software Distribution License as
1123
+ follows:
1124
+
1125
+ Copyright (c) 2007-2009, Regents of the University of California
1126
+
1127
+ All rights reserved.
1128
+
1129
+ Redistribution and use in source and binary forms, with or without
1130
+ modification, are permitted provided that the following conditions are
1131
+ met:
1132
+ * Redistributions of source code must retain the above copyright
1133
+ notice, this list of conditions and the following disclaimer.
1134
+ * Redistributions in binary form must reproduce the above
1135
+ copyright notice, this list of conditions and the following
1136
+ disclaimer in the documentation and/or other materials provided
1137
+ with the distribution.
1138
+ * Neither the name of the University of California, Berkeley nor
1139
+ the names of its contributors may be used to endorse or promote
1140
+ products derived from this software without specific prior
1141
+ written permission.
1142
+
1143
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1144
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1145
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1146
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1147
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1148
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1149
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1150
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1151
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1152
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1153
+ POSSIBILITY OF SUCH DAMAGE.
1154
+
1155
+ 7. Some of the cuBLAS library routines were written by or
1156
+ derived from code written by Davide Barbieri and are
1157
+ subject to the Modified Berkeley Software Distribution
1158
+ License as follows:
1159
+
1160
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
1161
+
1162
+ All rights reserved.
1163
+
1164
+ Redistribution and use in source and binary forms, with or without
1165
+ modification, are permitted provided that the following conditions are
1166
+ met:
1167
+ * Redistributions of source code must retain the above copyright
1168
+ notice, this list of conditions and the following disclaimer.
1169
+ * Redistributions in binary form must reproduce the above
1170
+ copyright notice, this list of conditions and the following
1171
+ disclaimer in the documentation and/or other materials provided
1172
+ with the distribution.
1173
+ * The name of the author may not be used to endorse or promote
1174
+ products derived from this software without specific prior
1175
+ written permission.
1176
+
1177
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
1178
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1179
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1180
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1181
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1182
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1183
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1184
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1185
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1186
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1187
+ POSSIBILITY OF SUCH DAMAGE.
1188
+
1189
+ 8. Some of the cuBLAS library routines were derived from
1190
+ code developed by the University of Tennessee and are
1191
+ subject to the Modified Berkeley Software Distribution
1192
+ License as follows:
1193
+
1194
+ Copyright (c) 2010 The University of Tennessee.
1195
+
1196
+ All rights reserved.
1197
+
1198
+ Redistribution and use in source and binary forms, with or without
1199
+ modification, are permitted provided that the following conditions are
1200
+ met:
1201
+ * Redistributions of source code must retain the above copyright
1202
+ notice, this list of conditions and the following disclaimer.
1203
+ * Redistributions in binary form must reproduce the above
1204
+ copyright notice, this list of conditions and the following
1205
+ disclaimer listed in this license in the documentation and/or
1206
+ other materials provided with the distribution.
1207
+ * Neither the name of the copyright holders nor the names of its
1208
+ contributors may be used to endorse or promote products derived
1209
+ from this software without specific prior written permission.
1210
+
1211
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1212
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1213
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1214
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1215
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1216
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1217
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1218
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1219
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1220
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1221
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1222
+
1223
+ 9. Some of the cuBLAS library routines were written by or
1224
+ derived from code written by Jonathan Hogg and are subject
1225
+ to the Modified Berkeley Software Distribution License as
1226
+ follows:
1227
+
1228
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
1229
+
1230
+ All rights reserved.
1231
+
1232
+ Redistribution and use in source and binary forms, with or without
1233
+ modification, are permitted provided that the following conditions are
1234
+ met:
1235
+ * Redistributions of source code must retain the above copyright
1236
+ notice, this list of conditions and the following disclaimer.
1237
+ * Redistributions in binary form must reproduce the above
1238
+ copyright notice, this list of conditions and the following
1239
+ disclaimer in the documentation and/or other materials provided
1240
+ with the distribution.
1241
+ * Neither the name of the STFC nor the names of its contributors
1242
+ may be used to endorse or promote products derived from this
1243
+ software without specific prior written permission.
1244
+
1245
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1246
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1247
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1248
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
1249
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1250
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1251
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
1252
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
1253
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
1254
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
1255
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1256
+
1257
+ 10. Some of the cuBLAS library routines were written by or
1258
+ derived from code written by Ahmad M. Abdelfattah, David
1259
+ Keyes, and Hatem Ltaief, and are subject to the Apache
1260
+ License, Version 2.0, as follows:
1261
+
1262
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
1263
+ Authors:
1264
+ Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
1265
+ David Keyes (david.keyes@kaust.edu.sa)
1266
+ Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
1267
+
1268
+ Redistribution and use in source and binary forms, with or without
1269
+ modification, are permitted provided that the following conditions
1270
+ are met:
1271
+
1272
+ * Redistributions of source code must retain the above copyright
1273
+ notice, this list of conditions and the following disclaimer.
1274
+ * Redistributions in binary form must reproduce the above copyright
1275
+ notice, this list of conditions and the following disclaimer in the
1276
+ documentation and/or other materials provided with the distribution.
1277
+ * Neither the name of the King Abdullah University of Science and
1278
+ Technology nor the names of its contributors may be used to endorse
1279
+ or promote products derived from this software without specific prior
1280
+ written permission.
1281
+
1282
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1283
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1284
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1285
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1286
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1287
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1288
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1289
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1290
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1291
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1292
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
1293
+
1294
+ 11. Some of the cuSPARSE library routines were written by or
1295
+ derived from code written by Li-Wen Chang and are subject
1296
+ to the NCSA Open Source License as follows:
1297
+
1298
+ Copyright (c) 2012, University of Illinois.
1299
+
1300
+ All rights reserved.
1301
+
1302
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
1303
+
1304
+ Permission is hereby granted, free of charge, to any person obtaining
1305
+ a copy of this software and associated documentation files (the
1306
+ "Software"), to deal with the Software without restriction, including
1307
+ without limitation the rights to use, copy, modify, merge, publish,
1308
+ distribute, sublicense, and/or sell copies of the Software, and to
1309
+ permit persons to whom the Software is furnished to do so, subject to
1310
+ the following conditions:
1311
+ * Redistributions of source code must retain the above copyright
1312
+ notice, this list of conditions and the following disclaimer.
1313
+ * Redistributions in binary form must reproduce the above
1314
+ copyright notice, this list of conditions and the following
1315
+ disclaimers in the documentation and/or other materials provided
1316
+ with the distribution.
1317
+ * Neither the names of IMPACT Group, University of Illinois, nor
1318
+ the names of its contributors may be used to endorse or promote
1319
+ products derived from this Software without specific prior
1320
+ written permission.
1321
+
1322
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1323
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1324
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1325
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
1326
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
1327
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
1328
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
1329
+ SOFTWARE.
1330
+
1331
+ 12. Some of the cuRAND library routines were written by or
1332
+ derived from code written by Mutsuo Saito and Makoto
1333
+ Matsumoto and are subject to the following license:
1334
+
1335
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
1336
+ University. All rights reserved.
1337
+
1338
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
1339
+ University and University of Tokyo. All rights reserved.
1340
+
1341
+ Redistribution and use in source and binary forms, with or without
1342
+ modification, are permitted provided that the following conditions are
1343
+ met:
1344
+ * Redistributions of source code must retain the above copyright
1345
+ notice, this list of conditions and the following disclaimer.
1346
+ * Redistributions in binary form must reproduce the above
1347
+ copyright notice, this list of conditions and the following
1348
+ disclaimer in the documentation and/or other materials provided
1349
+ with the distribution.
1350
+ * Neither the name of the Hiroshima University nor the names of
1351
+ its contributors may be used to endorse or promote products
1352
+ derived from this software without specific prior written
1353
+ permission.
1354
+
1355
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1356
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1357
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1358
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1359
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1360
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1361
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1362
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1363
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1364
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1365
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1366
+
1367
+ 13. Some of the cuRAND library routines were derived from
1368
+ code developed by D. E. Shaw Research and are subject to
1369
+ the following license:
1370
+
1371
+ Copyright 2010-2011, D. E. Shaw Research.
1372
+
1373
+ All rights reserved.
1374
+
1375
+ Redistribution and use in source and binary forms, with or without
1376
+ modification, are permitted provided that the following conditions are
1377
+ met:
1378
+ * Redistributions of source code must retain the above copyright
1379
+ notice, this list of conditions, and the following disclaimer.
1380
+ * Redistributions in binary form must reproduce the above
1381
+ copyright notice, this list of conditions, and the following
1382
+ disclaimer in the documentation and/or other materials provided
1383
+ with the distribution.
1384
+ * Neither the name of D. E. Shaw Research nor the names of its
1385
+ contributors may be used to endorse or promote products derived
1386
+ from this software without specific prior written permission.
1387
+
1388
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1389
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1390
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1391
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1392
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1393
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1394
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1395
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1396
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1397
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1398
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1399
+
1400
+ 14. Some of the Math library routines were written by or
1401
+ derived from code developed by Norbert Juffa and are
1402
+ subject to the following license:
1403
+
1404
+ Copyright (c) 2015-2017, Norbert Juffa
1405
+ All rights reserved.
1406
+
1407
+ Redistribution and use in source and binary forms, with or without
1408
+ modification, are permitted provided that the following conditions
1409
+ are met:
1410
+
1411
+ 1. Redistributions of source code must retain the above copyright
1412
+ notice, this list of conditions and the following disclaimer.
1413
+
1414
+ 2. Redistributions in binary form must reproduce the above copyright
1415
+ notice, this list of conditions and the following disclaimer in the
1416
+ documentation and/or other materials provided with the distribution.
1417
+
1418
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1419
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1420
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1421
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1422
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1423
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1424
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1425
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1426
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1427
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1428
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429
+
1430
+ 15. Licensee's use of the lz4 third party component is
1431
+ subject to the following terms and conditions:
1432
+
1433
+ Copyright (C) 2011-2013, Yann Collet.
1434
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1435
+
1436
+ Redistribution and use in source and binary forms, with or without
1437
+ modification, are permitted provided that the following conditions are
1438
+ met:
1439
+
1440
+ * Redistributions of source code must retain the above copyright
1441
+ notice, this list of conditions and the following disclaimer.
1442
+ * Redistributions in binary form must reproduce the above
1443
+ copyright notice, this list of conditions and the following disclaimer
1444
+ in the documentation and/or other materials provided with the
1445
+ distribution.
1446
+
1447
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1448
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1449
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1450
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1451
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1452
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1453
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1454
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1455
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1456
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1457
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1458
+
1459
+ 16. The NPP library uses code from the Boost Math Toolkit,
1460
+ and is subject to the following license:
1461
+
1462
+ Boost Software License - Version 1.0 - August 17th, 2003
1463
+ . . . .
1464
+
1465
+ Permission is hereby granted, free of charge, to any person or
1466
+ organization obtaining a copy of the software and accompanying
1467
+ documentation covered by this license (the "Software") to use,
1468
+ reproduce, display, distribute, execute, and transmit the Software,
1469
+ and to prepare derivative works of the Software, and to permit
1470
+ third-parties to whom the Software is furnished to do so, all
1471
+ subject to the following:
1472
+
1473
+ The copyright notices in the Software and this entire statement,
1474
+ including the above license grant, this restriction and the following
1475
+ disclaimer, must be included in all copies of the Software, in whole
1476
+ or in part, and all derivative works of the Software, unless such
1477
+ copies or derivative works are solely in the form of machine-executable
1478
+ object code generated by a source language processor.
1479
+
1480
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1481
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1482
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
1483
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
1484
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
1485
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
1486
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
1487
+ OTHER DEALINGS IN THE SOFTWARE.
1488
+
1489
+ 17. Portions of the Nsight Eclipse Edition is subject to the
1490
+ following license:
1491
+
1492
+ The Eclipse Foundation makes available all content in this plug-in
1493
+ ("Content"). Unless otherwise indicated below, the Content is provided
1494
+ to you under the terms and conditions of the Eclipse Public License
1495
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
1496
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
1497
+ will mean the Content.
1498
+
1499
+ If you did not receive this Content directly from the Eclipse
1500
+ Foundation, the Content is being redistributed by another party
1501
+ ("Redistributor") and different terms and conditions may apply to your
1502
+ use of any object code in the Content. Check the Redistributor's
1503
+ license that was provided with the Content. If no such license exists,
1504
+ contact the Redistributor. Unless otherwise indicated below, the terms
1505
+ and conditions of the EPL still apply to any source code in the
1506
+ Content and such source code may be obtained at http://www.eclipse.org.
1507
+
1508
+ 18. Some of the cuBLAS library routines uses code from
1509
+ OpenAI, which is subject to the following license:
1510
+
1511
+ License URL
1512
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
1513
+
1514
+ License Text
1515
+ The MIT License
1516
+
1517
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
1518
+
1519
+ Permission is hereby granted, free of charge, to any person obtaining a copy
1520
+ of this software and associated documentation files (the "Software"), to deal
1521
+ in the Software without restriction, including without limitation the rights
1522
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1523
+ copies of the Software, and to permit persons to whom the Software is
1524
+ furnished to do so, subject to the following conditions:
1525
+
1526
+ The above copyright notice and this permission notice shall be included in
1527
+ all copies or substantial portions of the Software.
1528
+
1529
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1530
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1531
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1532
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1533
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1534
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1535
+ THE SOFTWARE.
1536
+
1537
+ 19. Licensee's use of the Visual Studio Setup Configuration
1538
+ Samples is subject to the following license:
1539
+
1540
+ The MIT License (MIT)
1541
+ Copyright (C) Microsoft Corporation. All rights reserved.
1542
+
1543
+ Permission is hereby granted, free of charge, to any person
1544
+ obtaining a copy of this software and associated documentation
1545
+ files (the "Software"), to deal in the Software without restriction,
1546
+ including without limitation the rights to use, copy, modify, merge,
1547
+ publish, distribute, sublicense, and/or sell copies of the Software,
1548
+ and to permit persons to whom the Software is furnished to do so,
1549
+ subject to the following conditions:
1550
+
1551
+ The above copyright notice and this permission notice shall be included
1552
+ in all copies or substantial portions of the Software.
1553
+
1554
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
1555
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1556
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1557
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1558
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1559
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1560
+
1561
+ 20. Licensee's use of linmath.h header for CPU functions for
1562
+ GL vector/matrix operations from lunarG is subject to the
1563
+ Apache License Version 2.0.
1564
+
1565
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
1566
+ subject to the MIT license .
1567
+
1568
+ -----------------
.venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/RECORD ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ nvidia/__pycache__/__init__.cpython-311.pyc,,
3
+ nvidia/cuda_cupti/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ nvidia/cuda_cupti/__pycache__/__init__.cpython-311.pyc,,
5
+ nvidia/cuda_cupti/include/Openacc/cupti_openacc.h,sha256=Z0OM5e_hbd3cxdXyn3SCHqBBQawLg4QORnlm57Cr2-M,3513
6
+ nvidia/cuda_cupti/include/Openmp/cupti_openmp.h,sha256=E1WNmeb_7HaUSmBegtUNe4IV1i7pXeNxgzIlyKn1zrM,3491
7
+ nvidia/cuda_cupti/include/Openmp/omp-tools.h,sha256=AmuC_xPC7VPu3B-W4PmXuCNufFawhY8PjNXePaQFAOg,37403
8
+ nvidia/cuda_cupti/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ nvidia/cuda_cupti/include/__pycache__/__init__.cpython-311.pyc,,
10
+ nvidia/cuda_cupti/include/cuda_stdint.h,sha256=XbFOk9CtJjKqk7PpYNqbSVsDxAsVM8avA4rWpPi0BjQ,4093
11
+ nvidia/cuda_cupti/include/cupti.h,sha256=JkVyAGTIMYzwm62dfVqas3nMcILhgP_Wdz6fh4_NED0,4697
12
+ nvidia/cuda_cupti/include/cupti_activity.h,sha256=RB7VYrdiOBpdH_LbXb2o-CcGthnk3-NE2Cqq-jQbN7Q,210611
13
+ nvidia/cuda_cupti/include/cupti_activity_deprecated.h,sha256=rYJsoAJxA2BTT50-olN8EYcSzdlXBpRbR1ATLG3rVIM,121526
14
+ nvidia/cuda_cupti/include/cupti_callbacks.h,sha256=zrEVRb0hubSfD69QUmHsJiL8oAfvqyuKGcTVRihQrnc,29729
15
+ nvidia/cuda_cupti/include/cupti_checkpoint.h,sha256=rTz8JoWxqESBXyZWUhZJGm4xeYcx4OJOtJ7Ld13T_b0,5264
16
+ nvidia/cuda_cupti/include/cupti_common.h,sha256=85m74bxUgXp3tEaPQpezeazmpsNMw41PsjNSYmQdT20,3514
17
+ nvidia/cuda_cupti/include/cupti_driver_cbid.h,sha256=dHKyQYZbBbdlxixzFkIoNHg5IfGXdgriyjN1Bu1i6g4,74462
18
+ nvidia/cuda_cupti/include/cupti_events.h,sha256=f7lLGmD2e8FzvMhRgnn0-v7U0vTpUkiQHIpQxgARGb0,51896
19
+ nvidia/cuda_cupti/include/cupti_metrics.h,sha256=iLAOlDrcbHEsIIUmgq0Tp1ZOY9O3Ot3wj2-bI8iYbSs,32148
20
+ nvidia/cuda_cupti/include/cupti_nvtx_cbid.h,sha256=_azPtR1g4qivvX7qbvHRUg0RHCWF7iEOJyHMN9qZe9E,5912
21
+ nvidia/cuda_cupti/include/cupti_pcsampling.h,sha256=ycJHT36DmPIaVzHsB3xxjXkhFyEfMCJOl3LbCsHFgyA,32144
22
+ nvidia/cuda_cupti/include/cupti_pcsampling_util.h,sha256=lx8CaNXowJe5Zvc06LE-u_Zry_jODs1mM6j9Q5WIX9E,12430
23
+ nvidia/cuda_cupti/include/cupti_profiler_target.h,sha256=JsceoDuhllWNEzaO0xxT81dJ55NrbF0UtRJJgit0P_E,32131
24
+ nvidia/cuda_cupti/include/cupti_result.h,sha256=a-C4Y7LAYCiCT1ngOfoDuTi2stEG1YTafwwn6UfL-LU,12603
25
+ nvidia/cuda_cupti/include/cupti_runtime_cbid.h,sha256=11pXl0MdmTtxUngel-ru4JdqWvF_gEIG14aQExRyfzI,46436
26
+ nvidia/cuda_cupti/include/cupti_sass_metrics.h,sha256=3RW9snJuFQdOhrEn3wDJOru05q0V_zssWrqD7tvVJKw,19674
27
+ nvidia/cuda_cupti/include/cupti_target.h,sha256=x4Vz1Upb6m9ixmVpmGaKQldDWYQI3OZ-ocEXGzNK0EE,1263
28
+ nvidia/cuda_cupti/include/cupti_version.h,sha256=sjd-aUoTGkEWyvA2VUWIpZwXyXAaclqC8gbwNnuK5D0,4425
29
+ nvidia/cuda_cupti/include/generated_cudaGL_meta.h,sha256=dfd2QuaRdEjbStOKvaQLi1Md_qrpRQh8PfyZznJ8bWY,3115
30
+ nvidia/cuda_cupti/include/generated_cudaVDPAU_meta.h,sha256=fAedsoQxaU3hIAApAWDOKsa9kgcuQw4tdyf8klLm-3k,1453
31
+ nvidia/cuda_cupti/include/generated_cuda_gl_interop_meta.h,sha256=LXOqvQCej0sCgAT1LUKKYZ466EFxN4hIwf9oIhXOLF0,2250
32
+ nvidia/cuda_cupti/include/generated_cuda_meta.h,sha256=hawYpDe0xpaDFDnClXI91JjwCRxWb-AS0FS8ydUMgxc,94639
33
+ nvidia/cuda_cupti/include/generated_cuda_runtime_api_meta.h,sha256=D8CbAN3-jLuF2KGfsBHXEELSgL92KrUAiDvugWE8B8M,69706
34
+ nvidia/cuda_cupti/include/generated_cuda_vdpau_interop_meta.h,sha256=8OLqWN26aEYpTWUXtbHJvA5GYhVv3ybYVOTW7yK37z8,1367
35
+ nvidia/cuda_cupti/include/generated_cudart_removed_meta.h,sha256=X3I5WXmhtsJNNlgY7coJ5vg4t11G5FRR6Xo7MboIeck,5172
36
+ nvidia/cuda_cupti/include/generated_nvtx_meta.h,sha256=YHb_RD8g3s4m8PJn7Z0wnxvUHarl7BOAX5ADr-BL3HI,7513
37
+ nvidia/cuda_cupti/include/nvperf_common.h,sha256=BqPml9AxyN10-ptWT3hQzh2JUWqQX57Q5BjQ3ZuaKNs,17255
38
+ nvidia/cuda_cupti/include/nvperf_cuda_host.h,sha256=aBnyIr_hexPDGBkP6WSujN1mI_DYP25sEIXWYY1O7VI,8298
39
+ nvidia/cuda_cupti/include/nvperf_host.h,sha256=afdHG6eraeo4ltlF9ihskqhU7IccxcRCaZDZ6_ikjkg,68506
40
+ nvidia/cuda_cupti/include/nvperf_target.h,sha256=ZDA-JI459tLBW4iLLCQjYYRAMeHwfqDIgXbVqVLDYZ4,22539
41
+ nvidia/cuda_cupti/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
42
+ nvidia/cuda_cupti/lib/__pycache__/__init__.cpython-311.pyc,,
43
+ nvidia/cuda_cupti/lib/libcheckpoint.so,sha256=cCTAB7_UNqvoU7zKMCHkklcmM1GGr3atZmZoZksdAKM,1501336
44
+ nvidia/cuda_cupti/lib/libcupti.so.12,sha256=-yp8WxXITflQXdR-VT_kbzEhpX0wOR_KJBedIC9z8_c,7748112
45
+ nvidia/cuda_cupti/lib/libnvperf_host.so,sha256=tZsmsdNdAik8jdiVaro3V8FGa3FzLGaHq6QSxQ2VC2k,28132984
46
+ nvidia/cuda_cupti/lib/libnvperf_target.so,sha256=ztN3NKnf_9XyEogyuHjyOAcTvqYBn6lE0psxejPTeYw,5592368
47
+ nvidia/cuda_cupti/lib/libpcsamplingutil.so,sha256=ZDY0bEGLzy-pA3yfFtc6jfvo-Cu8vWwUCQYatGJrb0Q,912728
48
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
49
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
50
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/METADATA,sha256=UiXYPD5hc55tQSSNiYNq5AqkD68jq1KHNCtG-PJvPds,1553
51
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/RECORD,,
52
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/WHEEL,sha256=XDTs3wIbcE-BcRO08VJlZpA6z9OaC1mOKPCGGGwuM2g,109
53
+ nvidia_cuda_cupti_cu12-12.4.127.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
.venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.42.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-manylinux2014_x86_64
5
+
.venv/lib/python3.11/site-packages/nvidia_cuda_cupti_cu12-12.4.127.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia
.venv/lib/python3.11/site-packages/openai/_extras/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .numpy_proxy import numpy as numpy, has_numpy as has_numpy
2
+ from .pandas_proxy import pandas as pandas
.venv/lib/python3.11/site-packages/openai/_extras/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (327 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/_extras/__pycache__/_common.cpython-311.pyc ADDED
Binary file (935 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/_extras/__pycache__/numpy_proxy.cpython-311.pyc ADDED
Binary file (1.7 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_extras/__pycache__/pandas_proxy.cpython-311.pyc ADDED
Binary file (1.44 kB). View file
 
.venv/lib/python3.11/site-packages/openai/_extras/_common.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .._exceptions import OpenAIError
2
+
3
+ INSTRUCTIONS = """
4
+
5
+ OpenAI error:
6
+
7
+ missing `{library}`
8
+
9
+ This feature requires additional dependencies:
10
+
11
+ $ pip install openai[{extra}]
12
+
13
+ """
14
+
15
+
16
+ def format_instructions(*, library: str, extra: str) -> str:
17
+ return INSTRUCTIONS.format(library=library, extra=extra)
18
+
19
+
20
+ class MissingDependencyError(OpenAIError):
21
+ pass
.venv/lib/python3.11/site-packages/openai/_extras/numpy_proxy.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any
4
+ from typing_extensions import override
5
+
6
+ from .._utils import LazyProxy
7
+ from ._common import MissingDependencyError, format_instructions
8
+
9
+ if TYPE_CHECKING:
10
+ import numpy as numpy
11
+
12
+
13
+ NUMPY_INSTRUCTIONS = format_instructions(library="numpy", extra="datalib")
14
+
15
+
16
+ class NumpyProxy(LazyProxy[Any]):
17
+ @override
18
+ def __load__(self) -> Any:
19
+ try:
20
+ import numpy
21
+ except ImportError as err:
22
+ raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err
23
+
24
+ return numpy
25
+
26
+
27
+ if not TYPE_CHECKING:
28
+ numpy = NumpyProxy()
29
+
30
+
31
+ def has_numpy() -> bool:
32
+ try:
33
+ import numpy # noqa: F401 # pyright: ignore[reportUnusedImport]
34
+ except ImportError:
35
+ return False
36
+
37
+ return True
.venv/lib/python3.11/site-packages/openai/_extras/pandas_proxy.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any
4
+ from typing_extensions import override
5
+
6
+ from .._utils import LazyProxy
7
+ from ._common import MissingDependencyError, format_instructions
8
+
9
+ if TYPE_CHECKING:
10
+ import pandas as pandas
11
+
12
+
13
+ PANDAS_INSTRUCTIONS = format_instructions(library="pandas", extra="datalib")
14
+
15
+
16
+ class PandasProxy(LazyProxy[Any]):
17
+ @override
18
+ def __load__(self) -> Any:
19
+ try:
20
+ import pandas
21
+ except ImportError as err:
22
+ raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err
23
+
24
+ return pandas
25
+
26
+
27
+ if not TYPE_CHECKING:
28
+ pandas = PandasProxy()
.venv/lib/python3.11/site-packages/openai/lib/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (314 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/lib/__pycache__/_old_api.cpython-311.pyc ADDED
Binary file (3.18 kB). View file
 
.venv/lib/python3.11/site-packages/openai/lib/__pycache__/_pydantic.cpython-311.pyc ADDED
Binary file (8.42 kB). View file
 
.venv/lib/python3.11/site-packages/openai/lib/__pycache__/_tools.cpython-311.pyc ADDED
Binary file (2.38 kB). View file
 
.venv/lib/python3.11/site-packages/openai/lib/__pycache__/_validators.cpython-311.pyc ADDED
Binary file (47.3 kB). View file
 
.venv/lib/python3.11/site-packages/openai/lib/streaming/chat/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.24 kB). View file
 
.venv/lib/python3.11/site-packages/openai/lib/streaming/chat/__pycache__/_types.cpython-311.pyc ADDED
Binary file (777 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (4.13 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/batches.cpython-311.pyc ADDED
Binary file (15.1 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/completions.cpython-311.pyc ADDED
Binary file (26.2 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/embeddings.cpython-311.pyc ADDED
Binary file (10.6 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/files.cpython-311.pyc ADDED
Binary file (21.9 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/images.cpython-311.pyc ADDED
Binary file (17.6 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/models.cpython-311.pyc ADDED
Binary file (10 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/__pycache__/moderations.cpython-311.pyc ADDED
Binary file (7.97 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from .chat import Chat, AsyncChat
4
+ from .completions import Completions, AsyncCompletions
5
+
6
+ __all__ = [
7
+ "Completions",
8
+ "AsyncCompletions",
9
+ "Chat",
10
+ "AsyncChat",
11
+ ]
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (424 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/chat.cpython-311.pyc ADDED
Binary file (1.47 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/__pycache__/completions.cpython-311.pyc ADDED
Binary file (22.5 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/chat.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from ...._compat import cached_property
6
+ from .completions import Completions, AsyncCompletions
7
+ from ...._resource import SyncAPIResource, AsyncAPIResource
8
+
9
+ __all__ = ["Chat", "AsyncChat"]
10
+
11
+
12
+ class Chat(SyncAPIResource):
13
+ @cached_property
14
+ def completions(self) -> Completions:
15
+ return Completions(self._client)
16
+
17
+
18
+ class AsyncChat(AsyncAPIResource):
19
+ @cached_property
20
+ def completions(self) -> AsyncCompletions:
21
+ return AsyncCompletions(self._client)
.venv/lib/python3.11/site-packages/openai/resources/beta/chat/completions.py ADDED
@@ -0,0 +1,630 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict, List, Type, Union, Iterable, Optional, cast
6
+ from functools import partial
7
+ from typing_extensions import Literal
8
+
9
+ import httpx
10
+
11
+ from .... import _legacy_response
12
+ from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
13
+ from ...._utils import maybe_transform, async_maybe_transform
14
+ from ...._compat import cached_property
15
+ from ...._resource import SyncAPIResource, AsyncAPIResource
16
+ from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
17
+ from ...._streaming import Stream
18
+ from ....types.chat import (
19
+ ChatCompletionReasoningEffort,
20
+ completion_create_params,
21
+ )
22
+ from ...._base_client import make_request_options
23
+ from ....lib._parsing import (
24
+ ResponseFormatT,
25
+ validate_input_tools as _validate_input_tools,
26
+ parse_chat_completion as _parse_chat_completion,
27
+ type_to_response_format_param as _type_to_response_format,
28
+ )
29
+ from ....types.chat_model import ChatModel
30
+ from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
31
+ from ....types.shared_params import Metadata
32
+ from ....types.chat.chat_completion import ChatCompletion
33
+ from ....types.chat.chat_completion_chunk import ChatCompletionChunk
34
+ from ....types.chat.parsed_chat_completion import ParsedChatCompletion
35
+ from ....types.chat.chat_completion_modality import ChatCompletionModality
36
+ from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
37
+ from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
38
+ from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
39
+ from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
40
+ from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
41
+ from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
42
+
43
+ __all__ = ["Completions", "AsyncCompletions"]
44
+
45
+
46
+ class Completions(SyncAPIResource):
47
+ @cached_property
48
+ def with_raw_response(self) -> CompletionsWithRawResponse:
49
+ """
50
+ This property can be used as a prefix for any HTTP method call to return the
51
+ the raw response object instead of the parsed content.
52
+
53
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
54
+ """
55
+ return CompletionsWithRawResponse(self)
56
+
57
+ @cached_property
58
+ def with_streaming_response(self) -> CompletionsWithStreamingResponse:
59
+ """
60
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
61
+
62
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
63
+ """
64
+ return CompletionsWithStreamingResponse(self)
65
+
66
+ def parse(
67
+ self,
68
+ *,
69
+ messages: Iterable[ChatCompletionMessageParam],
70
+ model: Union[str, ChatModel],
71
+ audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
72
+ response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
73
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
74
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
75
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
76
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
77
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
78
+ max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
79
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
80
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
81
+ modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
82
+ n: Optional[int] | NotGiven = NOT_GIVEN,
83
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
84
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
85
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
86
+ reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
87
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
88
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
89
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
90
+ store: Optional[bool] | NotGiven = NOT_GIVEN,
91
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
92
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
93
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
94
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
95
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
96
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
97
+ user: str | NotGiven = NOT_GIVEN,
98
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
99
+ # The extra values given here take precedence over values defined on the client or passed to this method.
100
+ extra_headers: Headers | None = None,
101
+ extra_query: Query | None = None,
102
+ extra_body: Body | None = None,
103
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
104
+ ) -> ParsedChatCompletion[ResponseFormatT]:
105
+ """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
106
+ & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
107
+
108
+ You can pass a pydantic model to this method and it will automatically convert the model
109
+ into a JSON schema, send it to the API and parse the response content back into the given model.
110
+
111
+ This method will also automatically parse `function` tool calls if:
112
+ - You use the `openai.pydantic_function_tool()` helper method
113
+ - You mark your tool schema with `"strict": True`
114
+
115
+ Example usage:
116
+ ```py
117
+ from pydantic import BaseModel
118
+ from openai import OpenAI
119
+
120
+
121
+ class Step(BaseModel):
122
+ explanation: str
123
+ output: str
124
+
125
+
126
+ class MathResponse(BaseModel):
127
+ steps: List[Step]
128
+ final_answer: str
129
+
130
+
131
+ client = OpenAI()
132
+ completion = client.beta.chat.completions.parse(
133
+ model="gpt-4o-2024-08-06",
134
+ messages=[
135
+ {"role": "system", "content": "You are a helpful math tutor."},
136
+ {"role": "user", "content": "solve 8x + 31 = 2"},
137
+ ],
138
+ response_format=MathResponse,
139
+ )
140
+
141
+ message = completion.choices[0].message
142
+ if message.parsed:
143
+ print(message.parsed.steps)
144
+ print("answer: ", message.parsed.final_answer)
145
+ ```
146
+ """
147
+ _validate_input_tools(tools)
148
+
149
+ extra_headers = {
150
+ "X-Stainless-Helper-Method": "beta.chat.completions.parse",
151
+ **(extra_headers or {}),
152
+ }
153
+
154
+ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
155
+ return _parse_chat_completion(
156
+ response_format=response_format,
157
+ chat_completion=raw_completion,
158
+ input_tools=tools,
159
+ )
160
+
161
+ return self._post(
162
+ "/chat/completions",
163
+ body=maybe_transform(
164
+ {
165
+ "messages": messages,
166
+ "model": model,
167
+ "audio": audio,
168
+ "frequency_penalty": frequency_penalty,
169
+ "function_call": function_call,
170
+ "functions": functions,
171
+ "logit_bias": logit_bias,
172
+ "logprobs": logprobs,
173
+ "max_completion_tokens": max_completion_tokens,
174
+ "max_tokens": max_tokens,
175
+ "metadata": metadata,
176
+ "modalities": modalities,
177
+ "n": n,
178
+ "parallel_tool_calls": parallel_tool_calls,
179
+ "prediction": prediction,
180
+ "presence_penalty": presence_penalty,
181
+ "reasoning_effort": reasoning_effort,
182
+ "response_format": _type_to_response_format(response_format),
183
+ "seed": seed,
184
+ "service_tier": service_tier,
185
+ "stop": stop,
186
+ "store": store,
187
+ "stream": False,
188
+ "stream_options": stream_options,
189
+ "temperature": temperature,
190
+ "tool_choice": tool_choice,
191
+ "tools": tools,
192
+ "top_logprobs": top_logprobs,
193
+ "top_p": top_p,
194
+ "user": user,
195
+ },
196
+ completion_create_params.CompletionCreateParams,
197
+ ),
198
+ options=make_request_options(
199
+ extra_headers=extra_headers,
200
+ extra_query=extra_query,
201
+ extra_body=extra_body,
202
+ timeout=timeout,
203
+ post_parser=parser,
204
+ ),
205
+ # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
206
+ # in the `parser` function above
207
+ cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
208
+ stream=False,
209
+ )
210
+
211
+ def stream(
212
+ self,
213
+ *,
214
+ messages: Iterable[ChatCompletionMessageParam],
215
+ model: Union[str, ChatModel],
216
+ audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
217
+ response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
218
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
219
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
220
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
221
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
222
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
223
+ max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
224
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
225
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
226
+ modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
227
+ n: Optional[int] | NotGiven = NOT_GIVEN,
228
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
229
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
230
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
231
+ reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
232
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
233
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
234
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
235
+ store: Optional[bool] | NotGiven = NOT_GIVEN,
236
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
237
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
238
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
239
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
240
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
241
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
242
+ user: str | NotGiven = NOT_GIVEN,
243
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
244
+ # The extra values given here take precedence over values defined on the client or passed to this method.
245
+ extra_headers: Headers | None = None,
246
+ extra_query: Query | None = None,
247
+ extra_body: Body | None = None,
248
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
249
+ ) -> ChatCompletionStreamManager[ResponseFormatT]:
250
+ """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
251
+ and automatic accumulation of each delta.
252
+
253
+ This also supports all of the parsing utilities that `.parse()` does.
254
+
255
+ Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
256
+
257
+ ```py
258
+ with client.beta.chat.completions.stream(
259
+ model="gpt-4o-2024-08-06",
260
+ messages=[...],
261
+ ) as stream:
262
+ for event in stream:
263
+ if event.type == "content.delta":
264
+ print(event.delta, flush=True, end="")
265
+ ```
266
+
267
+ When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
268
+
269
+ When the context manager exits, the response will be closed, however the `stream` instance is still available outside
270
+ the context manager.
271
+ """
272
+ extra_headers = {
273
+ "X-Stainless-Helper-Method": "beta.chat.completions.stream",
274
+ **(extra_headers or {}),
275
+ }
276
+
277
+ api_request: partial[Stream[ChatCompletionChunk]] = partial(
278
+ self._client.chat.completions.create,
279
+ messages=messages,
280
+ model=model,
281
+ audio=audio,
282
+ stream=True,
283
+ response_format=_type_to_response_format(response_format),
284
+ frequency_penalty=frequency_penalty,
285
+ function_call=function_call,
286
+ functions=functions,
287
+ logit_bias=logit_bias,
288
+ logprobs=logprobs,
289
+ max_completion_tokens=max_completion_tokens,
290
+ max_tokens=max_tokens,
291
+ metadata=metadata,
292
+ modalities=modalities,
293
+ n=n,
294
+ parallel_tool_calls=parallel_tool_calls,
295
+ prediction=prediction,
296
+ presence_penalty=presence_penalty,
297
+ reasoning_effort=reasoning_effort,
298
+ seed=seed,
299
+ service_tier=service_tier,
300
+ store=store,
301
+ stop=stop,
302
+ stream_options=stream_options,
303
+ temperature=temperature,
304
+ tool_choice=tool_choice,
305
+ tools=tools,
306
+ top_logprobs=top_logprobs,
307
+ top_p=top_p,
308
+ user=user,
309
+ extra_headers=extra_headers,
310
+ extra_query=extra_query,
311
+ extra_body=extra_body,
312
+ timeout=timeout,
313
+ )
314
+ return ChatCompletionStreamManager(
315
+ api_request,
316
+ response_format=response_format,
317
+ input_tools=tools,
318
+ )
319
+
320
+
321
+ class AsyncCompletions(AsyncAPIResource):
322
+ @cached_property
323
+ def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
324
+ """
325
+ This property can be used as a prefix for any HTTP method call to return the
326
+ the raw response object instead of the parsed content.
327
+
328
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
329
+ """
330
+ return AsyncCompletionsWithRawResponse(self)
331
+
332
+ @cached_property
333
+ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
334
+ """
335
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
336
+
337
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
338
+ """
339
+ return AsyncCompletionsWithStreamingResponse(self)
340
+
341
+ async def parse(
342
+ self,
343
+ *,
344
+ messages: Iterable[ChatCompletionMessageParam],
345
+ model: Union[str, ChatModel],
346
+ audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
347
+ response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
348
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
349
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
350
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
351
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
352
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
353
+ max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
354
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
355
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
356
+ modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
357
+ n: Optional[int] | NotGiven = NOT_GIVEN,
358
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
359
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
360
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
361
+ reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
362
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
363
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
364
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
365
+ store: Optional[bool] | NotGiven = NOT_GIVEN,
366
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
367
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
368
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
369
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
370
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
371
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
372
+ user: str | NotGiven = NOT_GIVEN,
373
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
374
+ # The extra values given here take precedence over values defined on the client or passed to this method.
375
+ extra_headers: Headers | None = None,
376
+ extra_query: Query | None = None,
377
+ extra_body: Body | None = None,
378
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
379
+ ) -> ParsedChatCompletion[ResponseFormatT]:
380
+ """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
381
+ & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
382
+
383
+ You can pass a pydantic model to this method and it will automatically convert the model
384
+ into a JSON schema, send it to the API and parse the response content back into the given model.
385
+
386
+ This method will also automatically parse `function` tool calls if:
387
+ - You use the `openai.pydantic_function_tool()` helper method
388
+ - You mark your tool schema with `"strict": True`
389
+
390
+ Example usage:
391
+ ```py
392
+ from pydantic import BaseModel
393
+ from openai import AsyncOpenAI
394
+
395
+
396
+ class Step(BaseModel):
397
+ explanation: str
398
+ output: str
399
+
400
+
401
+ class MathResponse(BaseModel):
402
+ steps: List[Step]
403
+ final_answer: str
404
+
405
+
406
+ client = AsyncOpenAI()
407
+ completion = await client.beta.chat.completions.parse(
408
+ model="gpt-4o-2024-08-06",
409
+ messages=[
410
+ {"role": "system", "content": "You are a helpful math tutor."},
411
+ {"role": "user", "content": "solve 8x + 31 = 2"},
412
+ ],
413
+ response_format=MathResponse,
414
+ )
415
+
416
+ message = completion.choices[0].message
417
+ if message.parsed:
418
+ print(message.parsed.steps)
419
+ print("answer: ", message.parsed.final_answer)
420
+ ```
421
+ """
422
+ _validate_input_tools(tools)
423
+
424
+ extra_headers = {
425
+ "X-Stainless-Helper-Method": "beta.chat.completions.parse",
426
+ **(extra_headers or {}),
427
+ }
428
+
429
+ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
430
+ return _parse_chat_completion(
431
+ response_format=response_format,
432
+ chat_completion=raw_completion,
433
+ input_tools=tools,
434
+ )
435
+
436
+ return await self._post(
437
+ "/chat/completions",
438
+ body=await async_maybe_transform(
439
+ {
440
+ "messages": messages,
441
+ "model": model,
442
+ "audio": audio,
443
+ "frequency_penalty": frequency_penalty,
444
+ "function_call": function_call,
445
+ "functions": functions,
446
+ "logit_bias": logit_bias,
447
+ "logprobs": logprobs,
448
+ "max_completion_tokens": max_completion_tokens,
449
+ "max_tokens": max_tokens,
450
+ "metadata": metadata,
451
+ "modalities": modalities,
452
+ "n": n,
453
+ "parallel_tool_calls": parallel_tool_calls,
454
+ "prediction": prediction,
455
+ "presence_penalty": presence_penalty,
456
+ "reasoning_effort": reasoning_effort,
457
+ "response_format": _type_to_response_format(response_format),
458
+ "seed": seed,
459
+ "service_tier": service_tier,
460
+ "store": store,
461
+ "stop": stop,
462
+ "stream": False,
463
+ "stream_options": stream_options,
464
+ "temperature": temperature,
465
+ "tool_choice": tool_choice,
466
+ "tools": tools,
467
+ "top_logprobs": top_logprobs,
468
+ "top_p": top_p,
469
+ "user": user,
470
+ },
471
+ completion_create_params.CompletionCreateParams,
472
+ ),
473
+ options=make_request_options(
474
+ extra_headers=extra_headers,
475
+ extra_query=extra_query,
476
+ extra_body=extra_body,
477
+ timeout=timeout,
478
+ post_parser=parser,
479
+ ),
480
+ # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
481
+ # in the `parser` function above
482
+ cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
483
+ stream=False,
484
+ )
485
+
486
+ def stream(
487
+ self,
488
+ *,
489
+ messages: Iterable[ChatCompletionMessageParam],
490
+ model: Union[str, ChatModel],
491
+ audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
492
+ response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
493
+ frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
494
+ function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
495
+ functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
496
+ logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
497
+ logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
498
+ max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
499
+ max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
500
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
501
+ modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
502
+ n: Optional[int] | NotGiven = NOT_GIVEN,
503
+ parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
504
+ prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
505
+ presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
506
+ reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
507
+ seed: Optional[int] | NotGiven = NOT_GIVEN,
508
+ service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
509
+ stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
510
+ store: Optional[bool] | NotGiven = NOT_GIVEN,
511
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
512
+ temperature: Optional[float] | NotGiven = NOT_GIVEN,
513
+ tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
514
+ tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
515
+ top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
516
+ top_p: Optional[float] | NotGiven = NOT_GIVEN,
517
+ user: str | NotGiven = NOT_GIVEN,
518
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
519
+ # The extra values given here take precedence over values defined on the client or passed to this method.
520
+ extra_headers: Headers | None = None,
521
+ extra_query: Query | None = None,
522
+ extra_body: Body | None = None,
523
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
524
+ ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
525
+ """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
526
+ and automatic accumulation of each delta.
527
+
528
+ This also supports all of the parsing utilities that `.parse()` does.
529
+
530
+ Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
531
+
532
+ ```py
533
+ async with client.beta.chat.completions.stream(
534
+ model="gpt-4o-2024-08-06",
535
+ messages=[...],
536
+ ) as stream:
537
+ async for event in stream:
538
+ if event.type == "content.delta":
539
+ print(event.delta, flush=True, end="")
540
+ ```
541
+
542
+ When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
543
+
544
+ When the context manager exits, the response will be closed, however the `stream` instance is still available outside
545
+ the context manager.
546
+ """
547
+ _validate_input_tools(tools)
548
+
549
+ extra_headers = {
550
+ "X-Stainless-Helper-Method": "beta.chat.completions.stream",
551
+ **(extra_headers or {}),
552
+ }
553
+
554
+ api_request = self._client.chat.completions.create(
555
+ messages=messages,
556
+ model=model,
557
+ audio=audio,
558
+ stream=True,
559
+ response_format=_type_to_response_format(response_format),
560
+ frequency_penalty=frequency_penalty,
561
+ function_call=function_call,
562
+ functions=functions,
563
+ logit_bias=logit_bias,
564
+ logprobs=logprobs,
565
+ max_completion_tokens=max_completion_tokens,
566
+ max_tokens=max_tokens,
567
+ metadata=metadata,
568
+ modalities=modalities,
569
+ n=n,
570
+ parallel_tool_calls=parallel_tool_calls,
571
+ prediction=prediction,
572
+ presence_penalty=presence_penalty,
573
+ reasoning_effort=reasoning_effort,
574
+ seed=seed,
575
+ service_tier=service_tier,
576
+ stop=stop,
577
+ store=store,
578
+ stream_options=stream_options,
579
+ temperature=temperature,
580
+ tool_choice=tool_choice,
581
+ tools=tools,
582
+ top_logprobs=top_logprobs,
583
+ top_p=top_p,
584
+ user=user,
585
+ extra_headers=extra_headers,
586
+ extra_query=extra_query,
587
+ extra_body=extra_body,
588
+ timeout=timeout,
589
+ )
590
+ return AsyncChatCompletionStreamManager(
591
+ api_request,
592
+ response_format=response_format,
593
+ input_tools=tools,
594
+ )
595
+
596
+
597
+ class CompletionsWithRawResponse:
598
+ def __init__(self, completions: Completions) -> None:
599
+ self._completions = completions
600
+
601
+ self.parse = _legacy_response.to_raw_response_wrapper(
602
+ completions.parse,
603
+ )
604
+
605
+
606
+ class AsyncCompletionsWithRawResponse:
607
+ def __init__(self, completions: AsyncCompletions) -> None:
608
+ self._completions = completions
609
+
610
+ self.parse = _legacy_response.async_to_raw_response_wrapper(
611
+ completions.parse,
612
+ )
613
+
614
+
615
+ class CompletionsWithStreamingResponse:
616
+ def __init__(self, completions: Completions) -> None:
617
+ self._completions = completions
618
+
619
+ self.parse = to_streamed_response_wrapper(
620
+ completions.parse,
621
+ )
622
+
623
+
624
+ class AsyncCompletionsWithStreamingResponse:
625
+ def __init__(self, completions: AsyncCompletions) -> None:
626
+ self._completions = completions
627
+
628
+ self.parse = async_to_streamed_response_wrapper(
629
+ completions.parse,
630
+ )
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from .realtime import (
4
+ Realtime,
5
+ AsyncRealtime,
6
+ RealtimeWithRawResponse,
7
+ AsyncRealtimeWithRawResponse,
8
+ RealtimeWithStreamingResponse,
9
+ AsyncRealtimeWithStreamingResponse,
10
+ )
11
+ from .sessions import (
12
+ Sessions,
13
+ AsyncSessions,
14
+ SessionsWithRawResponse,
15
+ AsyncSessionsWithRawResponse,
16
+ SessionsWithStreamingResponse,
17
+ AsyncSessionsWithStreamingResponse,
18
+ )
19
+
20
+ __all__ = [
21
+ "Sessions",
22
+ "AsyncSessions",
23
+ "SessionsWithRawResponse",
24
+ "AsyncSessionsWithRawResponse",
25
+ "SessionsWithStreamingResponse",
26
+ "AsyncSessionsWithStreamingResponse",
27
+ "Realtime",
28
+ "AsyncRealtime",
29
+ "RealtimeWithRawResponse",
30
+ "AsyncRealtimeWithRawResponse",
31
+ "RealtimeWithStreamingResponse",
32
+ "AsyncRealtimeWithStreamingResponse",
33
+ ]
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (897 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/realtime.cpython-311.pyc ADDED
Binary file (43.1 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/__pycache__/sessions.cpython-311.pyc ADDED
Binary file (12.3 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/realtime.py ADDED
@@ -0,0 +1,966 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import logging
7
+ from types import TracebackType
8
+ from typing import TYPE_CHECKING, Any, Iterator, cast
9
+ from typing_extensions import AsyncIterator
10
+
11
+ import httpx
12
+ from pydantic import BaseModel
13
+
14
+ from .sessions import (
15
+ Sessions,
16
+ AsyncSessions,
17
+ SessionsWithRawResponse,
18
+ AsyncSessionsWithRawResponse,
19
+ SessionsWithStreamingResponse,
20
+ AsyncSessionsWithStreamingResponse,
21
+ )
22
+ from ...._types import NOT_GIVEN, Query, Headers, NotGiven
23
+ from ...._utils import (
24
+ is_azure_client,
25
+ maybe_transform,
26
+ strip_not_given,
27
+ async_maybe_transform,
28
+ is_async_azure_client,
29
+ )
30
+ from ...._compat import cached_property
31
+ from ...._models import construct_type_unchecked
32
+ from ...._resource import SyncAPIResource, AsyncAPIResource
33
+ from ...._exceptions import OpenAIError
34
+ from ...._base_client import _merge_mappings
35
+ from ....types.beta.realtime import session_update_event_param, response_create_event_param
36
+ from ....types.websocket_connection_options import WebsocketConnectionOptions
37
+ from ....types.beta.realtime.realtime_client_event import RealtimeClientEvent
38
+ from ....types.beta.realtime.realtime_server_event import RealtimeServerEvent
39
+ from ....types.beta.realtime.conversation_item_param import ConversationItemParam
40
+ from ....types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam
41
+
42
+ if TYPE_CHECKING:
43
+ from websockets.sync.client import ClientConnection as WebsocketConnection
44
+ from websockets.asyncio.client import ClientConnection as AsyncWebsocketConnection
45
+
46
+ from ...._client import OpenAI, AsyncOpenAI
47
+
48
+ __all__ = ["Realtime", "AsyncRealtime"]
49
+
50
+ log: logging.Logger = logging.getLogger(__name__)
51
+
52
+
53
+ class Realtime(SyncAPIResource):
54
+ @cached_property
55
+ def sessions(self) -> Sessions:
56
+ return Sessions(self._client)
57
+
58
+ @cached_property
59
+ def with_raw_response(self) -> RealtimeWithRawResponse:
60
+ """
61
+ This property can be used as a prefix for any HTTP method call to return
62
+ the raw response object instead of the parsed content.
63
+
64
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
65
+ """
66
+ return RealtimeWithRawResponse(self)
67
+
68
+ @cached_property
69
+ def with_streaming_response(self) -> RealtimeWithStreamingResponse:
70
+ """
71
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
72
+
73
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
74
+ """
75
+ return RealtimeWithStreamingResponse(self)
76
+
77
+ def connect(
78
+ self,
79
+ *,
80
+ model: str,
81
+ extra_query: Query = {},
82
+ extra_headers: Headers = {},
83
+ websocket_connection_options: WebsocketConnectionOptions = {},
84
+ ) -> RealtimeConnectionManager:
85
+ """
86
+ The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
87
+
88
+ Some notable benefits of the API include:
89
+
90
+ - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
91
+ - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
92
+ - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
93
+
94
+ The Realtime API is a stateful, event-based API that communicates over a WebSocket.
95
+ """
96
+ return RealtimeConnectionManager(
97
+ client=self._client,
98
+ extra_query=extra_query,
99
+ extra_headers=extra_headers,
100
+ websocket_connection_options=websocket_connection_options,
101
+ model=model,
102
+ )
103
+
104
+
105
+ class AsyncRealtime(AsyncAPIResource):
106
+ @cached_property
107
+ def sessions(self) -> AsyncSessions:
108
+ return AsyncSessions(self._client)
109
+
110
+ @cached_property
111
+ def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
112
+ """
113
+ This property can be used as a prefix for any HTTP method call to return
114
+ the raw response object instead of the parsed content.
115
+
116
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
117
+ """
118
+ return AsyncRealtimeWithRawResponse(self)
119
+
120
+ @cached_property
121
+ def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse:
122
+ """
123
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
124
+
125
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
126
+ """
127
+ return AsyncRealtimeWithStreamingResponse(self)
128
+
129
+ def connect(
130
+ self,
131
+ *,
132
+ model: str,
133
+ extra_query: Query = {},
134
+ extra_headers: Headers = {},
135
+ websocket_connection_options: WebsocketConnectionOptions = {},
136
+ ) -> AsyncRealtimeConnectionManager:
137
+ """
138
+ The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
139
+
140
+ Some notable benefits of the API include:
141
+
142
+ - Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
143
+ - Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
144
+ - Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
145
+
146
+ The Realtime API is a stateful, event-based API that communicates over a WebSocket.
147
+ """
148
+ return AsyncRealtimeConnectionManager(
149
+ client=self._client,
150
+ extra_query=extra_query,
151
+ extra_headers=extra_headers,
152
+ websocket_connection_options=websocket_connection_options,
153
+ model=model,
154
+ )
155
+
156
+
157
+ class RealtimeWithRawResponse:
158
+ def __init__(self, realtime: Realtime) -> None:
159
+ self._realtime = realtime
160
+
161
+ @cached_property
162
+ def sessions(self) -> SessionsWithRawResponse:
163
+ return SessionsWithRawResponse(self._realtime.sessions)
164
+
165
+
166
class AsyncRealtimeWithRawResponse:
    # Async counterpart of `RealtimeWithRawResponse`: exposes raw-response
    # variants of each sub-resource.
    def __init__(self, realtime: AsyncRealtime) -> None:
        self._realtime = realtime

    @cached_property
    def sessions(self) -> AsyncSessionsWithRawResponse:
        return AsyncSessionsWithRawResponse(self._realtime.sessions)
173
+
174
+
175
class RealtimeWithStreamingResponse:
    # Codegen wrapper: sub-resources return streaming responses that do not
    # eagerly read the body.
    def __init__(self, realtime: Realtime) -> None:
        self._realtime = realtime

    @cached_property
    def sessions(self) -> SessionsWithStreamingResponse:
        return SessionsWithStreamingResponse(self._realtime.sessions)
182
+
183
+
184
class AsyncRealtimeWithStreamingResponse:
    # Async counterpart of `RealtimeWithStreamingResponse`.
    def __init__(self, realtime: AsyncRealtime) -> None:
        self._realtime = realtime

    @cached_property
    def sessions(self) -> AsyncSessionsWithStreamingResponse:
        return AsyncSessionsWithStreamingResponse(self._realtime.sessions)
191
+
192
+
193
class AsyncRealtimeConnection:
    """Represents a live websocket connection to the Realtime API"""

    # Typed helpers for sending client events over this connection.
    session: AsyncRealtimeSessionResource
    response: AsyncRealtimeResponseResource
    conversation: AsyncRealtimeConversationResource
    input_audio_buffer: AsyncRealtimeInputAudioBufferResource

    _connection: AsyncWebsocketConnection

    def __init__(self, connection: AsyncWebsocketConnection) -> None:
        self._connection = connection

        self.session = AsyncRealtimeSessionResource(self)
        self.response = AsyncRealtimeResponseResource(self)
        self.conversation = AsyncRealtimeConversationResource(self)
        self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self)

    async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]:
        """
        An infinite-iterator that will continue to yield events until
        the connection is closed.
        """
        from websockets.exceptions import ConnectionClosedOK

        try:
            while True:
                yield await self.recv()
        except ConnectionClosedOK:
            # A clean close from the server simply ends iteration; any other
            # exception (including abnormal closes) propagates to the caller.
            return

    async def recv(self) -> RealtimeServerEvent:
        """
        Receive the next message from the connection and parses it into a `RealtimeServerEvent` object.

        Canceling this method is safe. There's no risk of losing data.
        """
        return self.parse_event(await self.recv_bytes())

    async def recv_bytes(self) -> bytes:
        """Receive the next message from the connection as raw bytes.

        Canceling this method is safe. There's no risk of losing data.

        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
        then you can call `.parse_event(data)`.
        """
        message = await self._connection.recv(decode=False)
        # logging formats %-style args lazily; the previous `f` prefix on this
        # literal was spurious (it contained no interpolation fields).
        log.debug("Received websocket message: %s", message)
        if not isinstance(message, bytes):
            # passing `decode=False` should always result in us getting `bytes` back
            raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}")

        return message

    async def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
        """Serialize *event* (model instance or plain param dict) to JSON and send it."""
        data = (
            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
            if isinstance(event, BaseModel)
            else json.dumps(await async_maybe_transform(event, RealtimeClientEventParam))
        )
        await self._connection.send(data)

    async def close(self, *, code: int = 1000, reason: str = "") -> None:
        """Close the websocket; code 1000 signals a normal closure."""
        await self._connection.close(code=code, reason=reason)

    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
        """
        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.

        This is helpful if you're using `.recv_bytes()`.
        """
        return cast(
            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
        )
268
+
269
+
270
class AsyncRealtimeConnectionManager:
    """
    Context manager over a `AsyncRealtimeConnection` that is returned by `beta.realtime.connect()`

    This context manager ensures that the connection will be closed when it exits.

    ---

    Note that if your application doesn't work well with the context manager approach then you
    can call the `.enter()` method directly to initiate a connection.

    **Warning**: You must remember to close the connection with `.close()`.

    ```py
    connection = await client.beta.realtime.connect(...).enter()
    # ...
    await connection.close()
    ```
    """

    def __init__(
        self,
        *,
        client: AsyncOpenAI,
        model: str,
        extra_query: Query,
        extra_headers: Headers,
        websocket_connection_options: WebsocketConnectionOptions,
    ) -> None:
        # Double-underscore names are name-mangled: internal state not meant
        # for subclass or caller access.
        self.__client = client
        self.__model = model
        self.__connection: AsyncRealtimeConnection | None = None
        self.__extra_query = extra_query
        self.__extra_headers = extra_headers
        self.__websocket_connection_options = websocket_connection_options

    async def __aenter__(self) -> AsyncRealtimeConnection:
        """
        👋 If your application doesn't work well with the context manager approach then you
        can call this method directly to initiate a connection.

        **Warning**: You must remember to close the connection with `.close()`.

        ```py
        connection = await client.beta.realtime.connect(...).enter()
        # ...
        await connection.close()
        ```
        """
        # `websockets` is an optional dependency — import lazily so the SDK
        # works without the `realtime` extra installed.
        try:
            from websockets.asyncio.client import connect
        except ImportError as exc:
            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc

        extra_query = self.__extra_query
        auth_headers = self.__client.auth_headers
        # Azure clients derive realtime auth/query parameters differently;
        # presumably `_configure_realtime` resolves a token — confirm against
        # the Azure client implementation.
        if is_async_azure_client(self.__client):
            extra_query, auth_headers = await self.__client._configure_realtime(self.__model, extra_query)

        url = self._prepare_url().copy_with(
            params={
                **self.__client.base_url.params,
                "model": self.__model,
                **extra_query,
            },
        )
        log.debug("Connecting to %s", url)
        if self.__websocket_connection_options:
            log.debug("Connection options: %s", self.__websocket_connection_options)

        self.__connection = AsyncRealtimeConnection(
            await connect(
                str(url),
                user_agent_header=self.__client.user_agent,
                additional_headers=_merge_mappings(
                    {
                        **auth_headers,
                        "OpenAI-Beta": "realtime=v1",
                    },
                    # Caller-supplied headers take precedence over auth/beta headers.
                    self.__extra_headers,
                ),
                **self.__websocket_connection_options,
            )
        )

        return self.__connection

    # Public alias so users can open a connection without `async with`.
    enter = __aenter__

    def _prepare_url(self) -> httpx.URL:
        # Prefer an explicitly configured websocket base URL; otherwise reuse
        # the HTTP base URL with the scheme swapped to `wss`.
        if self.__client.websocket_base_url is not None:
            base_url = httpx.URL(self.__client.websocket_base_url)
        else:
            base_url = self.__client._base_url.copy_with(scheme="wss")

        # Append `/realtime` without producing a double slash.
        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
        return base_url.copy_with(raw_path=merge_raw_path)

    async def __aexit__(
        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        # Close only if a connection was actually established.
        if self.__connection is not None:
            await self.__connection.close()
373
+
374
+
375
class RealtimeConnection:
    """Represents a live websocket connection to the Realtime API"""

    # Typed helpers for sending client events over this connection.
    session: RealtimeSessionResource
    response: RealtimeResponseResource
    conversation: RealtimeConversationResource
    input_audio_buffer: RealtimeInputAudioBufferResource

    _connection: WebsocketConnection

    def __init__(self, connection: WebsocketConnection) -> None:
        self._connection = connection

        self.session = RealtimeSessionResource(self)
        self.response = RealtimeResponseResource(self)
        self.conversation = RealtimeConversationResource(self)
        self.input_audio_buffer = RealtimeInputAudioBufferResource(self)

    def __iter__(self) -> Iterator[RealtimeServerEvent]:
        """
        An infinite-iterator that will continue to yield events until
        the connection is closed.
        """
        from websockets.exceptions import ConnectionClosedOK

        try:
            while True:
                yield self.recv()
        except ConnectionClosedOK:
            # A clean close from the server simply ends iteration; any other
            # exception (including abnormal closes) propagates to the caller.
            return

    def recv(self) -> RealtimeServerEvent:
        """
        Receive the next message from the connection and parses it into a `RealtimeServerEvent` object.

        Canceling this method is safe. There's no risk of losing data.
        """
        return self.parse_event(self.recv_bytes())

    def recv_bytes(self) -> bytes:
        """Receive the next message from the connection as raw bytes.

        Canceling this method is safe. There's no risk of losing data.

        If you want to parse the message into a `RealtimeServerEvent` object like `.recv()` does,
        then you can call `.parse_event(data)`.
        """
        message = self._connection.recv(decode=False)
        # logging formats %-style args lazily; the previous `f` prefix on this
        # literal was spurious (it contained no interpolation fields).
        log.debug("Received websocket message: %s", message)
        if not isinstance(message, bytes):
            # passing `decode=False` should always result in us getting `bytes` back
            raise TypeError(f"Expected `.recv(decode=False)` to return `bytes` but got {type(message)}")

        return message

    def send(self, event: RealtimeClientEvent | RealtimeClientEventParam) -> None:
        """Serialize *event* (model instance or plain param dict) to JSON and send it."""
        data = (
            event.to_json(use_api_names=True, exclude_defaults=True, exclude_unset=True)
            if isinstance(event, BaseModel)
            else json.dumps(maybe_transform(event, RealtimeClientEventParam))
        )
        self._connection.send(data)

    def close(self, *, code: int = 1000, reason: str = "") -> None:
        """Close the websocket; code 1000 signals a normal closure."""
        self._connection.close(code=code, reason=reason)

    def parse_event(self, data: str | bytes) -> RealtimeServerEvent:
        """
        Converts a raw `str` or `bytes` message into a `RealtimeServerEvent` object.

        This is helpful if you're using `.recv_bytes()`.
        """
        return cast(
            RealtimeServerEvent, construct_type_unchecked(value=json.loads(data), type_=cast(Any, RealtimeServerEvent))
        )
450
+
451
+
452
class RealtimeConnectionManager:
    """
    Context manager over a `RealtimeConnection` that is returned by `beta.realtime.connect()`

    This context manager ensures that the connection will be closed when it exits.

    ---

    Note that if your application doesn't work well with the context manager approach then you
    can call the `.enter()` method directly to initiate a connection.

    **Warning**: You must remember to close the connection with `.close()`.

    ```py
    connection = client.beta.realtime.connect(...).enter()
    # ...
    connection.close()
    ```
    """

    def __init__(
        self,
        *,
        client: OpenAI,
        model: str,
        extra_query: Query,
        extra_headers: Headers,
        websocket_connection_options: WebsocketConnectionOptions,
    ) -> None:
        # Double-underscore names are name-mangled: internal state not meant
        # for subclass or caller access.
        self.__client = client
        self.__model = model
        self.__connection: RealtimeConnection | None = None
        self.__extra_query = extra_query
        self.__extra_headers = extra_headers
        self.__websocket_connection_options = websocket_connection_options

    def __enter__(self) -> RealtimeConnection:
        """
        👋 If your application doesn't work well with the context manager approach then you
        can call this method directly to initiate a connection.

        **Warning**: You must remember to close the connection with `.close()`.

        ```py
        connection = client.beta.realtime.connect(...).enter()
        # ...
        connection.close()
        ```
        """
        # `websockets` is an optional dependency — import lazily so the SDK
        # works without the `realtime` extra installed.
        try:
            from websockets.sync.client import connect
        except ImportError as exc:
            raise OpenAIError("You need to install `openai[realtime]` to use this method") from exc

        extra_query = self.__extra_query
        auth_headers = self.__client.auth_headers
        # Azure clients derive realtime auth/query parameters differently;
        # presumably `_configure_realtime` resolves a token — confirm against
        # the Azure client implementation.
        if is_azure_client(self.__client):
            extra_query, auth_headers = self.__client._configure_realtime(self.__model, extra_query)

        url = self._prepare_url().copy_with(
            params={
                **self.__client.base_url.params,
                "model": self.__model,
                **extra_query,
            },
        )
        log.debug("Connecting to %s", url)
        if self.__websocket_connection_options:
            log.debug("Connection options: %s", self.__websocket_connection_options)

        self.__connection = RealtimeConnection(
            connect(
                str(url),
                user_agent_header=self.__client.user_agent,
                additional_headers=_merge_mappings(
                    {
                        **auth_headers,
                        "OpenAI-Beta": "realtime=v1",
                    },
                    # Caller-supplied headers take precedence over auth/beta headers.
                    self.__extra_headers,
                ),
                **self.__websocket_connection_options,
            )
        )

        return self.__connection

    # Public alias so users can open a connection without `with`.
    enter = __enter__

    def _prepare_url(self) -> httpx.URL:
        # Prefer an explicitly configured websocket base URL; otherwise reuse
        # the HTTP base URL with the scheme swapped to `wss`.
        if self.__client.websocket_base_url is not None:
            base_url = httpx.URL(self.__client.websocket_base_url)
        else:
            base_url = self.__client._base_url.copy_with(scheme="wss")

        # Append `/realtime` without producing a double slash.
        merge_raw_path = base_url.raw_path.rstrip(b"/") + b"/realtime"
        return base_url.copy_with(raw_path=merge_raw_path)

    def __exit__(
        self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        # Close only if a connection was actually established.
        if self.__connection is not None:
            self.__connection.close()
555
+
556
+
557
class BaseRealtimeConnectionResource:
    """Shared base for the typed event-sending helpers on `RealtimeConnection`."""

    def __init__(self, connection: RealtimeConnection) -> None:
        self._connection = connection
560
+
561
+
562
class RealtimeSessionResource(BaseRealtimeConnectionResource):
    """Helper for sending `session.*` client events over the connection."""

    def update(self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to update the session’s default configuration.

        The client may
        send this event at any time to update the session configuration, and any
        field may be updated at any time, except for "voice". The server will respond
        with a `session.updated` event that shows the full effective configuration.
        Only fields that are present are updated, thus the correct way to clear a
        field like "instructions" is to pass an empty string.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "session.update", "session": session, "event_id": event_id}),
            )
        )
579
+
580
+
581
class RealtimeResponseResource(BaseRealtimeConnectionResource):
    """Helper for sending `response.*` client events over the connection."""

    def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to cancel an in-progress response.

        The server will respond
        with a `response.cancelled` event or an error if there is no response to
        cancel.
        """
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
            )
        )

    def create(
        self,
        *,
        event_id: str | NotGiven = NOT_GIVEN,
        response: response_create_event_param.Response | NotGiven = NOT_GIVEN,
    ) -> None:
        """
        This event instructs the server to create a Response, which means triggering
        model inference. When in Server VAD mode, the server will create Responses
        automatically.

        A Response will include at least one Item, and may have two, in which case
        the second will be a function call. These Items will be appended to the
        conversation history.

        The server will respond with a `response.created` event, events for Items
        and content created, and finally a `response.done` event to indicate the
        Response is complete.

        The `response.create` event includes inference configuration like
        `instructions`, and `temperature`. These fields will override the Session's
        configuration for this Response only.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
            )
        )
625
+
626
+
627
class RealtimeConversationResource(BaseRealtimeConnectionResource):
    """Namespace for conversation-scoped client events (see `.item`)."""

    @cached_property
    def item(self) -> RealtimeConversationItemResource:
        return RealtimeConversationItemResource(self._connection)
631
+
632
+
633
class RealtimeConversationItemResource(BaseRealtimeConnectionResource):
    """Helper for sending `conversation.item.*` client events over the connection."""

    def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event when you want to remove any item from the conversation
        history.

        The server will respond with a `conversation.item.deleted` event,
        unless the item does not exist in the conversation history, in which case the
        server will respond with an error.
        """
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
            )
        )

    def create(
        self,
        *,
        item: ConversationItemParam,
        event_id: str | NotGiven = NOT_GIVEN,
        previous_item_id: str | NotGiven = NOT_GIVEN,
    ) -> None:
        """
        Add a new Item to the Conversation's context, including messages, function
        calls, and function call responses. This event can be used both to populate a
        "history" of the conversation and to add new items mid-stream, but has the
        current limitation that it cannot populate assistant audio messages.

        If successful, the server will respond with a `conversation.item.created`
        event, otherwise an `error` event will be sent.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given(
                    {
                        "type": "conversation.item.create",
                        "item": item,
                        "event_id": event_id,
                        "previous_item_id": previous_item_id,
                    }
                ),
            )
        )

    def truncate(
        self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
    ) -> None:
        """Send this event to truncate a previous assistant message’s audio.

        The server
        will produce audio faster than realtime, so this event is useful when the user
        interrupts to truncate audio that has already been sent to the client but not
        yet played. This will synchronize the server's understanding of the audio with
        the client's playback.

        Truncating audio will delete the server-side text transcript to ensure there
        is not text in the context that hasn't been heard by the user.

        If successful, the server will respond with a `conversation.item.truncated`
        event.
        """
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given(
                    {
                        "type": "conversation.item.truncate",
                        "audio_end_ms": audio_end_ms,
                        "content_index": content_index,
                        "item_id": item_id,
                        "event_id": event_id,
                    }
                ),
            )
        )
710
+
711
+
712
class RealtimeInputAudioBufferResource(BaseRealtimeConnectionResource):
    """Helper for sending `input_audio_buffer.*` client events over the connection."""

    def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to clear the audio bytes in the buffer.

        The server will
        respond with an `input_audio_buffer.cleared` event.
        """
        self._connection.send(
            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
        )

    def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """
        Send this event to commit the user input audio buffer, which will create a
        new user message item in the conversation. This event will produce an error
        if the input audio buffer is empty. When in Server VAD mode, the client does
        not need to send this event, the server will commit the audio buffer
        automatically.

        Committing the input audio buffer will trigger input audio transcription
        (if enabled in session configuration), but it will not create a response
        from the model. The server will respond with an `input_audio_buffer.committed`
        event.
        """
        self._connection.send(
            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
        )

    def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to append audio bytes to the input audio buffer.

        The audio
        buffer is temporary storage you can write to and later commit. In Server VAD
        mode, the audio buffer is used to detect speech and the server will decide
        when to commit. When Server VAD is disabled, you must commit the audio buffer
        manually.

        The client may choose how much audio to place in each event up to a maximum
        of 15 MiB, for example streaming smaller chunks from the client may allow the
        VAD to be more responsive. Unlike most other client events, the server will
        not send a confirmation response to this event.
        """
        self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
            )
        )
760
+
761
+
762
class BaseAsyncRealtimeConnectionResource:
    """Shared base for the typed event-sending helpers on `AsyncRealtimeConnection`."""

    def __init__(self, connection: AsyncRealtimeConnection) -> None:
        self._connection = connection
765
+
766
+
767
class AsyncRealtimeSessionResource(BaseAsyncRealtimeConnectionResource):
    """Async helper for sending `session.*` client events over the connection."""

    async def update(
        self, *, session: session_update_event_param.Session, event_id: str | NotGiven = NOT_GIVEN
    ) -> None:
        """Send this event to update the session’s default configuration.

        The client may
        send this event at any time to update the session configuration, and any
        field may be updated at any time, except for "voice". The server will respond
        with a `session.updated` event that shows the full effective configuration.
        Only fields that are present are updated, thus the correct way to clear a
        field like "instructions" is to pass an empty string.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "session.update", "session": session, "event_id": event_id}),
            )
        )
786
+
787
+
788
class AsyncRealtimeResponseResource(BaseAsyncRealtimeConnectionResource):
    """Async helper for sending `response.*` client events over the connection."""

    async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to cancel an in-progress response.

        The server will respond
        with a `response.cancelled` event or an error if there is no response to
        cancel.
        """
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
            )
        )

    async def create(
        self,
        *,
        event_id: str | NotGiven = NOT_GIVEN,
        response: response_create_event_param.Response | NotGiven = NOT_GIVEN,
    ) -> None:
        """
        This event instructs the server to create a Response, which means triggering
        model inference. When in Server VAD mode, the server will create Responses
        automatically.

        A Response will include at least one Item, and may have two, in which case
        the second will be a function call. These Items will be appended to the
        conversation history.

        The server will respond with a `response.created` event, events for Items
        and content created, and finally a `response.done` event to indicate the
        Response is complete.

        The `response.create` event includes inference configuration like
        `instructions`, and `temperature`. These fields will override the Session's
        configuration for this Response only.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
            )
        )
832
+
833
+
834
class AsyncRealtimeConversationResource(BaseAsyncRealtimeConnectionResource):
    """Namespace for conversation-scoped client events (see `.item`)."""

    @cached_property
    def item(self) -> AsyncRealtimeConversationItemResource:
        return AsyncRealtimeConversationItemResource(self._connection)
838
+
839
+
840
class AsyncRealtimeConversationItemResource(BaseAsyncRealtimeConnectionResource):
    """Async helper for sending `conversation.item.*` client events over the connection."""

    async def delete(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event when you want to remove any item from the conversation
        history.

        The server will respond with a `conversation.item.deleted` event,
        unless the item does not exist in the conversation history, in which case the
        server will respond with an error.
        """
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "conversation.item.delete", "item_id": item_id, "event_id": event_id}),
            )
        )

    async def create(
        self,
        *,
        item: ConversationItemParam,
        event_id: str | NotGiven = NOT_GIVEN,
        previous_item_id: str | NotGiven = NOT_GIVEN,
    ) -> None:
        """
        Add a new Item to the Conversation's context, including messages, function
        calls, and function call responses. This event can be used both to populate a
        "history" of the conversation and to add new items mid-stream, but has the
        current limitation that it cannot populate assistant audio messages.

        If successful, the server will respond with a `conversation.item.created`
        event, otherwise an `error` event will be sent.
        """
        # `strip_not_given` drops NOT_GIVEN sentinels so omitted fields never
        # reach the wire payload.
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given(
                    {
                        "type": "conversation.item.create",
                        "item": item,
                        "event_id": event_id,
                        "previous_item_id": previous_item_id,
                    }
                ),
            )
        )

    async def truncate(
        self, *, audio_end_ms: int, content_index: int, item_id: str, event_id: str | NotGiven = NOT_GIVEN
    ) -> None:
        """Send this event to truncate a previous assistant message’s audio.

        The server
        will produce audio faster than realtime, so this event is useful when the user
        interrupts to truncate audio that has already been sent to the client but not
        yet played. This will synchronize the server's understanding of the audio with
        the client's playback.

        Truncating audio will delete the server-side text transcript to ensure there
        is not text in the context that hasn't been heard by the user.

        If successful, the server will respond with a `conversation.item.truncated`
        event.
        """
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given(
                    {
                        "type": "conversation.item.truncate",
                        "audio_end_ms": audio_end_ms,
                        "content_index": content_index,
                        "item_id": item_id,
                        "event_id": event_id,
                    }
                ),
            )
        )
917
+
918
+
919
class AsyncRealtimeInputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
    """Async helper for sending `input_audio_buffer.*` client events over the connection."""

    async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to clear the audio bytes in the buffer.

        The server will
        respond with an `input_audio_buffer.cleared` event.
        """
        await self._connection.send(
            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
        )

    async def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """
        Send this event to commit the user input audio buffer, which will create a
        new user message item in the conversation. This event will produce an error
        if the input audio buffer is empty. When in Server VAD mode, the client does
        not need to send this event, the server will commit the audio buffer
        automatically.

        Committing the input audio buffer will trigger input audio transcription
        (if enabled in session configuration), but it will not create a response
        from the model. The server will respond with an `input_audio_buffer.committed`
        event.
        """
        await self._connection.send(
            cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
        )

    async def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
        """Send this event to append audio bytes to the input audio buffer.

        The audio
        buffer is temporary storage you can write to and later commit. In Server VAD
        mode, the audio buffer is used to detect speech and the server will decide
        when to commit. When Server VAD is disabled, you must commit the audio buffer
        manually.

        The client may choose how much audio to place in each event up to a maximum
        of 15 MiB, for example streaming smaller chunks from the client may allow the
        VAD to be more responsive. Unlike most other client events, the server will
        not send a confirmation response to this event.
        """
        await self._connection.send(
            cast(
                RealtimeClientEventParam,
                strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
            )
        )
.venv/lib/python3.11/site-packages/openai/resources/beta/realtime/sessions.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List, Union, Iterable
6
+ from typing_extensions import Literal
7
+
8
+ import httpx
9
+
10
+ from .... import _legacy_response
11
+ from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
12
+ from ...._utils import (
13
+ maybe_transform,
14
+ async_maybe_transform,
15
+ )
16
+ from ...._compat import cached_property
17
+ from ...._resource import SyncAPIResource, AsyncAPIResource
18
+ from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
19
+ from ...._base_client import make_request_options
20
+ from ....types.beta.realtime import session_create_params
21
+ from ....types.beta.realtime.session_create_response import SessionCreateResponse
22
+
23
+ __all__ = ["Sessions", "AsyncSessions"]
24
+
25
+
26
class Sessions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SessionsWithStreamingResponse(self)

    def create(
        self,
        *,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SessionCreateResponse:
        """
        Create an ephemeral API token for use in client-side applications with the
        Realtime API. Can be configured with the same session parameters as the
        `session.update` client event.

        It responds with a session object, plus a `client_secret` key which contains a
        usable ephemeral API token that can be used to authenticate browser clients for
        the Realtime API.

        Args:
          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
              (mono), and little-endian byte order.

          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
              `null` to turn off once on. Input audio transcription is not native to the
              model, since the model consumes audio directly. Transcription runs
              asynchronously through
              [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
              and should be treated as rough guidance rather than the representation
              understood by the model. The client can optionally set the language and prompt
              for transcription, these fields will be passed to the Whisper API.

          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format, (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.

              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.

          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.

          modalities: The set of modalities the model can respond with. To disable audio, set this to
              ["text"].

          model: The Realtime model used for this session.

          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, output audio is sampled at a rate of 24kHz.

          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.

          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
              a function.

          tools: Tools (functions) available to the model.

          turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD
              means that the model will detect the start and end of speech based on audio
              volume and respond at the end of user speech.

          voice: The voice the model uses to respond. Voice cannot be changed during the session
              once the model has responded with audio at least once. Current voice options are
              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Always send the beta opt-in header; caller-supplied extra_headers
        # are merged after it, so they win on a key collision.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return self._post(
            "/realtime/sessions",
            body=maybe_transform(
                {
                    "input_audio_format": input_audio_format,
                    "input_audio_transcription": input_audio_transcription,
                    "instructions": instructions,
                    "max_response_output_tokens": max_response_output_tokens,
                    "modalities": modalities,
                    "model": model,
                    "output_audio_format": output_audio_format,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "turn_detection": turn_detection,
                    "voice": voice,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SessionCreateResponse,
        )
170
+
171
+
172
class AsyncSessions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSessionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSessionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSessionsWithStreamingResponse(self)

    async def create(
        self,
        *,
        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
        instructions: str | NotGiven = NOT_GIVEN,
        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
        model: Literal[
            "gpt-4o-realtime-preview",
            "gpt-4o-realtime-preview-2024-10-01",
            "gpt-4o-realtime-preview-2024-12-17",
            "gpt-4o-mini-realtime-preview",
            "gpt-4o-mini-realtime-preview-2024-12-17",
        ]
        | NotGiven = NOT_GIVEN,
        output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
        temperature: float | NotGiven = NOT_GIVEN,
        tool_choice: str | NotGiven = NOT_GIVEN,
        tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN,
        turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN,
        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SessionCreateResponse:
        """
        Create an ephemeral API token for use in client-side applications with the
        Realtime API. Can be configured with the same session parameters as the
        `session.update` client event.

        It responds with a session object, plus a `client_secret` key which contains a
        usable ephemeral API token that can be used to authenticate browser clients for
        the Realtime API.

        Args:
          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
              (mono), and little-endian byte order.

          input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
              `null` to turn off once on. Input audio transcription is not native to the
              model, since the model consumes audio directly. Transcription runs
              asynchronously through
              [OpenAI Whisper transcription](https://platform.openai.com/docs/api-reference/audio/createTranscription)
              and should be treated as rough guidance rather than the representation
              understood by the model. The client can optionally set the language and prompt
              for transcription, these fields will be passed to the Whisper API.

          instructions: The default system instructions (i.e. system message) prepended to model calls.
              This field allows the client to guide the model on desired responses. The model
              can be instructed on response content and format, (e.g. "be extremely succinct",
              "act friendly", "here are examples of good responses") and on audio behavior
              (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
              instructions are not guaranteed to be followed by the model, but they provide
              guidance to the model on the desired behavior.

              Note that the server sets default instructions which will be used if this field
              is not set and are visible in the `session.created` event at the start of the
              session.

          max_response_output_tokens: Maximum number of output tokens for a single assistant response, inclusive of
              tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
              `inf` for the maximum available tokens for a given model. Defaults to `inf`.

          modalities: The set of modalities the model can respond with. To disable audio, set this to
              ["text"].

          model: The Realtime model used for this session.

          output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
              For `pcm16`, output audio is sampled at a rate of 24kHz.

          temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.

          tool_choice: How the model chooses tools. Options are `auto`, `none`, `required`, or specify
              a function.

          tools: Tools (functions) available to the model.

          turn_detection: Configuration for turn detection. Can be set to `null` to turn off. Server VAD
              means that the model will detect the start and end of speech based on audio
              volume and respond at the end of user speech.

          voice: The voice the model uses to respond. Voice cannot be changed during the session
              once the model has responded with audio at least once. Current voice options are
              `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer` and `verse`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Always send the beta opt-in header; caller-supplied extra_headers
        # are merged after it, so they win on a key collision.
        extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
        return await self._post(
            "/realtime/sessions",
            body=await async_maybe_transform(
                {
                    "input_audio_format": input_audio_format,
                    "input_audio_transcription": input_audio_transcription,
                    "instructions": instructions,
                    "max_response_output_tokens": max_response_output_tokens,
                    "modalities": modalities,
                    "model": model,
                    "output_audio_format": output_audio_format,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "turn_detection": turn_detection,
                    "voice": voice,
                },
                session_create_params.SessionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=SessionCreateResponse,
        )
316
+
317
+
318
class SessionsWithRawResponse:
    """Companion to `Sessions` whose methods return the raw HTTP response."""

    def __init__(self, sessions: Sessions) -> None:
        # Hold on to the wrapped resource and expose a raw-response
        # flavour of each of its API methods.
        self._sessions = sessions
        self.create = _legacy_response.to_raw_response_wrapper(sessions.create)
325
+
326
+
327
class AsyncSessionsWithRawResponse:
    """Companion to `AsyncSessions` whose methods return the raw HTTP response."""

    def __init__(self, sessions: AsyncSessions) -> None:
        # Hold on to the wrapped resource and expose a raw-response
        # flavour of each of its API methods.
        self._sessions = sessions
        self.create = _legacy_response.async_to_raw_response_wrapper(sessions.create)
334
+
335
+
336
class SessionsWithStreamingResponse:
    """Companion to `Sessions` whose methods stream the response body lazily."""

    def __init__(self, sessions: Sessions) -> None:
        # Hold on to the wrapped resource and expose a streamed-response
        # flavour of each of its API methods.
        self._sessions = sessions
        self.create = to_streamed_response_wrapper(sessions.create)
343
+
344
+
345
class AsyncSessionsWithStreamingResponse:
    """Companion to `AsyncSessions` whose methods stream the response body lazily."""

    def __init__(self, sessions: AsyncSessions) -> None:
        # Hold on to the wrapped resource and expose a streamed-response
        # flavour of each of its API methods.
        self._sessions = sessions
        self.create = async_to_streamed_response_wrapper(sessions.create)
.venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__init__.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .files import (
    Files,
    AsyncFiles,
    FilesWithRawResponse,
    AsyncFilesWithRawResponse,
    FilesWithStreamingResponse,
    AsyncFilesWithStreamingResponse,
)
from .file_batches import (
    FileBatches,
    AsyncFileBatches,
    FileBatchesWithRawResponse,
    AsyncFileBatchesWithRawResponse,
    FileBatchesWithStreamingResponse,
    AsyncFileBatchesWithStreamingResponse,
)
from .vector_stores import (
    VectorStores,
    AsyncVectorStores,
    VectorStoresWithRawResponse,
    AsyncVectorStoresWithRawResponse,
    VectorStoresWithStreamingResponse,
    AsyncVectorStoresWithStreamingResponse,
)

# Public API of this subpackage: re-export exactly the names imported above.
__all__ = [
    "Files",
    "AsyncFiles",
    "FilesWithRawResponse",
    "AsyncFilesWithRawResponse",
    "FilesWithStreamingResponse",
    "AsyncFilesWithStreamingResponse",
    "FileBatches",
    "AsyncFileBatches",
    "FileBatchesWithRawResponse",
    "AsyncFileBatchesWithRawResponse",
    "FileBatchesWithStreamingResponse",
    "AsyncFileBatchesWithStreamingResponse",
    "VectorStores",
    "AsyncVectorStores",
    "VectorStoresWithRawResponse",
    "AsyncVectorStoresWithRawResponse",
    "VectorStoresWithStreamingResponse",
    "AsyncVectorStoresWithStreamingResponse",
]
.venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.26 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/file_batches.cpython-311.pyc ADDED
Binary file (26.4 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/files.cpython-311.pyc ADDED
Binary file (22.5 kB). View file
 
.venv/lib/python3.11/site-packages/openai/resources/beta/vector_stores/__pycache__/vector_stores.cpython-311.pyc ADDED
Binary file (23.1 kB). View file