koichi12 commited on
Commit
8b1c684
·
verified ·
1 Parent(s): 5ddb512

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .venv/lib/python3.11/site-packages/google/ai/generativelanguage/__init__.py +426 -0
  3. .venv/lib/python3.11/site-packages/google/ai/generativelanguage/__pycache__/__init__.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/google/ai/generativelanguage/__pycache__/gapic_version.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/google/ai/generativelanguage/gapic_version.py +16 -0
  6. .venv/lib/python3.11/site-packages/google/ai/generativelanguage/py.typed +2 -0
  7. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/retriever_service/__pycache__/client.cpython-311.pyc +3 -0
  8. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/__init__.py +387 -0
  9. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/gapic_metadata.json +1005 -0
  10. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/gapic_version.py +16 -0
  11. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/py.typed +2 -0
  12. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/__pycache__/generative_service.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/cache_service.py +167 -0
  14. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/cached_content.py +182 -0
  15. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/citation.py +101 -0
  16. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/content.py +819 -0
  17. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/discuss_service.py +356 -0
  18. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/file.py +174 -0
  19. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/file_service.py +145 -0
  20. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/generative_service.py +1751 -0
  21. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/model.py +171 -0
  22. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/model_service.py +332 -0
  23. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/permission.py +141 -0
  24. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/permission_service.py +220 -0
  25. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/prediction_service.py +79 -0
  26. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/retriever.py +411 -0
  27. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/retriever_service.py +793 -0
  28. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/safety.py +276 -0
  29. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/text_service.py +441 -0
  30. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/tuned_model.py +442 -0
  31. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/__pycache__/__init__.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/__pycache__/gapic_version.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__init__.py +22 -0
  34. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/__init__.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/async_client.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/client.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/async_client.py +628 -0
  38. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/client.py +1016 -0
  39. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__init__.py +36 -0
  40. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/base.py +199 -0
  47. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/grpc.py +394 -0
  48. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/grpc_asyncio.py +439 -0
  49. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/rest.py +578 -0
  50. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/rest_base.py +205 -0
.gitattributes CHANGED
@@ -192,3 +192,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/torch/_inductor/_
192
  .venv/lib/python3.11/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
193
  .venv/lib/python3.11/site-packages/pillow.libs/libjpeg-77ae51ab.so.62.4.0 filter=lfs diff=lfs merge=lfs -text
194
  .venv/lib/python3.11/site-packages/pillow.libs/libharfbuzz-89381d8f.so.0.60850.0 filter=lfs diff=lfs merge=lfs -text
 
 
192
  .venv/lib/python3.11/site-packages/pillow.libs/libpng16-58efbb84.so.16.43.0 filter=lfs diff=lfs merge=lfs -text
193
  .venv/lib/python3.11/site-packages/pillow.libs/libjpeg-77ae51ab.so.62.4.0 filter=lfs diff=lfs merge=lfs -text
194
  .venv/lib/python3.11/site-packages/pillow.libs/libharfbuzz-89381d8f.so.0.60850.0 filter=lfs diff=lfs merge=lfs -text
195
+ .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/retriever_service/__pycache__/client.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/google/ai/generativelanguage/__init__.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from google.ai.generativelanguage import gapic_version as package_version
17
+
18
+ __version__ = package_version.__version__
19
+
20
+
21
+ from google.ai.generativelanguage_v1beta.services.cache_service.async_client import (
22
+ CacheServiceAsyncClient,
23
+ )
24
+ from google.ai.generativelanguage_v1beta.services.cache_service.client import (
25
+ CacheServiceClient,
26
+ )
27
+ from google.ai.generativelanguage_v1beta.services.discuss_service.async_client import (
28
+ DiscussServiceAsyncClient,
29
+ )
30
+ from google.ai.generativelanguage_v1beta.services.discuss_service.client import (
31
+ DiscussServiceClient,
32
+ )
33
+ from google.ai.generativelanguage_v1beta.services.file_service.async_client import (
34
+ FileServiceAsyncClient,
35
+ )
36
+ from google.ai.generativelanguage_v1beta.services.file_service.client import (
37
+ FileServiceClient,
38
+ )
39
+ from google.ai.generativelanguage_v1beta.services.generative_service.async_client import (
40
+ GenerativeServiceAsyncClient,
41
+ )
42
+ from google.ai.generativelanguage_v1beta.services.generative_service.client import (
43
+ GenerativeServiceClient,
44
+ )
45
+ from google.ai.generativelanguage_v1beta.services.model_service.async_client import (
46
+ ModelServiceAsyncClient,
47
+ )
48
+ from google.ai.generativelanguage_v1beta.services.model_service.client import (
49
+ ModelServiceClient,
50
+ )
51
+ from google.ai.generativelanguage_v1beta.services.permission_service.async_client import (
52
+ PermissionServiceAsyncClient,
53
+ )
54
+ from google.ai.generativelanguage_v1beta.services.permission_service.client import (
55
+ PermissionServiceClient,
56
+ )
57
+ from google.ai.generativelanguage_v1beta.services.prediction_service.async_client import (
58
+ PredictionServiceAsyncClient,
59
+ )
60
+ from google.ai.generativelanguage_v1beta.services.prediction_service.client import (
61
+ PredictionServiceClient,
62
+ )
63
+ from google.ai.generativelanguage_v1beta.services.retriever_service.async_client import (
64
+ RetrieverServiceAsyncClient,
65
+ )
66
+ from google.ai.generativelanguage_v1beta.services.retriever_service.client import (
67
+ RetrieverServiceClient,
68
+ )
69
+ from google.ai.generativelanguage_v1beta.services.text_service.async_client import (
70
+ TextServiceAsyncClient,
71
+ )
72
+ from google.ai.generativelanguage_v1beta.services.text_service.client import (
73
+ TextServiceClient,
74
+ )
75
+ from google.ai.generativelanguage_v1beta.types.cache_service import (
76
+ CreateCachedContentRequest,
77
+ DeleteCachedContentRequest,
78
+ GetCachedContentRequest,
79
+ ListCachedContentsRequest,
80
+ ListCachedContentsResponse,
81
+ UpdateCachedContentRequest,
82
+ )
83
+ from google.ai.generativelanguage_v1beta.types.cached_content import CachedContent
84
+ from google.ai.generativelanguage_v1beta.types.citation import (
85
+ CitationMetadata,
86
+ CitationSource,
87
+ )
88
+ from google.ai.generativelanguage_v1beta.types.content import (
89
+ Blob,
90
+ CodeExecution,
91
+ CodeExecutionResult,
92
+ Content,
93
+ DynamicRetrievalConfig,
94
+ ExecutableCode,
95
+ FileData,
96
+ FunctionCall,
97
+ FunctionCallingConfig,
98
+ FunctionDeclaration,
99
+ FunctionResponse,
100
+ GoogleSearchRetrieval,
101
+ GroundingPassage,
102
+ GroundingPassages,
103
+ Part,
104
+ Schema,
105
+ Tool,
106
+ ToolConfig,
107
+ Type,
108
+ )
109
+ from google.ai.generativelanguage_v1beta.types.discuss_service import (
110
+ CountMessageTokensRequest,
111
+ CountMessageTokensResponse,
112
+ Example,
113
+ GenerateMessageRequest,
114
+ GenerateMessageResponse,
115
+ Message,
116
+ MessagePrompt,
117
+ )
118
+ from google.ai.generativelanguage_v1beta.types.file import File, VideoMetadata
119
+ from google.ai.generativelanguage_v1beta.types.file_service import (
120
+ CreateFileRequest,
121
+ CreateFileResponse,
122
+ DeleteFileRequest,
123
+ GetFileRequest,
124
+ ListFilesRequest,
125
+ ListFilesResponse,
126
+ )
127
+ from google.ai.generativelanguage_v1beta.types.generative_service import (
128
+ AttributionSourceId,
129
+ BatchEmbedContentsRequest,
130
+ BatchEmbedContentsResponse,
131
+ Candidate,
132
+ ContentEmbedding,
133
+ CountTokensRequest,
134
+ CountTokensResponse,
135
+ EmbedContentRequest,
136
+ EmbedContentResponse,
137
+ GenerateAnswerRequest,
138
+ GenerateAnswerResponse,
139
+ GenerateContentRequest,
140
+ GenerateContentResponse,
141
+ GenerationConfig,
142
+ GroundingAttribution,
143
+ GroundingChunk,
144
+ GroundingMetadata,
145
+ GroundingSupport,
146
+ LogprobsResult,
147
+ PrebuiltVoiceConfig,
148
+ RetrievalMetadata,
149
+ SearchEntryPoint,
150
+ Segment,
151
+ SemanticRetrieverConfig,
152
+ SpeechConfig,
153
+ TaskType,
154
+ VoiceConfig,
155
+ )
156
+ from google.ai.generativelanguage_v1beta.types.model import Model
157
+ from google.ai.generativelanguage_v1beta.types.model_service import (
158
+ CreateTunedModelMetadata,
159
+ CreateTunedModelRequest,
160
+ DeleteTunedModelRequest,
161
+ GetModelRequest,
162
+ GetTunedModelRequest,
163
+ ListModelsRequest,
164
+ ListModelsResponse,
165
+ ListTunedModelsRequest,
166
+ ListTunedModelsResponse,
167
+ UpdateTunedModelRequest,
168
+ )
169
+ from google.ai.generativelanguage_v1beta.types.permission import Permission
170
+ from google.ai.generativelanguage_v1beta.types.permission_service import (
171
+ CreatePermissionRequest,
172
+ DeletePermissionRequest,
173
+ GetPermissionRequest,
174
+ ListPermissionsRequest,
175
+ ListPermissionsResponse,
176
+ TransferOwnershipRequest,
177
+ TransferOwnershipResponse,
178
+ UpdatePermissionRequest,
179
+ )
180
+ from google.ai.generativelanguage_v1beta.types.prediction_service import (
181
+ PredictRequest,
182
+ PredictResponse,
183
+ )
184
+ from google.ai.generativelanguage_v1beta.types.retriever import (
185
+ Chunk,
186
+ ChunkData,
187
+ Condition,
188
+ Corpus,
189
+ CustomMetadata,
190
+ Document,
191
+ MetadataFilter,
192
+ StringList,
193
+ )
194
+ from google.ai.generativelanguage_v1beta.types.retriever_service import (
195
+ BatchCreateChunksRequest,
196
+ BatchCreateChunksResponse,
197
+ BatchDeleteChunksRequest,
198
+ BatchUpdateChunksRequest,
199
+ BatchUpdateChunksResponse,
200
+ CreateChunkRequest,
201
+ CreateCorpusRequest,
202
+ CreateDocumentRequest,
203
+ DeleteChunkRequest,
204
+ DeleteCorpusRequest,
205
+ DeleteDocumentRequest,
206
+ GetChunkRequest,
207
+ GetCorpusRequest,
208
+ GetDocumentRequest,
209
+ ListChunksRequest,
210
+ ListChunksResponse,
211
+ ListCorporaRequest,
212
+ ListCorporaResponse,
213
+ ListDocumentsRequest,
214
+ ListDocumentsResponse,
215
+ QueryCorpusRequest,
216
+ QueryCorpusResponse,
217
+ QueryDocumentRequest,
218
+ QueryDocumentResponse,
219
+ RelevantChunk,
220
+ UpdateChunkRequest,
221
+ UpdateCorpusRequest,
222
+ UpdateDocumentRequest,
223
+ )
224
+ from google.ai.generativelanguage_v1beta.types.safety import (
225
+ ContentFilter,
226
+ HarmCategory,
227
+ SafetyFeedback,
228
+ SafetyRating,
229
+ SafetySetting,
230
+ )
231
+ from google.ai.generativelanguage_v1beta.types.text_service import (
232
+ BatchEmbedTextRequest,
233
+ BatchEmbedTextResponse,
234
+ CountTextTokensRequest,
235
+ CountTextTokensResponse,
236
+ Embedding,
237
+ EmbedTextRequest,
238
+ EmbedTextResponse,
239
+ GenerateTextRequest,
240
+ GenerateTextResponse,
241
+ TextCompletion,
242
+ TextPrompt,
243
+ )
244
+ from google.ai.generativelanguage_v1beta.types.tuned_model import (
245
+ Dataset,
246
+ Hyperparameters,
247
+ TunedModel,
248
+ TunedModelSource,
249
+ TuningExample,
250
+ TuningExamples,
251
+ TuningSnapshot,
252
+ TuningTask,
253
+ )
254
+
255
+ __all__ = (
256
+ "CacheServiceClient",
257
+ "CacheServiceAsyncClient",
258
+ "DiscussServiceClient",
259
+ "DiscussServiceAsyncClient",
260
+ "FileServiceClient",
261
+ "FileServiceAsyncClient",
262
+ "GenerativeServiceClient",
263
+ "GenerativeServiceAsyncClient",
264
+ "ModelServiceClient",
265
+ "ModelServiceAsyncClient",
266
+ "PermissionServiceClient",
267
+ "PermissionServiceAsyncClient",
268
+ "PredictionServiceClient",
269
+ "PredictionServiceAsyncClient",
270
+ "RetrieverServiceClient",
271
+ "RetrieverServiceAsyncClient",
272
+ "TextServiceClient",
273
+ "TextServiceAsyncClient",
274
+ "CreateCachedContentRequest",
275
+ "DeleteCachedContentRequest",
276
+ "GetCachedContentRequest",
277
+ "ListCachedContentsRequest",
278
+ "ListCachedContentsResponse",
279
+ "UpdateCachedContentRequest",
280
+ "CachedContent",
281
+ "CitationMetadata",
282
+ "CitationSource",
283
+ "Blob",
284
+ "CodeExecution",
285
+ "CodeExecutionResult",
286
+ "Content",
287
+ "DynamicRetrievalConfig",
288
+ "ExecutableCode",
289
+ "FileData",
290
+ "FunctionCall",
291
+ "FunctionCallingConfig",
292
+ "FunctionDeclaration",
293
+ "FunctionResponse",
294
+ "GoogleSearchRetrieval",
295
+ "GroundingPassage",
296
+ "GroundingPassages",
297
+ "Part",
298
+ "Schema",
299
+ "Tool",
300
+ "ToolConfig",
301
+ "Type",
302
+ "CountMessageTokensRequest",
303
+ "CountMessageTokensResponse",
304
+ "Example",
305
+ "GenerateMessageRequest",
306
+ "GenerateMessageResponse",
307
+ "Message",
308
+ "MessagePrompt",
309
+ "File",
310
+ "VideoMetadata",
311
+ "CreateFileRequest",
312
+ "CreateFileResponse",
313
+ "DeleteFileRequest",
314
+ "GetFileRequest",
315
+ "ListFilesRequest",
316
+ "ListFilesResponse",
317
+ "AttributionSourceId",
318
+ "BatchEmbedContentsRequest",
319
+ "BatchEmbedContentsResponse",
320
+ "Candidate",
321
+ "ContentEmbedding",
322
+ "CountTokensRequest",
323
+ "CountTokensResponse",
324
+ "EmbedContentRequest",
325
+ "EmbedContentResponse",
326
+ "GenerateAnswerRequest",
327
+ "GenerateAnswerResponse",
328
+ "GenerateContentRequest",
329
+ "GenerateContentResponse",
330
+ "GenerationConfig",
331
+ "GroundingAttribution",
332
+ "GroundingChunk",
333
+ "GroundingMetadata",
334
+ "GroundingSupport",
335
+ "LogprobsResult",
336
+ "PrebuiltVoiceConfig",
337
+ "RetrievalMetadata",
338
+ "SearchEntryPoint",
339
+ "Segment",
340
+ "SemanticRetrieverConfig",
341
+ "SpeechConfig",
342
+ "VoiceConfig",
343
+ "TaskType",
344
+ "Model",
345
+ "CreateTunedModelMetadata",
346
+ "CreateTunedModelRequest",
347
+ "DeleteTunedModelRequest",
348
+ "GetModelRequest",
349
+ "GetTunedModelRequest",
350
+ "ListModelsRequest",
351
+ "ListModelsResponse",
352
+ "ListTunedModelsRequest",
353
+ "ListTunedModelsResponse",
354
+ "UpdateTunedModelRequest",
355
+ "Permission",
356
+ "CreatePermissionRequest",
357
+ "DeletePermissionRequest",
358
+ "GetPermissionRequest",
359
+ "ListPermissionsRequest",
360
+ "ListPermissionsResponse",
361
+ "TransferOwnershipRequest",
362
+ "TransferOwnershipResponse",
363
+ "UpdatePermissionRequest",
364
+ "PredictRequest",
365
+ "PredictResponse",
366
+ "Chunk",
367
+ "ChunkData",
368
+ "Condition",
369
+ "Corpus",
370
+ "CustomMetadata",
371
+ "Document",
372
+ "MetadataFilter",
373
+ "StringList",
374
+ "BatchCreateChunksRequest",
375
+ "BatchCreateChunksResponse",
376
+ "BatchDeleteChunksRequest",
377
+ "BatchUpdateChunksRequest",
378
+ "BatchUpdateChunksResponse",
379
+ "CreateChunkRequest",
380
+ "CreateCorpusRequest",
381
+ "CreateDocumentRequest",
382
+ "DeleteChunkRequest",
383
+ "DeleteCorpusRequest",
384
+ "DeleteDocumentRequest",
385
+ "GetChunkRequest",
386
+ "GetCorpusRequest",
387
+ "GetDocumentRequest",
388
+ "ListChunksRequest",
389
+ "ListChunksResponse",
390
+ "ListCorporaRequest",
391
+ "ListCorporaResponse",
392
+ "ListDocumentsRequest",
393
+ "ListDocumentsResponse",
394
+ "QueryCorpusRequest",
395
+ "QueryCorpusResponse",
396
+ "QueryDocumentRequest",
397
+ "QueryDocumentResponse",
398
+ "RelevantChunk",
399
+ "UpdateChunkRequest",
400
+ "UpdateCorpusRequest",
401
+ "UpdateDocumentRequest",
402
+ "ContentFilter",
403
+ "SafetyFeedback",
404
+ "SafetyRating",
405
+ "SafetySetting",
406
+ "HarmCategory",
407
+ "BatchEmbedTextRequest",
408
+ "BatchEmbedTextResponse",
409
+ "CountTextTokensRequest",
410
+ "CountTextTokensResponse",
411
+ "Embedding",
412
+ "EmbedTextRequest",
413
+ "EmbedTextResponse",
414
+ "GenerateTextRequest",
415
+ "GenerateTextResponse",
416
+ "TextCompletion",
417
+ "TextPrompt",
418
+ "Dataset",
419
+ "Hyperparameters",
420
+ "TunedModel",
421
+ "TunedModelSource",
422
+ "TuningExample",
423
+ "TuningExamples",
424
+ "TuningSnapshot",
425
+ "TuningTask",
426
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (11.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage/__pycache__/gapic_version.cpython-311.pyc ADDED
Binary file (229 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage/gapic_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ __version__ = "0.6.15" # {x-release-please-version}
.venv/lib/python3.11/site-packages/google/ai/generativelanguage/py.typed ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Marker file for PEP 561.
2
+ # The google-ai-generativelanguage package uses inline types.
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/retriever_service/__pycache__/client.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:43f1a6d645023b1fc80e9bf5e3f4a748e3d23de004dcb032d944047dea1aa539
3
+ size 111351
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/__init__.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from google.ai.generativelanguage_v1beta import gapic_version as package_version
17
+
18
+ __version__ = package_version.__version__
19
+
20
+
21
+ from .services.cache_service import CacheServiceAsyncClient, CacheServiceClient
22
+ from .services.discuss_service import DiscussServiceAsyncClient, DiscussServiceClient
23
+ from .services.file_service import FileServiceAsyncClient, FileServiceClient
24
+ from .services.generative_service import (
25
+ GenerativeServiceAsyncClient,
26
+ GenerativeServiceClient,
27
+ )
28
+ from .services.model_service import ModelServiceAsyncClient, ModelServiceClient
29
+ from .services.permission_service import (
30
+ PermissionServiceAsyncClient,
31
+ PermissionServiceClient,
32
+ )
33
+ from .services.prediction_service import (
34
+ PredictionServiceAsyncClient,
35
+ PredictionServiceClient,
36
+ )
37
+ from .services.retriever_service import (
38
+ RetrieverServiceAsyncClient,
39
+ RetrieverServiceClient,
40
+ )
41
+ from .services.text_service import TextServiceAsyncClient, TextServiceClient
42
+ from .types.cache_service import (
43
+ CreateCachedContentRequest,
44
+ DeleteCachedContentRequest,
45
+ GetCachedContentRequest,
46
+ ListCachedContentsRequest,
47
+ ListCachedContentsResponse,
48
+ UpdateCachedContentRequest,
49
+ )
50
+ from .types.cached_content import CachedContent
51
+ from .types.citation import CitationMetadata, CitationSource
52
+ from .types.content import (
53
+ Blob,
54
+ CodeExecution,
55
+ CodeExecutionResult,
56
+ Content,
57
+ DynamicRetrievalConfig,
58
+ ExecutableCode,
59
+ FileData,
60
+ FunctionCall,
61
+ FunctionCallingConfig,
62
+ FunctionDeclaration,
63
+ FunctionResponse,
64
+ GoogleSearchRetrieval,
65
+ GroundingPassage,
66
+ GroundingPassages,
67
+ Part,
68
+ Schema,
69
+ Tool,
70
+ ToolConfig,
71
+ Type,
72
+ )
73
+ from .types.discuss_service import (
74
+ CountMessageTokensRequest,
75
+ CountMessageTokensResponse,
76
+ Example,
77
+ GenerateMessageRequest,
78
+ GenerateMessageResponse,
79
+ Message,
80
+ MessagePrompt,
81
+ )
82
+ from .types.file import File, VideoMetadata
83
+ from .types.file_service import (
84
+ CreateFileRequest,
85
+ CreateFileResponse,
86
+ DeleteFileRequest,
87
+ GetFileRequest,
88
+ ListFilesRequest,
89
+ ListFilesResponse,
90
+ )
91
+ from .types.generative_service import (
92
+ AttributionSourceId,
93
+ BatchEmbedContentsRequest,
94
+ BatchEmbedContentsResponse,
95
+ Candidate,
96
+ ContentEmbedding,
97
+ CountTokensRequest,
98
+ CountTokensResponse,
99
+ EmbedContentRequest,
100
+ EmbedContentResponse,
101
+ GenerateAnswerRequest,
102
+ GenerateAnswerResponse,
103
+ GenerateContentRequest,
104
+ GenerateContentResponse,
105
+ GenerationConfig,
106
+ GroundingAttribution,
107
+ GroundingChunk,
108
+ GroundingMetadata,
109
+ GroundingSupport,
110
+ LogprobsResult,
111
+ PrebuiltVoiceConfig,
112
+ RetrievalMetadata,
113
+ SearchEntryPoint,
114
+ Segment,
115
+ SemanticRetrieverConfig,
116
+ SpeechConfig,
117
+ TaskType,
118
+ VoiceConfig,
119
+ )
120
+ from .types.model import Model
121
+ from .types.model_service import (
122
+ CreateTunedModelMetadata,
123
+ CreateTunedModelRequest,
124
+ DeleteTunedModelRequest,
125
+ GetModelRequest,
126
+ GetTunedModelRequest,
127
+ ListModelsRequest,
128
+ ListModelsResponse,
129
+ ListTunedModelsRequest,
130
+ ListTunedModelsResponse,
131
+ UpdateTunedModelRequest,
132
+ )
133
+ from .types.permission import Permission
134
+ from .types.permission_service import (
135
+ CreatePermissionRequest,
136
+ DeletePermissionRequest,
137
+ GetPermissionRequest,
138
+ ListPermissionsRequest,
139
+ ListPermissionsResponse,
140
+ TransferOwnershipRequest,
141
+ TransferOwnershipResponse,
142
+ UpdatePermissionRequest,
143
+ )
144
+ from .types.prediction_service import PredictRequest, PredictResponse
145
+ from .types.retriever import (
146
+ Chunk,
147
+ ChunkData,
148
+ Condition,
149
+ Corpus,
150
+ CustomMetadata,
151
+ Document,
152
+ MetadataFilter,
153
+ StringList,
154
+ )
155
+ from .types.retriever_service import (
156
+ BatchCreateChunksRequest,
157
+ BatchCreateChunksResponse,
158
+ BatchDeleteChunksRequest,
159
+ BatchUpdateChunksRequest,
160
+ BatchUpdateChunksResponse,
161
+ CreateChunkRequest,
162
+ CreateCorpusRequest,
163
+ CreateDocumentRequest,
164
+ DeleteChunkRequest,
165
+ DeleteCorpusRequest,
166
+ DeleteDocumentRequest,
167
+ GetChunkRequest,
168
+ GetCorpusRequest,
169
+ GetDocumentRequest,
170
+ ListChunksRequest,
171
+ ListChunksResponse,
172
+ ListCorporaRequest,
173
+ ListCorporaResponse,
174
+ ListDocumentsRequest,
175
+ ListDocumentsResponse,
176
+ QueryCorpusRequest,
177
+ QueryCorpusResponse,
178
+ QueryDocumentRequest,
179
+ QueryDocumentResponse,
180
+ RelevantChunk,
181
+ UpdateChunkRequest,
182
+ UpdateCorpusRequest,
183
+ UpdateDocumentRequest,
184
+ )
185
+ from .types.safety import (
186
+ ContentFilter,
187
+ HarmCategory,
188
+ SafetyFeedback,
189
+ SafetyRating,
190
+ SafetySetting,
191
+ )
192
+ from .types.text_service import (
193
+ BatchEmbedTextRequest,
194
+ BatchEmbedTextResponse,
195
+ CountTextTokensRequest,
196
+ CountTextTokensResponse,
197
+ Embedding,
198
+ EmbedTextRequest,
199
+ EmbedTextResponse,
200
+ GenerateTextRequest,
201
+ GenerateTextResponse,
202
+ TextCompletion,
203
+ TextPrompt,
204
+ )
205
+ from .types.tuned_model import (
206
+ Dataset,
207
+ Hyperparameters,
208
+ TunedModel,
209
+ TunedModelSource,
210
+ TuningExample,
211
+ TuningExamples,
212
+ TuningSnapshot,
213
+ TuningTask,
214
+ )
215
+
216
+ __all__ = (
217
+ "CacheServiceAsyncClient",
218
+ "DiscussServiceAsyncClient",
219
+ "FileServiceAsyncClient",
220
+ "GenerativeServiceAsyncClient",
221
+ "ModelServiceAsyncClient",
222
+ "PermissionServiceAsyncClient",
223
+ "PredictionServiceAsyncClient",
224
+ "RetrieverServiceAsyncClient",
225
+ "TextServiceAsyncClient",
226
+ "AttributionSourceId",
227
+ "BatchCreateChunksRequest",
228
+ "BatchCreateChunksResponse",
229
+ "BatchDeleteChunksRequest",
230
+ "BatchEmbedContentsRequest",
231
+ "BatchEmbedContentsResponse",
232
+ "BatchEmbedTextRequest",
233
+ "BatchEmbedTextResponse",
234
+ "BatchUpdateChunksRequest",
235
+ "BatchUpdateChunksResponse",
236
+ "Blob",
237
+ "CacheServiceClient",
238
+ "CachedContent",
239
+ "Candidate",
240
+ "Chunk",
241
+ "ChunkData",
242
+ "CitationMetadata",
243
+ "CitationSource",
244
+ "CodeExecution",
245
+ "CodeExecutionResult",
246
+ "Condition",
247
+ "Content",
248
+ "ContentEmbedding",
249
+ "ContentFilter",
250
+ "Corpus",
251
+ "CountMessageTokensRequest",
252
+ "CountMessageTokensResponse",
253
+ "CountTextTokensRequest",
254
+ "CountTextTokensResponse",
255
+ "CountTokensRequest",
256
+ "CountTokensResponse",
257
+ "CreateCachedContentRequest",
258
+ "CreateChunkRequest",
259
+ "CreateCorpusRequest",
260
+ "CreateDocumentRequest",
261
+ "CreateFileRequest",
262
+ "CreateFileResponse",
263
+ "CreatePermissionRequest",
264
+ "CreateTunedModelMetadata",
265
+ "CreateTunedModelRequest",
266
+ "CustomMetadata",
267
+ "Dataset",
268
+ "DeleteCachedContentRequest",
269
+ "DeleteChunkRequest",
270
+ "DeleteCorpusRequest",
271
+ "DeleteDocumentRequest",
272
+ "DeleteFileRequest",
273
+ "DeletePermissionRequest",
274
+ "DeleteTunedModelRequest",
275
+ "DiscussServiceClient",
276
+ "Document",
277
+ "DynamicRetrievalConfig",
278
+ "EmbedContentRequest",
279
+ "EmbedContentResponse",
280
+ "EmbedTextRequest",
281
+ "EmbedTextResponse",
282
+ "Embedding",
283
+ "Example",
284
+ "ExecutableCode",
285
+ "File",
286
+ "FileData",
287
+ "FileServiceClient",
288
+ "FunctionCall",
289
+ "FunctionCallingConfig",
290
+ "FunctionDeclaration",
291
+ "FunctionResponse",
292
+ "GenerateAnswerRequest",
293
+ "GenerateAnswerResponse",
294
+ "GenerateContentRequest",
295
+ "GenerateContentResponse",
296
+ "GenerateMessageRequest",
297
+ "GenerateMessageResponse",
298
+ "GenerateTextRequest",
299
+ "GenerateTextResponse",
300
+ "GenerationConfig",
301
+ "GenerativeServiceClient",
302
+ "GetCachedContentRequest",
303
+ "GetChunkRequest",
304
+ "GetCorpusRequest",
305
+ "GetDocumentRequest",
306
+ "GetFileRequest",
307
+ "GetModelRequest",
308
+ "GetPermissionRequest",
309
+ "GetTunedModelRequest",
310
+ "GoogleSearchRetrieval",
311
+ "GroundingAttribution",
312
+ "GroundingChunk",
313
+ "GroundingMetadata",
314
+ "GroundingPassage",
315
+ "GroundingPassages",
316
+ "GroundingSupport",
317
+ "HarmCategory",
318
+ "Hyperparameters",
319
+ "ListCachedContentsRequest",
320
+ "ListCachedContentsResponse",
321
+ "ListChunksRequest",
322
+ "ListChunksResponse",
323
+ "ListCorporaRequest",
324
+ "ListCorporaResponse",
325
+ "ListDocumentsRequest",
326
+ "ListDocumentsResponse",
327
+ "ListFilesRequest",
328
+ "ListFilesResponse",
329
+ "ListModelsRequest",
330
+ "ListModelsResponse",
331
+ "ListPermissionsRequest",
332
+ "ListPermissionsResponse",
333
+ "ListTunedModelsRequest",
334
+ "ListTunedModelsResponse",
335
+ "LogprobsResult",
336
+ "Message",
337
+ "MessagePrompt",
338
+ "MetadataFilter",
339
+ "Model",
340
+ "ModelServiceClient",
341
+ "Part",
342
+ "Permission",
343
+ "PermissionServiceClient",
344
+ "PrebuiltVoiceConfig",
345
+ "PredictRequest",
346
+ "PredictResponse",
347
+ "PredictionServiceClient",
348
+ "QueryCorpusRequest",
349
+ "QueryCorpusResponse",
350
+ "QueryDocumentRequest",
351
+ "QueryDocumentResponse",
352
+ "RelevantChunk",
353
+ "RetrievalMetadata",
354
+ "RetrieverServiceClient",
355
+ "SafetyFeedback",
356
+ "SafetyRating",
357
+ "SafetySetting",
358
+ "Schema",
359
+ "SearchEntryPoint",
360
+ "Segment",
361
+ "SemanticRetrieverConfig",
362
+ "SpeechConfig",
363
+ "StringList",
364
+ "TaskType",
365
+ "TextCompletion",
366
+ "TextPrompt",
367
+ "TextServiceClient",
368
+ "Tool",
369
+ "ToolConfig",
370
+ "TransferOwnershipRequest",
371
+ "TransferOwnershipResponse",
372
+ "TunedModel",
373
+ "TunedModelSource",
374
+ "TuningExample",
375
+ "TuningExamples",
376
+ "TuningSnapshot",
377
+ "TuningTask",
378
+ "Type",
379
+ "UpdateCachedContentRequest",
380
+ "UpdateChunkRequest",
381
+ "UpdateCorpusRequest",
382
+ "UpdateDocumentRequest",
383
+ "UpdatePermissionRequest",
384
+ "UpdateTunedModelRequest",
385
+ "VideoMetadata",
386
+ "VoiceConfig",
387
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/gapic_metadata.json ADDED
@@ -0,0 +1,1005 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
3
+ "language": "python",
4
+ "libraryPackage": "google.ai.generativelanguage_v1beta",
5
+ "protoPackage": "google.ai.generativelanguage.v1beta",
6
+ "schema": "1.0",
7
+ "services": {
8
+ "CacheService": {
9
+ "clients": {
10
+ "grpc": {
11
+ "libraryClient": "CacheServiceClient",
12
+ "rpcs": {
13
+ "CreateCachedContent": {
14
+ "methods": [
15
+ "create_cached_content"
16
+ ]
17
+ },
18
+ "DeleteCachedContent": {
19
+ "methods": [
20
+ "delete_cached_content"
21
+ ]
22
+ },
23
+ "GetCachedContent": {
24
+ "methods": [
25
+ "get_cached_content"
26
+ ]
27
+ },
28
+ "ListCachedContents": {
29
+ "methods": [
30
+ "list_cached_contents"
31
+ ]
32
+ },
33
+ "UpdateCachedContent": {
34
+ "methods": [
35
+ "update_cached_content"
36
+ ]
37
+ }
38
+ }
39
+ },
40
+ "grpc-async": {
41
+ "libraryClient": "CacheServiceAsyncClient",
42
+ "rpcs": {
43
+ "CreateCachedContent": {
44
+ "methods": [
45
+ "create_cached_content"
46
+ ]
47
+ },
48
+ "DeleteCachedContent": {
49
+ "methods": [
50
+ "delete_cached_content"
51
+ ]
52
+ },
53
+ "GetCachedContent": {
54
+ "methods": [
55
+ "get_cached_content"
56
+ ]
57
+ },
58
+ "ListCachedContents": {
59
+ "methods": [
60
+ "list_cached_contents"
61
+ ]
62
+ },
63
+ "UpdateCachedContent": {
64
+ "methods": [
65
+ "update_cached_content"
66
+ ]
67
+ }
68
+ }
69
+ },
70
+ "rest": {
71
+ "libraryClient": "CacheServiceClient",
72
+ "rpcs": {
73
+ "CreateCachedContent": {
74
+ "methods": [
75
+ "create_cached_content"
76
+ ]
77
+ },
78
+ "DeleteCachedContent": {
79
+ "methods": [
80
+ "delete_cached_content"
81
+ ]
82
+ },
83
+ "GetCachedContent": {
84
+ "methods": [
85
+ "get_cached_content"
86
+ ]
87
+ },
88
+ "ListCachedContents": {
89
+ "methods": [
90
+ "list_cached_contents"
91
+ ]
92
+ },
93
+ "UpdateCachedContent": {
94
+ "methods": [
95
+ "update_cached_content"
96
+ ]
97
+ }
98
+ }
99
+ }
100
+ }
101
+ },
102
+ "DiscussService": {
103
+ "clients": {
104
+ "grpc": {
105
+ "libraryClient": "DiscussServiceClient",
106
+ "rpcs": {
107
+ "CountMessageTokens": {
108
+ "methods": [
109
+ "count_message_tokens"
110
+ ]
111
+ },
112
+ "GenerateMessage": {
113
+ "methods": [
114
+ "generate_message"
115
+ ]
116
+ }
117
+ }
118
+ },
119
+ "grpc-async": {
120
+ "libraryClient": "DiscussServiceAsyncClient",
121
+ "rpcs": {
122
+ "CountMessageTokens": {
123
+ "methods": [
124
+ "count_message_tokens"
125
+ ]
126
+ },
127
+ "GenerateMessage": {
128
+ "methods": [
129
+ "generate_message"
130
+ ]
131
+ }
132
+ }
133
+ },
134
+ "rest": {
135
+ "libraryClient": "DiscussServiceClient",
136
+ "rpcs": {
137
+ "CountMessageTokens": {
138
+ "methods": [
139
+ "count_message_tokens"
140
+ ]
141
+ },
142
+ "GenerateMessage": {
143
+ "methods": [
144
+ "generate_message"
145
+ ]
146
+ }
147
+ }
148
+ }
149
+ }
150
+ },
151
+ "FileService": {
152
+ "clients": {
153
+ "grpc": {
154
+ "libraryClient": "FileServiceClient",
155
+ "rpcs": {
156
+ "CreateFile": {
157
+ "methods": [
158
+ "create_file"
159
+ ]
160
+ },
161
+ "DeleteFile": {
162
+ "methods": [
163
+ "delete_file"
164
+ ]
165
+ },
166
+ "GetFile": {
167
+ "methods": [
168
+ "get_file"
169
+ ]
170
+ },
171
+ "ListFiles": {
172
+ "methods": [
173
+ "list_files"
174
+ ]
175
+ }
176
+ }
177
+ },
178
+ "grpc-async": {
179
+ "libraryClient": "FileServiceAsyncClient",
180
+ "rpcs": {
181
+ "CreateFile": {
182
+ "methods": [
183
+ "create_file"
184
+ ]
185
+ },
186
+ "DeleteFile": {
187
+ "methods": [
188
+ "delete_file"
189
+ ]
190
+ },
191
+ "GetFile": {
192
+ "methods": [
193
+ "get_file"
194
+ ]
195
+ },
196
+ "ListFiles": {
197
+ "methods": [
198
+ "list_files"
199
+ ]
200
+ }
201
+ }
202
+ },
203
+ "rest": {
204
+ "libraryClient": "FileServiceClient",
205
+ "rpcs": {
206
+ "CreateFile": {
207
+ "methods": [
208
+ "create_file"
209
+ ]
210
+ },
211
+ "DeleteFile": {
212
+ "methods": [
213
+ "delete_file"
214
+ ]
215
+ },
216
+ "GetFile": {
217
+ "methods": [
218
+ "get_file"
219
+ ]
220
+ },
221
+ "ListFiles": {
222
+ "methods": [
223
+ "list_files"
224
+ ]
225
+ }
226
+ }
227
+ }
228
+ }
229
+ },
230
+ "GenerativeService": {
231
+ "clients": {
232
+ "grpc": {
233
+ "libraryClient": "GenerativeServiceClient",
234
+ "rpcs": {
235
+ "BatchEmbedContents": {
236
+ "methods": [
237
+ "batch_embed_contents"
238
+ ]
239
+ },
240
+ "CountTokens": {
241
+ "methods": [
242
+ "count_tokens"
243
+ ]
244
+ },
245
+ "EmbedContent": {
246
+ "methods": [
247
+ "embed_content"
248
+ ]
249
+ },
250
+ "GenerateAnswer": {
251
+ "methods": [
252
+ "generate_answer"
253
+ ]
254
+ },
255
+ "GenerateContent": {
256
+ "methods": [
257
+ "generate_content"
258
+ ]
259
+ },
260
+ "StreamGenerateContent": {
261
+ "methods": [
262
+ "stream_generate_content"
263
+ ]
264
+ }
265
+ }
266
+ },
267
+ "grpc-async": {
268
+ "libraryClient": "GenerativeServiceAsyncClient",
269
+ "rpcs": {
270
+ "BatchEmbedContents": {
271
+ "methods": [
272
+ "batch_embed_contents"
273
+ ]
274
+ },
275
+ "CountTokens": {
276
+ "methods": [
277
+ "count_tokens"
278
+ ]
279
+ },
280
+ "EmbedContent": {
281
+ "methods": [
282
+ "embed_content"
283
+ ]
284
+ },
285
+ "GenerateAnswer": {
286
+ "methods": [
287
+ "generate_answer"
288
+ ]
289
+ },
290
+ "GenerateContent": {
291
+ "methods": [
292
+ "generate_content"
293
+ ]
294
+ },
295
+ "StreamGenerateContent": {
296
+ "methods": [
297
+ "stream_generate_content"
298
+ ]
299
+ }
300
+ }
301
+ },
302
+ "rest": {
303
+ "libraryClient": "GenerativeServiceClient",
304
+ "rpcs": {
305
+ "BatchEmbedContents": {
306
+ "methods": [
307
+ "batch_embed_contents"
308
+ ]
309
+ },
310
+ "CountTokens": {
311
+ "methods": [
312
+ "count_tokens"
313
+ ]
314
+ },
315
+ "EmbedContent": {
316
+ "methods": [
317
+ "embed_content"
318
+ ]
319
+ },
320
+ "GenerateAnswer": {
321
+ "methods": [
322
+ "generate_answer"
323
+ ]
324
+ },
325
+ "GenerateContent": {
326
+ "methods": [
327
+ "generate_content"
328
+ ]
329
+ },
330
+ "StreamGenerateContent": {
331
+ "methods": [
332
+ "stream_generate_content"
333
+ ]
334
+ }
335
+ }
336
+ }
337
+ }
338
+ },
339
+ "ModelService": {
340
+ "clients": {
341
+ "grpc": {
342
+ "libraryClient": "ModelServiceClient",
343
+ "rpcs": {
344
+ "CreateTunedModel": {
345
+ "methods": [
346
+ "create_tuned_model"
347
+ ]
348
+ },
349
+ "DeleteTunedModel": {
350
+ "methods": [
351
+ "delete_tuned_model"
352
+ ]
353
+ },
354
+ "GetModel": {
355
+ "methods": [
356
+ "get_model"
357
+ ]
358
+ },
359
+ "GetTunedModel": {
360
+ "methods": [
361
+ "get_tuned_model"
362
+ ]
363
+ },
364
+ "ListModels": {
365
+ "methods": [
366
+ "list_models"
367
+ ]
368
+ },
369
+ "ListTunedModels": {
370
+ "methods": [
371
+ "list_tuned_models"
372
+ ]
373
+ },
374
+ "UpdateTunedModel": {
375
+ "methods": [
376
+ "update_tuned_model"
377
+ ]
378
+ }
379
+ }
380
+ },
381
+ "grpc-async": {
382
+ "libraryClient": "ModelServiceAsyncClient",
383
+ "rpcs": {
384
+ "CreateTunedModel": {
385
+ "methods": [
386
+ "create_tuned_model"
387
+ ]
388
+ },
389
+ "DeleteTunedModel": {
390
+ "methods": [
391
+ "delete_tuned_model"
392
+ ]
393
+ },
394
+ "GetModel": {
395
+ "methods": [
396
+ "get_model"
397
+ ]
398
+ },
399
+ "GetTunedModel": {
400
+ "methods": [
401
+ "get_tuned_model"
402
+ ]
403
+ },
404
+ "ListModels": {
405
+ "methods": [
406
+ "list_models"
407
+ ]
408
+ },
409
+ "ListTunedModels": {
410
+ "methods": [
411
+ "list_tuned_models"
412
+ ]
413
+ },
414
+ "UpdateTunedModel": {
415
+ "methods": [
416
+ "update_tuned_model"
417
+ ]
418
+ }
419
+ }
420
+ },
421
+ "rest": {
422
+ "libraryClient": "ModelServiceClient",
423
+ "rpcs": {
424
+ "CreateTunedModel": {
425
+ "methods": [
426
+ "create_tuned_model"
427
+ ]
428
+ },
429
+ "DeleteTunedModel": {
430
+ "methods": [
431
+ "delete_tuned_model"
432
+ ]
433
+ },
434
+ "GetModel": {
435
+ "methods": [
436
+ "get_model"
437
+ ]
438
+ },
439
+ "GetTunedModel": {
440
+ "methods": [
441
+ "get_tuned_model"
442
+ ]
443
+ },
444
+ "ListModels": {
445
+ "methods": [
446
+ "list_models"
447
+ ]
448
+ },
449
+ "ListTunedModels": {
450
+ "methods": [
451
+ "list_tuned_models"
452
+ ]
453
+ },
454
+ "UpdateTunedModel": {
455
+ "methods": [
456
+ "update_tuned_model"
457
+ ]
458
+ }
459
+ }
460
+ }
461
+ }
462
+ },
463
+ "PermissionService": {
464
+ "clients": {
465
+ "grpc": {
466
+ "libraryClient": "PermissionServiceClient",
467
+ "rpcs": {
468
+ "CreatePermission": {
469
+ "methods": [
470
+ "create_permission"
471
+ ]
472
+ },
473
+ "DeletePermission": {
474
+ "methods": [
475
+ "delete_permission"
476
+ ]
477
+ },
478
+ "GetPermission": {
479
+ "methods": [
480
+ "get_permission"
481
+ ]
482
+ },
483
+ "ListPermissions": {
484
+ "methods": [
485
+ "list_permissions"
486
+ ]
487
+ },
488
+ "TransferOwnership": {
489
+ "methods": [
490
+ "transfer_ownership"
491
+ ]
492
+ },
493
+ "UpdatePermission": {
494
+ "methods": [
495
+ "update_permission"
496
+ ]
497
+ }
498
+ }
499
+ },
500
+ "grpc-async": {
501
+ "libraryClient": "PermissionServiceAsyncClient",
502
+ "rpcs": {
503
+ "CreatePermission": {
504
+ "methods": [
505
+ "create_permission"
506
+ ]
507
+ },
508
+ "DeletePermission": {
509
+ "methods": [
510
+ "delete_permission"
511
+ ]
512
+ },
513
+ "GetPermission": {
514
+ "methods": [
515
+ "get_permission"
516
+ ]
517
+ },
518
+ "ListPermissions": {
519
+ "methods": [
520
+ "list_permissions"
521
+ ]
522
+ },
523
+ "TransferOwnership": {
524
+ "methods": [
525
+ "transfer_ownership"
526
+ ]
527
+ },
528
+ "UpdatePermission": {
529
+ "methods": [
530
+ "update_permission"
531
+ ]
532
+ }
533
+ }
534
+ },
535
+ "rest": {
536
+ "libraryClient": "PermissionServiceClient",
537
+ "rpcs": {
538
+ "CreatePermission": {
539
+ "methods": [
540
+ "create_permission"
541
+ ]
542
+ },
543
+ "DeletePermission": {
544
+ "methods": [
545
+ "delete_permission"
546
+ ]
547
+ },
548
+ "GetPermission": {
549
+ "methods": [
550
+ "get_permission"
551
+ ]
552
+ },
553
+ "ListPermissions": {
554
+ "methods": [
555
+ "list_permissions"
556
+ ]
557
+ },
558
+ "TransferOwnership": {
559
+ "methods": [
560
+ "transfer_ownership"
561
+ ]
562
+ },
563
+ "UpdatePermission": {
564
+ "methods": [
565
+ "update_permission"
566
+ ]
567
+ }
568
+ }
569
+ }
570
+ }
571
+ },
572
+ "PredictionService": {
573
+ "clients": {
574
+ "grpc": {
575
+ "libraryClient": "PredictionServiceClient",
576
+ "rpcs": {
577
+ "Predict": {
578
+ "methods": [
579
+ "predict"
580
+ ]
581
+ }
582
+ }
583
+ },
584
+ "grpc-async": {
585
+ "libraryClient": "PredictionServiceAsyncClient",
586
+ "rpcs": {
587
+ "Predict": {
588
+ "methods": [
589
+ "predict"
590
+ ]
591
+ }
592
+ }
593
+ },
594
+ "rest": {
595
+ "libraryClient": "PredictionServiceClient",
596
+ "rpcs": {
597
+ "Predict": {
598
+ "methods": [
599
+ "predict"
600
+ ]
601
+ }
602
+ }
603
+ }
604
+ }
605
+ },
606
+ "RetrieverService": {
607
+ "clients": {
608
+ "grpc": {
609
+ "libraryClient": "RetrieverServiceClient",
610
+ "rpcs": {
611
+ "BatchCreateChunks": {
612
+ "methods": [
613
+ "batch_create_chunks"
614
+ ]
615
+ },
616
+ "BatchDeleteChunks": {
617
+ "methods": [
618
+ "batch_delete_chunks"
619
+ ]
620
+ },
621
+ "BatchUpdateChunks": {
622
+ "methods": [
623
+ "batch_update_chunks"
624
+ ]
625
+ },
626
+ "CreateChunk": {
627
+ "methods": [
628
+ "create_chunk"
629
+ ]
630
+ },
631
+ "CreateCorpus": {
632
+ "methods": [
633
+ "create_corpus"
634
+ ]
635
+ },
636
+ "CreateDocument": {
637
+ "methods": [
638
+ "create_document"
639
+ ]
640
+ },
641
+ "DeleteChunk": {
642
+ "methods": [
643
+ "delete_chunk"
644
+ ]
645
+ },
646
+ "DeleteCorpus": {
647
+ "methods": [
648
+ "delete_corpus"
649
+ ]
650
+ },
651
+ "DeleteDocument": {
652
+ "methods": [
653
+ "delete_document"
654
+ ]
655
+ },
656
+ "GetChunk": {
657
+ "methods": [
658
+ "get_chunk"
659
+ ]
660
+ },
661
+ "GetCorpus": {
662
+ "methods": [
663
+ "get_corpus"
664
+ ]
665
+ },
666
+ "GetDocument": {
667
+ "methods": [
668
+ "get_document"
669
+ ]
670
+ },
671
+ "ListChunks": {
672
+ "methods": [
673
+ "list_chunks"
674
+ ]
675
+ },
676
+ "ListCorpora": {
677
+ "methods": [
678
+ "list_corpora"
679
+ ]
680
+ },
681
+ "ListDocuments": {
682
+ "methods": [
683
+ "list_documents"
684
+ ]
685
+ },
686
+ "QueryCorpus": {
687
+ "methods": [
688
+ "query_corpus"
689
+ ]
690
+ },
691
+ "QueryDocument": {
692
+ "methods": [
693
+ "query_document"
694
+ ]
695
+ },
696
+ "UpdateChunk": {
697
+ "methods": [
698
+ "update_chunk"
699
+ ]
700
+ },
701
+ "UpdateCorpus": {
702
+ "methods": [
703
+ "update_corpus"
704
+ ]
705
+ },
706
+ "UpdateDocument": {
707
+ "methods": [
708
+ "update_document"
709
+ ]
710
+ }
711
+ }
712
+ },
713
+ "grpc-async": {
714
+ "libraryClient": "RetrieverServiceAsyncClient",
715
+ "rpcs": {
716
+ "BatchCreateChunks": {
717
+ "methods": [
718
+ "batch_create_chunks"
719
+ ]
720
+ },
721
+ "BatchDeleteChunks": {
722
+ "methods": [
723
+ "batch_delete_chunks"
724
+ ]
725
+ },
726
+ "BatchUpdateChunks": {
727
+ "methods": [
728
+ "batch_update_chunks"
729
+ ]
730
+ },
731
+ "CreateChunk": {
732
+ "methods": [
733
+ "create_chunk"
734
+ ]
735
+ },
736
+ "CreateCorpus": {
737
+ "methods": [
738
+ "create_corpus"
739
+ ]
740
+ },
741
+ "CreateDocument": {
742
+ "methods": [
743
+ "create_document"
744
+ ]
745
+ },
746
+ "DeleteChunk": {
747
+ "methods": [
748
+ "delete_chunk"
749
+ ]
750
+ },
751
+ "DeleteCorpus": {
752
+ "methods": [
753
+ "delete_corpus"
754
+ ]
755
+ },
756
+ "DeleteDocument": {
757
+ "methods": [
758
+ "delete_document"
759
+ ]
760
+ },
761
+ "GetChunk": {
762
+ "methods": [
763
+ "get_chunk"
764
+ ]
765
+ },
766
+ "GetCorpus": {
767
+ "methods": [
768
+ "get_corpus"
769
+ ]
770
+ },
771
+ "GetDocument": {
772
+ "methods": [
773
+ "get_document"
774
+ ]
775
+ },
776
+ "ListChunks": {
777
+ "methods": [
778
+ "list_chunks"
779
+ ]
780
+ },
781
+ "ListCorpora": {
782
+ "methods": [
783
+ "list_corpora"
784
+ ]
785
+ },
786
+ "ListDocuments": {
787
+ "methods": [
788
+ "list_documents"
789
+ ]
790
+ },
791
+ "QueryCorpus": {
792
+ "methods": [
793
+ "query_corpus"
794
+ ]
795
+ },
796
+ "QueryDocument": {
797
+ "methods": [
798
+ "query_document"
799
+ ]
800
+ },
801
+ "UpdateChunk": {
802
+ "methods": [
803
+ "update_chunk"
804
+ ]
805
+ },
806
+ "UpdateCorpus": {
807
+ "methods": [
808
+ "update_corpus"
809
+ ]
810
+ },
811
+ "UpdateDocument": {
812
+ "methods": [
813
+ "update_document"
814
+ ]
815
+ }
816
+ }
817
+ },
818
+ "rest": {
819
+ "libraryClient": "RetrieverServiceClient",
820
+ "rpcs": {
821
+ "BatchCreateChunks": {
822
+ "methods": [
823
+ "batch_create_chunks"
824
+ ]
825
+ },
826
+ "BatchDeleteChunks": {
827
+ "methods": [
828
+ "batch_delete_chunks"
829
+ ]
830
+ },
831
+ "BatchUpdateChunks": {
832
+ "methods": [
833
+ "batch_update_chunks"
834
+ ]
835
+ },
836
+ "CreateChunk": {
837
+ "methods": [
838
+ "create_chunk"
839
+ ]
840
+ },
841
+ "CreateCorpus": {
842
+ "methods": [
843
+ "create_corpus"
844
+ ]
845
+ },
846
+ "CreateDocument": {
847
+ "methods": [
848
+ "create_document"
849
+ ]
850
+ },
851
+ "DeleteChunk": {
852
+ "methods": [
853
+ "delete_chunk"
854
+ ]
855
+ },
856
+ "DeleteCorpus": {
857
+ "methods": [
858
+ "delete_corpus"
859
+ ]
860
+ },
861
+ "DeleteDocument": {
862
+ "methods": [
863
+ "delete_document"
864
+ ]
865
+ },
866
+ "GetChunk": {
867
+ "methods": [
868
+ "get_chunk"
869
+ ]
870
+ },
871
+ "GetCorpus": {
872
+ "methods": [
873
+ "get_corpus"
874
+ ]
875
+ },
876
+ "GetDocument": {
877
+ "methods": [
878
+ "get_document"
879
+ ]
880
+ },
881
+ "ListChunks": {
882
+ "methods": [
883
+ "list_chunks"
884
+ ]
885
+ },
886
+ "ListCorpora": {
887
+ "methods": [
888
+ "list_corpora"
889
+ ]
890
+ },
891
+ "ListDocuments": {
892
+ "methods": [
893
+ "list_documents"
894
+ ]
895
+ },
896
+ "QueryCorpus": {
897
+ "methods": [
898
+ "query_corpus"
899
+ ]
900
+ },
901
+ "QueryDocument": {
902
+ "methods": [
903
+ "query_document"
904
+ ]
905
+ },
906
+ "UpdateChunk": {
907
+ "methods": [
908
+ "update_chunk"
909
+ ]
910
+ },
911
+ "UpdateCorpus": {
912
+ "methods": [
913
+ "update_corpus"
914
+ ]
915
+ },
916
+ "UpdateDocument": {
917
+ "methods": [
918
+ "update_document"
919
+ ]
920
+ }
921
+ }
922
+ }
923
+ }
924
+ },
925
+ "TextService": {
926
+ "clients": {
927
+ "grpc": {
928
+ "libraryClient": "TextServiceClient",
929
+ "rpcs": {
930
+ "BatchEmbedText": {
931
+ "methods": [
932
+ "batch_embed_text"
933
+ ]
934
+ },
935
+ "CountTextTokens": {
936
+ "methods": [
937
+ "count_text_tokens"
938
+ ]
939
+ },
940
+ "EmbedText": {
941
+ "methods": [
942
+ "embed_text"
943
+ ]
944
+ },
945
+ "GenerateText": {
946
+ "methods": [
947
+ "generate_text"
948
+ ]
949
+ }
950
+ }
951
+ },
952
+ "grpc-async": {
953
+ "libraryClient": "TextServiceAsyncClient",
954
+ "rpcs": {
955
+ "BatchEmbedText": {
956
+ "methods": [
957
+ "batch_embed_text"
958
+ ]
959
+ },
960
+ "CountTextTokens": {
961
+ "methods": [
962
+ "count_text_tokens"
963
+ ]
964
+ },
965
+ "EmbedText": {
966
+ "methods": [
967
+ "embed_text"
968
+ ]
969
+ },
970
+ "GenerateText": {
971
+ "methods": [
972
+ "generate_text"
973
+ ]
974
+ }
975
+ }
976
+ },
977
+ "rest": {
978
+ "libraryClient": "TextServiceClient",
979
+ "rpcs": {
980
+ "BatchEmbedText": {
981
+ "methods": [
982
+ "batch_embed_text"
983
+ ]
984
+ },
985
+ "CountTextTokens": {
986
+ "methods": [
987
+ "count_text_tokens"
988
+ ]
989
+ },
990
+ "EmbedText": {
991
+ "methods": [
992
+ "embed_text"
993
+ ]
994
+ },
995
+ "GenerateText": {
996
+ "methods": [
997
+ "generate_text"
998
+ ]
999
+ }
1000
+ }
1001
+ }
1002
+ }
1003
+ }
1004
+ }
1005
+ }
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/gapic_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ __version__ = "0.6.15" # {x-release-please-version}
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/py.typed ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Marker file for PEP 561.
2
+ # The google-ai-generativelanguage package uses inline types.
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/__pycache__/generative_service.cpython-311.pyc ADDED
Binary file (75.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/cache_service.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1beta.types import (
24
+ cached_content as gag_cached_content,
25
+ )
26
+
27
+ __protobuf__ = proto.module(
28
+ package="google.ai.generativelanguage.v1beta",
29
+ manifest={
30
+ "ListCachedContentsRequest",
31
+ "ListCachedContentsResponse",
32
+ "CreateCachedContentRequest",
33
+ "GetCachedContentRequest",
34
+ "UpdateCachedContentRequest",
35
+ "DeleteCachedContentRequest",
36
+ },
37
+ )
38
+
39
+
40
+ class ListCachedContentsRequest(proto.Message):
41
+ r"""Request to list CachedContents.
42
+
43
+ Attributes:
44
+ page_size (int):
45
+ Optional. The maximum number of cached
46
+ contents to return. The service may return fewer
47
+ than this value. If unspecified, some default
48
+ (under maximum) number of items will be
49
+ returned. The maximum value is 1000; values
50
+ above 1000 will be coerced to 1000.
51
+ page_token (str):
52
+ Optional. A page token, received from a previous
53
+ ``ListCachedContents`` call. Provide this to retrieve the
54
+ subsequent page.
55
+
56
+ When paginating, all other parameters provided to
57
+ ``ListCachedContents`` must match the call that provided the
58
+ page token.
59
+ """
60
+
61
+ page_size: int = proto.Field(
62
+ proto.INT32,
63
+ number=1,
64
+ )
65
+ page_token: str = proto.Field(
66
+ proto.STRING,
67
+ number=2,
68
+ )
69
+
70
+
71
+ class ListCachedContentsResponse(proto.Message):
72
+ r"""Response with CachedContents list.
73
+
74
+ Attributes:
75
+ cached_contents (MutableSequence[google.ai.generativelanguage_v1beta.types.CachedContent]):
76
+ List of cached contents.
77
+ next_page_token (str):
78
+ A token, which can be sent as ``page_token`` to retrieve the
79
+ next page. If this field is omitted, there are no subsequent
80
+ pages.
81
+ """
82
+
83
+ @property
84
+ def raw_page(self):
85
+ return self
86
+
87
+ cached_contents: MutableSequence[
88
+ gag_cached_content.CachedContent
89
+ ] = proto.RepeatedField(
90
+ proto.MESSAGE,
91
+ number=1,
92
+ message=gag_cached_content.CachedContent,
93
+ )
94
+ next_page_token: str = proto.Field(
95
+ proto.STRING,
96
+ number=2,
97
+ )
98
+
99
+
100
+ class CreateCachedContentRequest(proto.Message):
101
+ r"""Request to create CachedContent.
102
+
103
+ Attributes:
104
+ cached_content (google.ai.generativelanguage_v1beta.types.CachedContent):
105
+ Required. The cached content to create.
106
+ """
107
+
108
+ cached_content: gag_cached_content.CachedContent = proto.Field(
109
+ proto.MESSAGE,
110
+ number=1,
111
+ message=gag_cached_content.CachedContent,
112
+ )
113
+
114
+
115
+ class GetCachedContentRequest(proto.Message):
116
+ r"""Request to read CachedContent.
117
+
118
+ Attributes:
119
+ name (str):
120
+ Required. The resource name referring to the content cache
121
+ entry. Format: ``cachedContents/{id}``
122
+ """
123
+
124
+ name: str = proto.Field(
125
+ proto.STRING,
126
+ number=1,
127
+ )
128
+
129
+
130
+ class UpdateCachedContentRequest(proto.Message):
131
+ r"""Request to update CachedContent.
132
+
133
+ Attributes:
134
+ cached_content (google.ai.generativelanguage_v1beta.types.CachedContent):
135
+ Required. The content cache entry to update
136
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
137
+ The list of fields to update.
138
+ """
139
+
140
+ cached_content: gag_cached_content.CachedContent = proto.Field(
141
+ proto.MESSAGE,
142
+ number=1,
143
+ message=gag_cached_content.CachedContent,
144
+ )
145
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
146
+ proto.MESSAGE,
147
+ number=2,
148
+ message=field_mask_pb2.FieldMask,
149
+ )
150
+
151
+
152
+ class DeleteCachedContentRequest(proto.Message):
153
+ r"""Request to delete CachedContent.
154
+
155
+ Attributes:
156
+ name (str):
157
+ Required. The resource name referring to the content cache
158
+ entry Format: ``cachedContents/{id}``
159
+ """
160
+
161
+ name: str = proto.Field(
162
+ proto.STRING,
163
+ number=1,
164
+ )
165
+
166
+
167
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/cached_content.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import duration_pb2 # type: ignore
21
+ from google.protobuf import timestamp_pb2 # type: ignore
22
+ import proto # type: ignore
23
+
24
+ from google.ai.generativelanguage_v1beta.types import content
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1beta",
28
+ manifest={
29
+ "CachedContent",
30
+ },
31
+ )
32
+
33
+
34
+ class CachedContent(proto.Message):
35
+ r"""Content that has been preprocessed and can be used in
36
+ subsequent request to GenerativeService.
37
+
38
+ Cached content can be only used with model it was created for.
39
+
40
+ This message has `oneof`_ fields (mutually exclusive fields).
41
+ For each oneof, at most one member field can be set at the same time.
42
+ Setting any member of the oneof automatically clears all other
43
+ members.
44
+
45
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
46
+
47
+ Attributes:
48
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
49
+ Timestamp in UTC of when this resource is considered
50
+ expired. This is *always* provided on output, regardless of
51
+ what was sent on input.
52
+
53
+ This field is a member of `oneof`_ ``expiration``.
54
+ ttl (google.protobuf.duration_pb2.Duration):
55
+ Input only. New TTL for this resource, input
56
+ only.
57
+
58
+ This field is a member of `oneof`_ ``expiration``.
59
+ name (str):
60
+ Optional. Identifier. The resource name referring to the
61
+ cached content. Format: ``cachedContents/{id}``
62
+
63
+ This field is a member of `oneof`_ ``_name``.
64
+ display_name (str):
65
+ Optional. Immutable. The user-generated
66
+ meaningful display name of the cached content.
67
+ Maximum 128 Unicode characters.
68
+
69
+ This field is a member of `oneof`_ ``_display_name``.
70
+ model (str):
71
+ Required. Immutable. The name of the ``Model`` to use for
72
+ cached content Format: ``models/{model}``
73
+
74
+ This field is a member of `oneof`_ ``_model``.
75
+ system_instruction (google.ai.generativelanguage_v1beta.types.Content):
76
+ Optional. Input only. Immutable. Developer
77
+ set system instruction. Currently text only.
78
+
79
+ This field is a member of `oneof`_ ``_system_instruction``.
80
+ contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]):
81
+ Optional. Input only. Immutable. The content
82
+ to cache.
83
+ tools (MutableSequence[google.ai.generativelanguage_v1beta.types.Tool]):
84
+ Optional. Input only. Immutable. A list of ``Tools`` the
85
+ model may use to generate the next response
86
+ tool_config (google.ai.generativelanguage_v1beta.types.ToolConfig):
87
+ Optional. Input only. Immutable. Tool config.
88
+ This config is shared for all tools.
89
+
90
+ This field is a member of `oneof`_ ``_tool_config``.
91
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
92
+ Output only. Creation time of the cache
93
+ entry.
94
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
95
+ Output only. When the cache entry was last
96
+ updated in UTC time.
97
+ usage_metadata (google.ai.generativelanguage_v1beta.types.CachedContent.UsageMetadata):
98
+ Output only. Metadata on the usage of the
99
+ cached content.
100
+ """
101
+
102
+ class UsageMetadata(proto.Message):
103
+ r"""Metadata on the usage of the cached content.
104
+
105
+ Attributes:
106
+ total_token_count (int):
107
+ Total number of tokens that the cached
108
+ content consumes.
109
+ """
110
+
111
+ total_token_count: int = proto.Field(
112
+ proto.INT32,
113
+ number=1,
114
+ )
115
+
116
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
117
+ proto.MESSAGE,
118
+ number=9,
119
+ oneof="expiration",
120
+ message=timestamp_pb2.Timestamp,
121
+ )
122
+ ttl: duration_pb2.Duration = proto.Field(
123
+ proto.MESSAGE,
124
+ number=10,
125
+ oneof="expiration",
126
+ message=duration_pb2.Duration,
127
+ )
128
+ name: str = proto.Field(
129
+ proto.STRING,
130
+ number=1,
131
+ optional=True,
132
+ )
133
+ display_name: str = proto.Field(
134
+ proto.STRING,
135
+ number=11,
136
+ optional=True,
137
+ )
138
+ model: str = proto.Field(
139
+ proto.STRING,
140
+ number=2,
141
+ optional=True,
142
+ )
143
+ system_instruction: content.Content = proto.Field(
144
+ proto.MESSAGE,
145
+ number=3,
146
+ optional=True,
147
+ message=content.Content,
148
+ )
149
+ contents: MutableSequence[content.Content] = proto.RepeatedField(
150
+ proto.MESSAGE,
151
+ number=4,
152
+ message=content.Content,
153
+ )
154
+ tools: MutableSequence[content.Tool] = proto.RepeatedField(
155
+ proto.MESSAGE,
156
+ number=5,
157
+ message=content.Tool,
158
+ )
159
+ tool_config: content.ToolConfig = proto.Field(
160
+ proto.MESSAGE,
161
+ number=6,
162
+ optional=True,
163
+ message=content.ToolConfig,
164
+ )
165
+ create_time: timestamp_pb2.Timestamp = proto.Field(
166
+ proto.MESSAGE,
167
+ number=7,
168
+ message=timestamp_pb2.Timestamp,
169
+ )
170
+ update_time: timestamp_pb2.Timestamp = proto.Field(
171
+ proto.MESSAGE,
172
+ number=8,
173
+ message=timestamp_pb2.Timestamp,
174
+ )
175
+ usage_metadata: UsageMetadata = proto.Field(
176
+ proto.MESSAGE,
177
+ number=12,
178
+ message=UsageMetadata,
179
+ )
180
+
181
+
182
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/citation.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta",
24
+ manifest={
25
+ "CitationMetadata",
26
+ "CitationSource",
27
+ },
28
+ )
29
+
30
+
31
+ class CitationMetadata(proto.Message):
32
+ r"""A collection of source attributions for a piece of content.
33
+
34
+ Attributes:
35
+ citation_sources (MutableSequence[google.ai.generativelanguage_v1beta.types.CitationSource]):
36
+ Citations to sources for a specific response.
37
+ """
38
+
39
+ citation_sources: MutableSequence["CitationSource"] = proto.RepeatedField(
40
+ proto.MESSAGE,
41
+ number=1,
42
+ message="CitationSource",
43
+ )
44
+
45
+
46
+ class CitationSource(proto.Message):
47
+ r"""A citation to a source for a portion of a specific response.
48
+
49
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
50
+
51
+ Attributes:
52
+ start_index (int):
53
+ Optional. Start of segment of the response
54
+ that is attributed to this source.
55
+
56
+ Index indicates the start of the segment,
57
+ measured in bytes.
58
+
59
+ This field is a member of `oneof`_ ``_start_index``.
60
+ end_index (int):
61
+ Optional. End of the attributed segment,
62
+ exclusive.
63
+
64
+ This field is a member of `oneof`_ ``_end_index``.
65
+ uri (str):
66
+ Optional. URI that is attributed as a source
67
+ for a portion of the text.
68
+
69
+ This field is a member of `oneof`_ ``_uri``.
70
+ license_ (str):
71
+ Optional. License for the GitHub project that
72
+ is attributed as a source for segment.
73
+
74
+ License info is required for code citations.
75
+
76
+ This field is a member of `oneof`_ ``_license``.
77
+ """
78
+
79
+ start_index: int = proto.Field(
80
+ proto.INT32,
81
+ number=1,
82
+ optional=True,
83
+ )
84
+ end_index: int = proto.Field(
85
+ proto.INT32,
86
+ number=2,
87
+ optional=True,
88
+ )
89
+ uri: str = proto.Field(
90
+ proto.STRING,
91
+ number=3,
92
+ optional=True,
93
+ )
94
+ license_: str = proto.Field(
95
+ proto.STRING,
96
+ number=4,
97
+ optional=True,
98
+ )
99
+
100
+
101
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/content.py ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import struct_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ __protobuf__ = proto.module(
24
+ package="google.ai.generativelanguage.v1beta",
25
+ manifest={
26
+ "Type",
27
+ "Content",
28
+ "Part",
29
+ "Blob",
30
+ "FileData",
31
+ "ExecutableCode",
32
+ "CodeExecutionResult",
33
+ "Tool",
34
+ "GoogleSearchRetrieval",
35
+ "DynamicRetrievalConfig",
36
+ "CodeExecution",
37
+ "ToolConfig",
38
+ "FunctionCallingConfig",
39
+ "FunctionDeclaration",
40
+ "FunctionCall",
41
+ "FunctionResponse",
42
+ "Schema",
43
+ "GroundingPassage",
44
+ "GroundingPassages",
45
+ },
46
+ )
47
+
48
+
49
+ class Type(proto.Enum):
50
+ r"""Type contains the list of OpenAPI data types as defined by
51
+ https://spec.openapis.org/oas/v3.0.3#data-types
52
+
53
+ Values:
54
+ TYPE_UNSPECIFIED (0):
55
+ Not specified, should not be used.
56
+ STRING (1):
57
+ String type.
58
+ NUMBER (2):
59
+ Number type.
60
+ INTEGER (3):
61
+ Integer type.
62
+ BOOLEAN (4):
63
+ Boolean type.
64
+ ARRAY (5):
65
+ Array type.
66
+ OBJECT (6):
67
+ Object type.
68
+ """
69
+ TYPE_UNSPECIFIED = 0
70
+ STRING = 1
71
+ NUMBER = 2
72
+ INTEGER = 3
73
+ BOOLEAN = 4
74
+ ARRAY = 5
75
+ OBJECT = 6
76
+
77
+
78
+ class Content(proto.Message):
79
+ r"""The base structured datatype containing multi-part content of a
80
+ message.
81
+
82
+ A ``Content`` includes a ``role`` field designating the producer of
83
+ the ``Content`` and a ``parts`` field containing multi-part data
84
+ that contains the content of the message turn.
85
+
86
+ Attributes:
87
+ parts (MutableSequence[google.ai.generativelanguage_v1beta.types.Part]):
88
+ Ordered ``Parts`` that constitute a single message. Parts
89
+ may have different MIME types.
90
+ role (str):
91
+ Optional. The producer of the content. Must
92
+ be either 'user' or 'model'.
93
+ Useful to set for multi-turn conversations,
94
+ otherwise can be left blank or unset.
95
+ """
96
+
97
+ parts: MutableSequence["Part"] = proto.RepeatedField(
98
+ proto.MESSAGE,
99
+ number=1,
100
+ message="Part",
101
+ )
102
+ role: str = proto.Field(
103
+ proto.STRING,
104
+ number=2,
105
+ )
106
+
107
+
108
+ class Part(proto.Message):
109
+ r"""A datatype containing media that is part of a multi-part ``Content``
110
+ message.
111
+
112
+ A ``Part`` consists of data which has an associated datatype. A
113
+ ``Part`` can only contain one of the accepted types in
114
+ ``Part.data``.
115
+
116
+ A ``Part`` must have a fixed IANA MIME type identifying the type and
117
+ subtype of the media if the ``inline_data`` field is filled with raw
118
+ bytes.
119
+
120
+ This message has `oneof`_ fields (mutually exclusive fields).
121
+ For each oneof, at most one member field can be set at the same time.
122
+ Setting any member of the oneof automatically clears all other
123
+ members.
124
+
125
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
126
+
127
+ Attributes:
128
+ text (str):
129
+ Inline text.
130
+
131
+ This field is a member of `oneof`_ ``data``.
132
+ inline_data (google.ai.generativelanguage_v1beta.types.Blob):
133
+ Inline media bytes.
134
+
135
+ This field is a member of `oneof`_ ``data``.
136
+ function_call (google.ai.generativelanguage_v1beta.types.FunctionCall):
137
+ A predicted ``FunctionCall`` returned from the model that
138
+ contains a string representing the
139
+ ``FunctionDeclaration.name`` with the arguments and their
140
+ values.
141
+
142
+ This field is a member of `oneof`_ ``data``.
143
+ function_response (google.ai.generativelanguage_v1beta.types.FunctionResponse):
144
+ The result output of a ``FunctionCall`` that contains a
145
+ string representing the ``FunctionDeclaration.name`` and a
146
+ structured JSON object containing any output from the
147
+ function is used as context to the model.
148
+
149
+ This field is a member of `oneof`_ ``data``.
150
+ file_data (google.ai.generativelanguage_v1beta.types.FileData):
151
+ URI based data.
152
+
153
+ This field is a member of `oneof`_ ``data``.
154
+ executable_code (google.ai.generativelanguage_v1beta.types.ExecutableCode):
155
+ Code generated by the model that is meant to
156
+ be executed.
157
+
158
+ This field is a member of `oneof`_ ``data``.
159
+ code_execution_result (google.ai.generativelanguage_v1beta.types.CodeExecutionResult):
160
+ Result of executing the ``ExecutableCode``.
161
+
162
+ This field is a member of `oneof`_ ``data``.
163
+ """
164
+
165
+ text: str = proto.Field(
166
+ proto.STRING,
167
+ number=2,
168
+ oneof="data",
169
+ )
170
+ inline_data: "Blob" = proto.Field(
171
+ proto.MESSAGE,
172
+ number=3,
173
+ oneof="data",
174
+ message="Blob",
175
+ )
176
+ function_call: "FunctionCall" = proto.Field(
177
+ proto.MESSAGE,
178
+ number=4,
179
+ oneof="data",
180
+ message="FunctionCall",
181
+ )
182
+ function_response: "FunctionResponse" = proto.Field(
183
+ proto.MESSAGE,
184
+ number=5,
185
+ oneof="data",
186
+ message="FunctionResponse",
187
+ )
188
+ file_data: "FileData" = proto.Field(
189
+ proto.MESSAGE,
190
+ number=6,
191
+ oneof="data",
192
+ message="FileData",
193
+ )
194
+ executable_code: "ExecutableCode" = proto.Field(
195
+ proto.MESSAGE,
196
+ number=9,
197
+ oneof="data",
198
+ message="ExecutableCode",
199
+ )
200
+ code_execution_result: "CodeExecutionResult" = proto.Field(
201
+ proto.MESSAGE,
202
+ number=10,
203
+ oneof="data",
204
+ message="CodeExecutionResult",
205
+ )
206
+
207
+
208
+ class Blob(proto.Message):
209
+ r"""Raw media bytes.
210
+
211
+ Text should not be sent as raw bytes, use the 'text' field.
212
+
213
+ Attributes:
214
+ mime_type (str):
215
+ The IANA standard MIME type of the source data. Examples:
216
+
217
+ - image/png
218
+ - image/jpeg If an unsupported MIME type is provided, an
219
+ error will be returned. For a complete list of supported
220
+ types, see `Supported file
221
+ formats <https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>`__.
222
+ data (bytes):
223
+ Raw bytes for media formats.
224
+ """
225
+
226
+ mime_type: str = proto.Field(
227
+ proto.STRING,
228
+ number=1,
229
+ )
230
+ data: bytes = proto.Field(
231
+ proto.BYTES,
232
+ number=2,
233
+ )
234
+
235
+
236
+ class FileData(proto.Message):
237
+ r"""URI based data.
238
+
239
+ Attributes:
240
+ mime_type (str):
241
+ Optional. The IANA standard MIME type of the
242
+ source data.
243
+ file_uri (str):
244
+ Required. URI.
245
+ """
246
+
247
+ mime_type: str = proto.Field(
248
+ proto.STRING,
249
+ number=1,
250
+ )
251
+ file_uri: str = proto.Field(
252
+ proto.STRING,
253
+ number=2,
254
+ )
255
+
256
+
257
+ class ExecutableCode(proto.Message):
258
+ r"""Code generated by the model that is meant to be executed, and the
259
+ result returned to the model.
260
+
261
+ Only generated when using the ``CodeExecution`` tool, in which the
262
+ code will be automatically executed, and a corresponding
263
+ ``CodeExecutionResult`` will also be generated.
264
+
265
+ Attributes:
266
+ language (google.ai.generativelanguage_v1beta.types.ExecutableCode.Language):
267
+ Required. Programming language of the ``code``.
268
+ code (str):
269
+ Required. The code to be executed.
270
+ """
271
+
272
+ class Language(proto.Enum):
273
+ r"""Supported programming languages for the generated code.
274
+
275
+ Values:
276
+ LANGUAGE_UNSPECIFIED (0):
277
+ Unspecified language. This value should not
278
+ be used.
279
+ PYTHON (1):
280
+ Python >= 3.10, with numpy and simpy
281
+ available.
282
+ """
283
+ LANGUAGE_UNSPECIFIED = 0
284
+ PYTHON = 1
285
+
286
+ language: Language = proto.Field(
287
+ proto.ENUM,
288
+ number=1,
289
+ enum=Language,
290
+ )
291
+ code: str = proto.Field(
292
+ proto.STRING,
293
+ number=2,
294
+ )
295
+
296
+
297
+ class CodeExecutionResult(proto.Message):
298
+ r"""Result of executing the ``ExecutableCode``.
299
+
300
+ Only generated when using the ``CodeExecution``, and always follows
301
+ a ``part`` containing the ``ExecutableCode``.
302
+
303
+ Attributes:
304
+ outcome (google.ai.generativelanguage_v1beta.types.CodeExecutionResult.Outcome):
305
+ Required. Outcome of the code execution.
306
+ output (str):
307
+ Optional. Contains stdout when code execution
308
+ is successful, stderr or other description
309
+ otherwise.
310
+ """
311
+
312
+ class Outcome(proto.Enum):
313
+ r"""Enumeration of possible outcomes of the code execution.
314
+
315
+ Values:
316
+ OUTCOME_UNSPECIFIED (0):
317
+ Unspecified status. This value should not be
318
+ used.
319
+ OUTCOME_OK (1):
320
+ Code execution completed successfully.
321
+ OUTCOME_FAILED (2):
322
+ Code execution finished but with a failure. ``stderr``
323
+ should contain the reason.
324
+ OUTCOME_DEADLINE_EXCEEDED (3):
325
+ Code execution ran for too long, and was
326
+ cancelled. There may or may not be a partial
327
+ output present.
328
+ """
329
+ OUTCOME_UNSPECIFIED = 0
330
+ OUTCOME_OK = 1
331
+ OUTCOME_FAILED = 2
332
+ OUTCOME_DEADLINE_EXCEEDED = 3
333
+
334
+ outcome: Outcome = proto.Field(
335
+ proto.ENUM,
336
+ number=1,
337
+ enum=Outcome,
338
+ )
339
+ output: str = proto.Field(
340
+ proto.STRING,
341
+ number=2,
342
+ )
343
+
344
+
345
+ class Tool(proto.Message):
346
+ r"""Tool details that the model may use to generate response.
347
+
348
+ A ``Tool`` is a piece of code that enables the system to interact
349
+ with external systems to perform an action, or set of actions,
350
+ outside of knowledge and scope of the model.
351
+
352
+ Attributes:
353
+ function_declarations (MutableSequence[google.ai.generativelanguage_v1beta.types.FunctionDeclaration]):
354
+ Optional. A list of ``FunctionDeclarations`` available to
355
+ the model that can be used for function calling.
356
+
357
+ The model or system does not execute the function. Instead
358
+ the defined function may be returned as a
359
+ [FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
360
+ with arguments to the client side for execution. The model
361
+ may decide to call a subset of these functions by populating
362
+ [FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
363
+ in the response. The next conversation turn may contain a
364
+ [FunctionResponse][google.ai.generativelanguage.v1beta.Part.function_response]
365
+ with the
366
+ [Content.role][google.ai.generativelanguage.v1beta.Content.role]
367
+ "function" generation context for the next model turn.
368
+ google_search_retrieval (google.ai.generativelanguage_v1beta.types.GoogleSearchRetrieval):
369
+ Optional. Retrieval tool that is powered by
370
+ Google search.
371
+ code_execution (google.ai.generativelanguage_v1beta.types.CodeExecution):
372
+ Optional. Enables the model to execute code
373
+ as part of generation.
374
+ google_search (google.ai.generativelanguage_v1beta.types.Tool.GoogleSearch):
375
+ Optional. GoogleSearch tool type.
376
+ Tool to support Google Search in Model. Powered
377
+ by Google.
378
+ """
379
+
380
+ class GoogleSearch(proto.Message):
381
+ r"""GoogleSearch tool type.
382
+ Tool to support Google Search in Model. Powered by Google.
383
+
384
+ """
385
+
386
+ function_declarations: MutableSequence["FunctionDeclaration"] = proto.RepeatedField(
387
+ proto.MESSAGE,
388
+ number=1,
389
+ message="FunctionDeclaration",
390
+ )
391
+ google_search_retrieval: "GoogleSearchRetrieval" = proto.Field(
392
+ proto.MESSAGE,
393
+ number=2,
394
+ message="GoogleSearchRetrieval",
395
+ )
396
+ code_execution: "CodeExecution" = proto.Field(
397
+ proto.MESSAGE,
398
+ number=3,
399
+ message="CodeExecution",
400
+ )
401
+ google_search: GoogleSearch = proto.Field(
402
+ proto.MESSAGE,
403
+ number=4,
404
+ message=GoogleSearch,
405
+ )
406
+
407
+
408
+ class GoogleSearchRetrieval(proto.Message):
409
+ r"""Tool to retrieve public web data for grounding, powered by
410
+ Google.
411
+
412
+ Attributes:
413
+ dynamic_retrieval_config (google.ai.generativelanguage_v1beta.types.DynamicRetrievalConfig):
414
+ Specifies the dynamic retrieval configuration
415
+ for the given source.
416
+ """
417
+
418
+ dynamic_retrieval_config: "DynamicRetrievalConfig" = proto.Field(
419
+ proto.MESSAGE,
420
+ number=1,
421
+ message="DynamicRetrievalConfig",
422
+ )
423
+
424
+
425
+ class DynamicRetrievalConfig(proto.Message):
426
+ r"""Describes the options to customize dynamic retrieval.
427
+
428
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
429
+
430
+ Attributes:
431
+ mode (google.ai.generativelanguage_v1beta.types.DynamicRetrievalConfig.Mode):
432
+ The mode of the predictor to be used in
433
+ dynamic retrieval.
434
+ dynamic_threshold (float):
435
+ The threshold to be used in dynamic
436
+ retrieval. If not set, a system default value is
437
+ used.
438
+
439
+ This field is a member of `oneof`_ ``_dynamic_threshold``.
440
+ """
441
+
442
+ class Mode(proto.Enum):
443
+ r"""The mode of the predictor to be used in dynamic retrieval.
444
+
445
+ Values:
446
+ MODE_UNSPECIFIED (0):
447
+ Always trigger retrieval.
448
+ MODE_DYNAMIC (1):
449
+ Run retrieval only when system decides it is
450
+ necessary.
451
+ """
452
+ MODE_UNSPECIFIED = 0
453
+ MODE_DYNAMIC = 1
454
+
455
+ mode: Mode = proto.Field(
456
+ proto.ENUM,
457
+ number=1,
458
+ enum=Mode,
459
+ )
460
+ dynamic_threshold: float = proto.Field(
461
+ proto.FLOAT,
462
+ number=2,
463
+ optional=True,
464
+ )
465
+
466
+
467
+ class CodeExecution(proto.Message):
468
+ r"""Tool that executes code generated by the model, and automatically
469
+ returns the result to the model.
470
+
471
+ See also ``ExecutableCode`` and ``CodeExecutionResult`` which are
472
+ only generated when using this tool.
473
+
474
+ """
475
+
476
+
477
+ class ToolConfig(proto.Message):
478
+ r"""The Tool configuration containing parameters for specifying ``Tool``
479
+ use in the request.
480
+
481
+ Attributes:
482
+ function_calling_config (google.ai.generativelanguage_v1beta.types.FunctionCallingConfig):
483
+ Optional. Function calling config.
484
+ """
485
+
486
+ function_calling_config: "FunctionCallingConfig" = proto.Field(
487
+ proto.MESSAGE,
488
+ number=1,
489
+ message="FunctionCallingConfig",
490
+ )
491
+
492
+
493
+ class FunctionCallingConfig(proto.Message):
494
+ r"""Configuration for specifying function calling behavior.
495
+
496
+ Attributes:
497
+ mode (google.ai.generativelanguage_v1beta.types.FunctionCallingConfig.Mode):
498
+ Optional. Specifies the mode in which
499
+ function calling should execute. If unspecified,
500
+ the default value will be set to AUTO.
501
+ allowed_function_names (MutableSequence[str]):
502
+ Optional. A set of function names that, when provided,
503
+ limits the functions the model will call.
504
+
505
+ This should only be set when the Mode is ANY. Function names
506
+ should match [FunctionDeclaration.name]. With mode set to
507
+ ANY, model will predict a function call from the set of
508
+ function names provided.
509
+ """
510
+
511
+ class Mode(proto.Enum):
512
+ r"""Defines the execution behavior for function calling by
513
+ defining the execution mode.
514
+
515
+ Values:
516
+ MODE_UNSPECIFIED (0):
517
+ Unspecified function calling mode. This value
518
+ should not be used.
519
+ AUTO (1):
520
+ Default model behavior, model decides to
521
+ predict either a function call or a natural
522
+ language response.
523
+ ANY (2):
524
+ Model is constrained to always predicting a function call
525
+ only. If "allowed_function_names" are set, the predicted
526
+ function call will be limited to any one of
527
+ "allowed_function_names", else the predicted function call
528
+ will be any one of the provided "function_declarations".
529
+ NONE (3):
530
+ Model will not predict any function call.
531
+ Model behavior is same as when not passing any
532
+ function declarations.
533
+ """
534
+ MODE_UNSPECIFIED = 0
535
+ AUTO = 1
536
+ ANY = 2
537
+ NONE = 3
538
+
539
+ mode: Mode = proto.Field(
540
+ proto.ENUM,
541
+ number=1,
542
+ enum=Mode,
543
+ )
544
+ allowed_function_names: MutableSequence[str] = proto.RepeatedField(
545
+ proto.STRING,
546
+ number=2,
547
+ )
548
+
549
+
550
+ class FunctionDeclaration(proto.Message):
551
+ r"""Structured representation of a function declaration as defined by
552
+ the `OpenAPI 3.03
553
+ specification <https://spec.openapis.org/oas/v3.0.3>`__. Included in
554
+ this declaration are the function name and parameters. This
555
+ FunctionDeclaration is a representation of a block of code that can
556
+ be used as a ``Tool`` by the model and executed by the client.
557
+
558
+
559
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
560
+
561
+ Attributes:
562
+ name (str):
563
+ Required. The name of the function.
564
+ Must be a-z, A-Z, 0-9, or contain underscores
565
+ and dashes, with a maximum length of 63.
566
+ description (str):
567
+ Required. A brief description of the
568
+ function.
569
+ parameters (google.ai.generativelanguage_v1beta.types.Schema):
570
+ Optional. Describes the parameters to this
571
+ function. Reflects the Open API 3.03 Parameter
572
+ Object string Key: the name of the parameter.
573
+ Parameter names are case sensitive. Schema
574
+ Value: the Schema defining the type used for the
575
+ parameter.
576
+
577
+ This field is a member of `oneof`_ ``_parameters``.
578
+ response (google.ai.generativelanguage_v1beta.types.Schema):
579
+ Optional. Describes the output from this
580
+ function in JSON Schema format. Reflects the
581
+ Open API 3.03 Response Object. The Schema
582
+ defines the type used for the response value of
583
+ the function.
584
+
585
+ This field is a member of `oneof`_ ``_response``.
586
+ """
587
+
588
+ name: str = proto.Field(
589
+ proto.STRING,
590
+ number=1,
591
+ )
592
+ description: str = proto.Field(
593
+ proto.STRING,
594
+ number=2,
595
+ )
596
+ parameters: "Schema" = proto.Field(
597
+ proto.MESSAGE,
598
+ number=3,
599
+ optional=True,
600
+ message="Schema",
601
+ )
602
+ response: "Schema" = proto.Field(
603
+ proto.MESSAGE,
604
+ number=4,
605
+ optional=True,
606
+ message="Schema",
607
+ )
608
+
609
+
610
+ class FunctionCall(proto.Message):
611
+ r"""A predicted ``FunctionCall`` returned from the model that contains a
612
+ string representing the ``FunctionDeclaration.name`` with the
613
+ arguments and their values.
614
+
615
+
616
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
617
+
618
+ Attributes:
619
+ id (str):
620
+ Optional. The unique id of the function call. If populated,
621
+ the client to execute the ``function_call`` and return the
622
+ response with the matching ``id``.
623
+ name (str):
624
+ Required. The name of the function to call.
625
+ Must be a-z, A-Z, 0-9, or contain underscores
626
+ and dashes, with a maximum length of 63.
627
+ args (google.protobuf.struct_pb2.Struct):
628
+ Optional. The function parameters and values
629
+ in JSON object format.
630
+
631
+ This field is a member of `oneof`_ ``_args``.
632
+ """
633
+
634
+ id: str = proto.Field(
635
+ proto.STRING,
636
+ number=3,
637
+ )
638
+ name: str = proto.Field(
639
+ proto.STRING,
640
+ number=1,
641
+ )
642
+ args: struct_pb2.Struct = proto.Field(
643
+ proto.MESSAGE,
644
+ number=2,
645
+ optional=True,
646
+ message=struct_pb2.Struct,
647
+ )
648
+
649
+
650
+ class FunctionResponse(proto.Message):
651
+ r"""The result output from a ``FunctionCall`` that contains a string
652
+ representing the ``FunctionDeclaration.name`` and a structured JSON
653
+ object containing any output from the function is used as context to
654
+ the model. This should contain the result of a\ ``FunctionCall``
655
+ made based on model prediction.
656
+
657
+ Attributes:
658
+ id (str):
659
+ Optional. The id of the function call this response is for.
660
+ Populated by the client to match the corresponding function
661
+ call ``id``.
662
+ name (str):
663
+ Required. The name of the function to call.
664
+ Must be a-z, A-Z, 0-9, or contain underscores
665
+ and dashes, with a maximum length of 63.
666
+ response (google.protobuf.struct_pb2.Struct):
667
+ Required. The function response in JSON
668
+ object format.
669
+ """
670
+
671
+ id: str = proto.Field(
672
+ proto.STRING,
673
+ number=3,
674
+ )
675
+ name: str = proto.Field(
676
+ proto.STRING,
677
+ number=1,
678
+ )
679
+ response: struct_pb2.Struct = proto.Field(
680
+ proto.MESSAGE,
681
+ number=2,
682
+ message=struct_pb2.Struct,
683
+ )
684
+
685
+
686
+ class Schema(proto.Message):
687
+ r"""The ``Schema`` object allows the definition of input and output data
688
+ types. These types can be objects, but also primitives and arrays.
689
+ Represents a select subset of an `OpenAPI 3.0 schema
690
+ object <https://spec.openapis.org/oas/v3.0.3#schema>`__.
691
+
692
+
693
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
694
+
695
+ Attributes:
696
+ type_ (google.ai.generativelanguage_v1beta.types.Type):
697
+ Required. Data type.
698
+ format_ (str):
699
+ Optional. The format of the data. This is
700
+ used only for primitive datatypes. Supported
701
+ formats:
702
+
703
+ for NUMBER type: float, double
704
+ for INTEGER type: int32, int64
705
+ for STRING type: enum
706
+ description (str):
707
+ Optional. A brief description of the
708
+ parameter. This could contain examples of use.
709
+ Parameter description may be formatted as
710
+ Markdown.
711
+ nullable (bool):
712
+ Optional. Indicates if the value may be null.
713
+ enum (MutableSequence[str]):
714
+ Optional. Possible values of the element of Type.STRING with
715
+ enum format. For example we can define an Enum Direction as
716
+ : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH",
717
+ "WEST"]}
718
+ items (google.ai.generativelanguage_v1beta.types.Schema):
719
+ Optional. Schema of the elements of
720
+ Type.ARRAY.
721
+
722
+ This field is a member of `oneof`_ ``_items``.
723
+ max_items (int):
724
+ Optional. Maximum number of the elements for
725
+ Type.ARRAY.
726
+ min_items (int):
727
+ Optional. Minimum number of the elements for
728
+ Type.ARRAY.
729
+ properties (MutableMapping[str, google.ai.generativelanguage_v1beta.types.Schema]):
730
+ Optional. Properties of Type.OBJECT.
731
+ required (MutableSequence[str]):
732
+ Optional. Required properties of Type.OBJECT.
733
+ """
734
+
735
+ type_: "Type" = proto.Field(
736
+ proto.ENUM,
737
+ number=1,
738
+ enum="Type",
739
+ )
740
+ format_: str = proto.Field(
741
+ proto.STRING,
742
+ number=2,
743
+ )
744
+ description: str = proto.Field(
745
+ proto.STRING,
746
+ number=3,
747
+ )
748
+ nullable: bool = proto.Field(
749
+ proto.BOOL,
750
+ number=4,
751
+ )
752
+ enum: MutableSequence[str] = proto.RepeatedField(
753
+ proto.STRING,
754
+ number=5,
755
+ )
756
+ items: "Schema" = proto.Field(
757
+ proto.MESSAGE,
758
+ number=6,
759
+ optional=True,
760
+ message="Schema",
761
+ )
762
+ max_items: int = proto.Field(
763
+ proto.INT64,
764
+ number=21,
765
+ )
766
+ min_items: int = proto.Field(
767
+ proto.INT64,
768
+ number=22,
769
+ )
770
+ properties: MutableMapping[str, "Schema"] = proto.MapField(
771
+ proto.STRING,
772
+ proto.MESSAGE,
773
+ number=7,
774
+ message="Schema",
775
+ )
776
+ required: MutableSequence[str] = proto.RepeatedField(
777
+ proto.STRING,
778
+ number=8,
779
+ )
780
+
781
+
782
+ class GroundingPassage(proto.Message):
783
+ r"""Passage included inline with a grounding configuration.
784
+
785
+ Attributes:
786
+ id (str):
787
+ Identifier for the passage for attributing
788
+ this passage in grounded answers.
789
+ content (google.ai.generativelanguage_v1beta.types.Content):
790
+ Content of the passage.
791
+ """
792
+
793
+ id: str = proto.Field(
794
+ proto.STRING,
795
+ number=1,
796
+ )
797
+ content: "Content" = proto.Field(
798
+ proto.MESSAGE,
799
+ number=2,
800
+ message="Content",
801
+ )
802
+
803
+
804
+ class GroundingPassages(proto.Message):
805
+ r"""A repeated list of passages.
806
+
807
+ Attributes:
808
+ passages (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingPassage]):
809
+ List of passages.
810
+ """
811
+
812
+ passages: MutableSequence["GroundingPassage"] = proto.RepeatedField(
813
+ proto.MESSAGE,
814
+ number=1,
815
+ message="GroundingPassage",
816
+ )
817
+
818
+
819
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/discuss_service.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta.types import citation, safety
23
+
24
+ __protobuf__ = proto.module(
25
+ package="google.ai.generativelanguage.v1beta",
26
+ manifest={
27
+ "GenerateMessageRequest",
28
+ "GenerateMessageResponse",
29
+ "Message",
30
+ "MessagePrompt",
31
+ "Example",
32
+ "CountMessageTokensRequest",
33
+ "CountMessageTokensResponse",
34
+ },
35
+ )
36
+
37
+
38
+ class GenerateMessageRequest(proto.Message):
39
+ r"""Request to generate a message response from the model.
40
+
41
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
42
+
43
+ Attributes:
44
+ model (str):
45
+ Required. The name of the model to use.
46
+
47
+ Format: ``name=models/{model}``.
48
+ prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt):
49
+ Required. The structured textual input given
50
+ to the model as a prompt.
51
+ Given a
52
+ prompt, the model will return what it predicts
53
+ is the next message in the discussion.
54
+ temperature (float):
55
+ Optional. Controls the randomness of the output.
56
+
57
+ Values can range over ``[0.0,1.0]``, inclusive. A value
58
+ closer to ``1.0`` will produce responses that are more
59
+ varied, while a value closer to ``0.0`` will typically
60
+ result in less surprising responses from the model.
61
+
62
+ This field is a member of `oneof`_ ``_temperature``.
63
+ candidate_count (int):
64
+ Optional. The number of generated response messages to
65
+ return.
66
+
67
+ This value must be between ``[1, 8]``, inclusive. If unset,
68
+ this will default to ``1``.
69
+
70
+ This field is a member of `oneof`_ ``_candidate_count``.
71
+ top_p (float):
72
+ Optional. The maximum cumulative probability of tokens to
73
+ consider when sampling.
74
+
75
+ The model uses combined Top-k and nucleus sampling.
76
+
77
+ Nucleus sampling considers the smallest set of tokens whose
78
+ probability sum is at least ``top_p``.
79
+
80
+ This field is a member of `oneof`_ ``_top_p``.
81
+ top_k (int):
82
+ Optional. The maximum number of tokens to consider when
83
+ sampling.
84
+
85
+ The model uses combined Top-k and nucleus sampling.
86
+
87
+ Top-k sampling considers the set of ``top_k`` most probable
88
+ tokens.
89
+
90
+ This field is a member of `oneof`_ ``_top_k``.
91
+ """
92
+
93
+ model: str = proto.Field(
94
+ proto.STRING,
95
+ number=1,
96
+ )
97
+ prompt: "MessagePrompt" = proto.Field(
98
+ proto.MESSAGE,
99
+ number=2,
100
+ message="MessagePrompt",
101
+ )
102
+ temperature: float = proto.Field(
103
+ proto.FLOAT,
104
+ number=3,
105
+ optional=True,
106
+ )
107
+ candidate_count: int = proto.Field(
108
+ proto.INT32,
109
+ number=4,
110
+ optional=True,
111
+ )
112
+ top_p: float = proto.Field(
113
+ proto.FLOAT,
114
+ number=5,
115
+ optional=True,
116
+ )
117
+ top_k: int = proto.Field(
118
+ proto.INT32,
119
+ number=6,
120
+ optional=True,
121
+ )
122
+
123
+
124
+ class GenerateMessageResponse(proto.Message):
125
+ r"""The response from the model.
126
+
127
+ This includes candidate messages and
128
+ conversation history in the form of chronologically-ordered
129
+ messages.
130
+
131
+ Attributes:
132
+ candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]):
133
+ Candidate response messages from the model.
134
+ messages (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]):
135
+ The conversation history used by the model.
136
+ filters (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentFilter]):
137
+ A set of content filtering metadata for the prompt and
138
+ response text.
139
+
140
+ This indicates which ``SafetyCategory``\ (s) blocked a
141
+ candidate from this response, the lowest ``HarmProbability``
142
+ that triggered a block, and the HarmThreshold setting for
143
+ that category.
144
+ """
145
+
146
+ candidates: MutableSequence["Message"] = proto.RepeatedField(
147
+ proto.MESSAGE,
148
+ number=1,
149
+ message="Message",
150
+ )
151
+ messages: MutableSequence["Message"] = proto.RepeatedField(
152
+ proto.MESSAGE,
153
+ number=2,
154
+ message="Message",
155
+ )
156
+ filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField(
157
+ proto.MESSAGE,
158
+ number=3,
159
+ message=safety.ContentFilter,
160
+ )
161
+
162
+
163
+ class Message(proto.Message):
164
+ r"""The base unit of structured text.
165
+
166
+ A ``Message`` includes an ``author`` and the ``content`` of the
167
+ ``Message``.
168
+
169
+ The ``author`` is used to tag messages when they are fed to the
170
+ model as text.
171
+
172
+
173
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
174
+
175
+ Attributes:
176
+ author (str):
177
+ Optional. The author of this Message.
178
+
179
+ This serves as a key for tagging
180
+ the content of this Message when it is fed to
181
+ the model as text.
182
+
183
+ The author can be any alphanumeric string.
184
+ content (str):
185
+ Required. The text content of the structured ``Message``.
186
+ citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata):
187
+ Output only. Citation information for model-generated
188
+ ``content`` in this ``Message``.
189
+
190
+ If this ``Message`` was generated as output from the model,
191
+ this field may be populated with attribution information for
192
+ any text included in the ``content``. This field is used
193
+ only on output.
194
+
195
+ This field is a member of `oneof`_ ``_citation_metadata``.
196
+ """
197
+
198
+ author: str = proto.Field(
199
+ proto.STRING,
200
+ number=1,
201
+ )
202
+ content: str = proto.Field(
203
+ proto.STRING,
204
+ number=2,
205
+ )
206
+ citation_metadata: citation.CitationMetadata = proto.Field(
207
+ proto.MESSAGE,
208
+ number=3,
209
+ optional=True,
210
+ message=citation.CitationMetadata,
211
+ )
212
+
213
+
214
+ class MessagePrompt(proto.Message):
215
+ r"""All of the structured input text passed to the model as a prompt.
216
+
217
+ A ``MessagePrompt`` contains a structured set of fields that provide
218
+ context for the conversation, examples of user input/model output
219
+ message pairs that prime the model to respond in different ways, and
220
+ the conversation history or list of messages representing the
221
+ alternating turns of the conversation between the user and the
222
+ model.
223
+
224
+ Attributes:
225
+ context (str):
226
+ Optional. Text that should be provided to the model first to
227
+ ground the response.
228
+
229
+ If not empty, this ``context`` will be given to the model
230
+ first before the ``examples`` and ``messages``. When using a
231
+ ``context`` be sure to provide it with every request to
232
+ maintain continuity.
233
+
234
+ This field can be a description of your prompt to the model
235
+ to help provide context and guide the responses. Examples:
236
+ "Translate the phrase from English to French." or "Given a
237
+ statement, classify the sentiment as happy, sad or neutral."
238
+
239
+ Anything included in this field will take precedence over
240
+ message history if the total input size exceeds the model's
241
+ ``input_token_limit`` and the input request is truncated.
242
+ examples (MutableSequence[google.ai.generativelanguage_v1beta.types.Example]):
243
+ Optional. Examples of what the model should generate.
244
+
245
+ This includes both user input and the response that the
246
+ model should emulate.
247
+
248
+ These ``examples`` are treated identically to conversation
249
+ messages except that they take precedence over the history
250
+ in ``messages``: If the total input size exceeds the model's
251
+ ``input_token_limit`` the input will be truncated. Items
252
+ will be dropped from ``messages`` before ``examples``.
253
+ messages (MutableSequence[google.ai.generativelanguage_v1beta.types.Message]):
254
+ Required. A snapshot of the recent conversation history
255
+ sorted chronologically.
256
+
257
+ Turns alternate between two authors.
258
+
259
+ If the total input size exceeds the model's
260
+ ``input_token_limit`` the input will be truncated: The
261
+ oldest items will be dropped from ``messages``.
262
+ """
263
+
264
+ context: str = proto.Field(
265
+ proto.STRING,
266
+ number=1,
267
+ )
268
+ examples: MutableSequence["Example"] = proto.RepeatedField(
269
+ proto.MESSAGE,
270
+ number=2,
271
+ message="Example",
272
+ )
273
+ messages: MutableSequence["Message"] = proto.RepeatedField(
274
+ proto.MESSAGE,
275
+ number=3,
276
+ message="Message",
277
+ )
278
+
279
+
280
+ class Example(proto.Message):
281
+ r"""An input/output example used to instruct the Model.
282
+
283
+ It demonstrates how the model should respond or format its
284
+ response.
285
+
286
+ Attributes:
287
+ input (google.ai.generativelanguage_v1beta.types.Message):
288
+ Required. An example of an input ``Message`` from the user.
289
+ output (google.ai.generativelanguage_v1beta.types.Message):
290
+ Required. An example of what the model should
291
+ output given the input.
292
+ """
293
+
294
+ input: "Message" = proto.Field(
295
+ proto.MESSAGE,
296
+ number=1,
297
+ message="Message",
298
+ )
299
+ output: "Message" = proto.Field(
300
+ proto.MESSAGE,
301
+ number=2,
302
+ message="Message",
303
+ )
304
+
305
+
306
+ class CountMessageTokensRequest(proto.Message):
307
+ r"""Counts the number of tokens in the ``prompt`` sent to a model.
308
+
309
+ Models may tokenize text differently, so each model may return a
310
+ different ``token_count``.
311
+
312
+ Attributes:
313
+ model (str):
314
+ Required. The model's resource name. This serves as an ID
315
+ for the Model to use.
316
+
317
+ This name should match a model name returned by the
318
+ ``ListModels`` method.
319
+
320
+ Format: ``models/{model}``
321
+ prompt (google.ai.generativelanguage_v1beta.types.MessagePrompt):
322
+ Required. The prompt, whose token count is to
323
+ be returned.
324
+ """
325
+
326
+ model: str = proto.Field(
327
+ proto.STRING,
328
+ number=1,
329
+ )
330
+ prompt: "MessagePrompt" = proto.Field(
331
+ proto.MESSAGE,
332
+ number=2,
333
+ message="MessagePrompt",
334
+ )
335
+
336
+
337
+ class CountMessageTokensResponse(proto.Message):
338
+ r"""A response from ``CountMessageTokens``.
339
+
340
+ It returns the model's ``token_count`` for the ``prompt``.
341
+
342
+ Attributes:
343
+ token_count (int):
344
+ The number of tokens that the ``model`` tokenizes the
345
+ ``prompt`` into.
346
+
347
+ Always non-negative.
348
+ """
349
+
350
+ token_count: int = proto.Field(
351
+ proto.INT32,
352
+ number=1,
353
+ )
354
+
355
+
356
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/file.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import duration_pb2 # type: ignore
21
+ from google.protobuf import timestamp_pb2 # type: ignore
22
+ from google.rpc import status_pb2 # type: ignore
23
+ import proto # type: ignore
24
+
25
+ __protobuf__ = proto.module(
26
+ package="google.ai.generativelanguage.v1beta",
27
+ manifest={
28
+ "File",
29
+ "VideoMetadata",
30
+ },
31
+ )
32
+
33
+
34
+ class File(proto.Message):
35
+ r"""A file uploaded to the API.
36
+ Next ID: 15
37
+
38
+
39
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
40
+
41
+ Attributes:
42
+ video_metadata (google.ai.generativelanguage_v1beta.types.VideoMetadata):
43
+ Output only. Metadata for a video.
44
+
45
+ This field is a member of `oneof`_ ``metadata``.
46
+ name (str):
47
+ Immutable. Identifier. The ``File`` resource name. The ID
48
+ (name excluding the "files/" prefix) can contain up to 40
49
+ characters that are lowercase alphanumeric or dashes (-).
50
+ The ID cannot start or end with a dash. If the name is empty
51
+ on create, a unique name will be generated. Example:
52
+ ``files/123-456``
53
+ display_name (str):
54
+ Optional. The human-readable display name for the ``File``.
55
+ The display name must be no more than 512 characters in
56
+ length, including spaces. Example: "Welcome Image".
57
+ mime_type (str):
58
+ Output only. MIME type of the file.
59
+ size_bytes (int):
60
+ Output only. Size of the file in bytes.
61
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
62
+ Output only. The timestamp of when the ``File`` was created.
63
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
64
+ Output only. The timestamp of when the ``File`` was last
65
+ updated.
66
+ expiration_time (google.protobuf.timestamp_pb2.Timestamp):
67
+ Output only. The timestamp of when the ``File`` will be
68
+ deleted. Only set if the ``File`` is scheduled to expire.
69
+ sha256_hash (bytes):
70
+ Output only. SHA-256 hash of the uploaded
71
+ bytes.
72
+ uri (str):
73
+ Output only. The uri of the ``File``.
74
+ state (google.ai.generativelanguage_v1beta.types.File.State):
75
+ Output only. Processing state of the File.
76
+ error (google.rpc.status_pb2.Status):
77
+ Output only. Error status if File processing
78
+ failed.
79
+ """
80
+
81
+ class State(proto.Enum):
82
+ r"""States for the lifecycle of a File.
83
+
84
+ Values:
85
+ STATE_UNSPECIFIED (0):
86
+ The default value. This value is used if the
87
+ state is omitted.
88
+ PROCESSING (1):
89
+ File is being processed and cannot be used
90
+ for inference yet.
91
+ ACTIVE (2):
92
+ File is processed and available for
93
+ inference.
94
+ FAILED (10):
95
+ File failed processing.
96
+ """
97
+ STATE_UNSPECIFIED = 0
98
+ PROCESSING = 1
99
+ ACTIVE = 2
100
+ FAILED = 10
101
+
102
+ video_metadata: "VideoMetadata" = proto.Field(
103
+ proto.MESSAGE,
104
+ number=12,
105
+ oneof="metadata",
106
+ message="VideoMetadata",
107
+ )
108
+ name: str = proto.Field(
109
+ proto.STRING,
110
+ number=1,
111
+ )
112
+ display_name: str = proto.Field(
113
+ proto.STRING,
114
+ number=2,
115
+ )
116
+ mime_type: str = proto.Field(
117
+ proto.STRING,
118
+ number=3,
119
+ )
120
+ size_bytes: int = proto.Field(
121
+ proto.INT64,
122
+ number=4,
123
+ )
124
+ create_time: timestamp_pb2.Timestamp = proto.Field(
125
+ proto.MESSAGE,
126
+ number=5,
127
+ message=timestamp_pb2.Timestamp,
128
+ )
129
+ update_time: timestamp_pb2.Timestamp = proto.Field(
130
+ proto.MESSAGE,
131
+ number=6,
132
+ message=timestamp_pb2.Timestamp,
133
+ )
134
+ expiration_time: timestamp_pb2.Timestamp = proto.Field(
135
+ proto.MESSAGE,
136
+ number=7,
137
+ message=timestamp_pb2.Timestamp,
138
+ )
139
+ sha256_hash: bytes = proto.Field(
140
+ proto.BYTES,
141
+ number=8,
142
+ )
143
+ uri: str = proto.Field(
144
+ proto.STRING,
145
+ number=9,
146
+ )
147
+ state: State = proto.Field(
148
+ proto.ENUM,
149
+ number=10,
150
+ enum=State,
151
+ )
152
+ error: status_pb2.Status = proto.Field(
153
+ proto.MESSAGE,
154
+ number=11,
155
+ message=status_pb2.Status,
156
+ )
157
+
158
+
159
+ class VideoMetadata(proto.Message):
160
+ r"""Metadata for a video ``File``.
161
+
162
+ Attributes:
163
+ video_duration (google.protobuf.duration_pb2.Duration):
164
+ Duration of the video.
165
+ """
166
+
167
+ video_duration: duration_pb2.Duration = proto.Field(
168
+ proto.MESSAGE,
169
+ number=1,
170
+ message=duration_pb2.Duration,
171
+ )
172
+
173
+
174
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/file_service.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta.types import file as gag_file
23
+
24
+ __protobuf__ = proto.module(
25
+ package="google.ai.generativelanguage.v1beta",
26
+ manifest={
27
+ "CreateFileRequest",
28
+ "CreateFileResponse",
29
+ "ListFilesRequest",
30
+ "ListFilesResponse",
31
+ "GetFileRequest",
32
+ "DeleteFileRequest",
33
+ },
34
+ )
35
+
36
+
37
+ class CreateFileRequest(proto.Message):
38
+ r"""Request for ``CreateFile``.
39
+
40
+ Attributes:
41
+ file (google.ai.generativelanguage_v1beta.types.File):
42
+ Optional. Metadata for the file to create.
43
+ """
44
+
45
+ file: gag_file.File = proto.Field(
46
+ proto.MESSAGE,
47
+ number=1,
48
+ message=gag_file.File,
49
+ )
50
+
51
+
52
+ class CreateFileResponse(proto.Message):
53
+ r"""Response for ``CreateFile``.
54
+
55
+ Attributes:
56
+ file (google.ai.generativelanguage_v1beta.types.File):
57
+ Metadata for the created file.
58
+ """
59
+
60
+ file: gag_file.File = proto.Field(
61
+ proto.MESSAGE,
62
+ number=1,
63
+ message=gag_file.File,
64
+ )
65
+
66
+
67
+ class ListFilesRequest(proto.Message):
68
+ r"""Request for ``ListFiles``.
69
+
70
+ Attributes:
71
+ page_size (int):
72
+ Optional. Maximum number of ``File``\ s to return per page.
73
+ If unspecified, defaults to 10. Maximum ``page_size`` is
74
+ 100.
75
+ page_token (str):
76
+ Optional. A page token from a previous ``ListFiles`` call.
77
+ """
78
+
79
+ page_size: int = proto.Field(
80
+ proto.INT32,
81
+ number=1,
82
+ )
83
+ page_token: str = proto.Field(
84
+ proto.STRING,
85
+ number=3,
86
+ )
87
+
88
+
89
+ class ListFilesResponse(proto.Message):
90
+ r"""Response for ``ListFiles``.
91
+
92
+ Attributes:
93
+ files (MutableSequence[google.ai.generativelanguage_v1beta.types.File]):
94
+ The list of ``File``\ s.
95
+ next_page_token (str):
96
+ A token that can be sent as a ``page_token`` into a
97
+ subsequent ``ListFiles`` call.
98
+ """
99
+
100
+ @property
101
+ def raw_page(self):
102
+ return self
103
+
104
+ files: MutableSequence[gag_file.File] = proto.RepeatedField(
105
+ proto.MESSAGE,
106
+ number=1,
107
+ message=gag_file.File,
108
+ )
109
+ next_page_token: str = proto.Field(
110
+ proto.STRING,
111
+ number=2,
112
+ )
113
+
114
+
115
+ class GetFileRequest(proto.Message):
116
+ r"""Request for ``GetFile``.
117
+
118
+ Attributes:
119
+ name (str):
120
+ Required. The name of the ``File`` to get. Example:
121
+ ``files/abc-123``
122
+ """
123
+
124
+ name: str = proto.Field(
125
+ proto.STRING,
126
+ number=1,
127
+ )
128
+
129
+
130
+ class DeleteFileRequest(proto.Message):
131
+ r"""Request for ``DeleteFile``.
132
+
133
+ Attributes:
134
+ name (str):
135
+ Required. The name of the ``File`` to delete. Example:
136
+ ``files/abc-123``
137
+ """
138
+
139
+ name: str = proto.Field(
140
+ proto.STRING,
141
+ number=1,
142
+ )
143
+
144
+
145
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/generative_service.py ADDED
@@ -0,0 +1,1751 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta.types import citation
23
+ from google.ai.generativelanguage_v1beta.types import content as gag_content
24
+ from google.ai.generativelanguage_v1beta.types import retriever, safety
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1beta",
28
+ manifest={
29
+ "TaskType",
30
+ "GenerateContentRequest",
31
+ "PrebuiltVoiceConfig",
32
+ "VoiceConfig",
33
+ "SpeechConfig",
34
+ "GenerationConfig",
35
+ "SemanticRetrieverConfig",
36
+ "GenerateContentResponse",
37
+ "Candidate",
38
+ "LogprobsResult",
39
+ "AttributionSourceId",
40
+ "GroundingAttribution",
41
+ "RetrievalMetadata",
42
+ "GroundingMetadata",
43
+ "SearchEntryPoint",
44
+ "GroundingChunk",
45
+ "Segment",
46
+ "GroundingSupport",
47
+ "GenerateAnswerRequest",
48
+ "GenerateAnswerResponse",
49
+ "EmbedContentRequest",
50
+ "ContentEmbedding",
51
+ "EmbedContentResponse",
52
+ "BatchEmbedContentsRequest",
53
+ "BatchEmbedContentsResponse",
54
+ "CountTokensRequest",
55
+ "CountTokensResponse",
56
+ },
57
+ )
58
+
59
+
60
+ class TaskType(proto.Enum):
61
+ r"""Type of task for which the embedding will be used.
62
+
63
+ Values:
64
+ TASK_TYPE_UNSPECIFIED (0):
65
+ Unset value, which will default to one of the
66
+ other enum values.
67
+ RETRIEVAL_QUERY (1):
68
+ Specifies the given text is a query in a
69
+ search/retrieval setting.
70
+ RETRIEVAL_DOCUMENT (2):
71
+ Specifies the given text is a document from
72
+ the corpus being searched.
73
+ SEMANTIC_SIMILARITY (3):
74
+ Specifies the given text will be used for
75
+ STS.
76
+ CLASSIFICATION (4):
77
+ Specifies that the given text will be
78
+ classified.
79
+ CLUSTERING (5):
80
+ Specifies that the embeddings will be used
81
+ for clustering.
82
+ QUESTION_ANSWERING (6):
83
+ Specifies that the given text will be used
84
+ for question answering.
85
+ FACT_VERIFICATION (7):
86
+ Specifies that the given text will be used
87
+ for fact verification.
88
+ """
89
+ TASK_TYPE_UNSPECIFIED = 0
90
+ RETRIEVAL_QUERY = 1
91
+ RETRIEVAL_DOCUMENT = 2
92
+ SEMANTIC_SIMILARITY = 3
93
+ CLASSIFICATION = 4
94
+ CLUSTERING = 5
95
+ QUESTION_ANSWERING = 6
96
+ FACT_VERIFICATION = 7
97
+
98
+
99
+ class GenerateContentRequest(proto.Message):
100
+ r"""Request to generate a completion from the model.
101
+
102
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
103
+
104
+ Attributes:
105
+ model (str):
106
+ Required. The name of the ``Model`` to use for generating
107
+ the completion.
108
+
109
+ Format: ``models/{model}``.
110
+ system_instruction (google.ai.generativelanguage_v1beta.types.Content):
111
+ Optional. Developer set `system
112
+ instruction(s) <https://ai.google.dev/gemini-api/docs/system-instructions>`__.
113
+ Currently, text only.
114
+
115
+ This field is a member of `oneof`_ ``_system_instruction``.
116
+ contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]):
117
+ Required. The content of the current conversation with the
118
+ model.
119
+
120
+ For single-turn queries, this is a single instance. For
121
+ multi-turn queries like
122
+ `chat <https://ai.google.dev/gemini-api/docs/text-generation#chat>`__,
123
+ this is a repeated field that contains the conversation
124
+ history and the latest request.
125
+ tools (MutableSequence[google.ai.generativelanguage_v1beta.types.Tool]):
126
+ Optional. A list of ``Tools`` the ``Model`` may use to
127
+ generate the next response.
128
+
129
+ A ``Tool`` is a piece of code that enables the system to
130
+ interact with external systems to perform an action, or set
131
+ of actions, outside of knowledge and scope of the ``Model``.
132
+ Supported ``Tool``\ s are ``Function`` and
133
+ ``code_execution``. Refer to the `Function
134
+ calling <https://ai.google.dev/gemini-api/docs/function-calling>`__
135
+ and the `Code
136
+ execution <https://ai.google.dev/gemini-api/docs/code-execution>`__
137
+ guides to learn more.
138
+ tool_config (google.ai.generativelanguage_v1beta.types.ToolConfig):
139
+ Optional. Tool configuration for any ``Tool`` specified in
140
+ the request. Refer to the `Function calling
141
+ guide <https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode>`__
142
+ for a usage example.
143
+ safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]):
144
+ Optional. A list of unique ``SafetySetting`` instances for
145
+ blocking unsafe content.
146
+
147
+ This will be enforced on the
148
+ ``GenerateContentRequest.contents`` and
149
+ ``GenerateContentResponse.candidates``. There should not be
150
+ more than one setting for each ``SafetyCategory`` type. The
151
+ API will block any contents and responses that fail to meet
152
+ the thresholds set by these settings. This list overrides
153
+ the default settings for each ``SafetyCategory`` specified
154
+ in the safety_settings. If there is no ``SafetySetting`` for
155
+ a given ``SafetyCategory`` provided in the list, the API
156
+ will use the default safety setting for that category. Harm
157
+ categories HARM_CATEGORY_HATE_SPEECH,
158
+ HARM_CATEGORY_SEXUALLY_EXPLICIT,
159
+ HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT,
160
+ HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the
161
+ `guide <https://ai.google.dev/gemini-api/docs/safety-settings>`__
162
+ for detailed information on available safety settings. Also
163
+ refer to the `Safety
164
+ guidance <https://ai.google.dev/gemini-api/docs/safety-guidance>`__
165
+ to learn how to incorporate safety considerations in your AI
166
+ applications.
167
+ generation_config (google.ai.generativelanguage_v1beta.types.GenerationConfig):
168
+ Optional. Configuration options for model
169
+ generation and outputs.
170
+
171
+ This field is a member of `oneof`_ ``_generation_config``.
172
+ cached_content (str):
173
+ Optional. The name of the content
174
+ `cached <https://ai.google.dev/gemini-api/docs/caching>`__
175
+ to use as context to serve the prediction. Format:
176
+ ``cachedContents/{cachedContent}``
177
+
178
+ This field is a member of `oneof`_ ``_cached_content``.
179
+ """
180
+
181
+ model: str = proto.Field(
182
+ proto.STRING,
183
+ number=1,
184
+ )
185
+ system_instruction: gag_content.Content = proto.Field(
186
+ proto.MESSAGE,
187
+ number=8,
188
+ optional=True,
189
+ message=gag_content.Content,
190
+ )
191
+ contents: MutableSequence[gag_content.Content] = proto.RepeatedField(
192
+ proto.MESSAGE,
193
+ number=2,
194
+ message=gag_content.Content,
195
+ )
196
+ tools: MutableSequence[gag_content.Tool] = proto.RepeatedField(
197
+ proto.MESSAGE,
198
+ number=5,
199
+ message=gag_content.Tool,
200
+ )
201
+ tool_config: gag_content.ToolConfig = proto.Field(
202
+ proto.MESSAGE,
203
+ number=7,
204
+ message=gag_content.ToolConfig,
205
+ )
206
+ safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
207
+ proto.MESSAGE,
208
+ number=3,
209
+ message=safety.SafetySetting,
210
+ )
211
+ generation_config: "GenerationConfig" = proto.Field(
212
+ proto.MESSAGE,
213
+ number=4,
214
+ optional=True,
215
+ message="GenerationConfig",
216
+ )
217
+ cached_content: str = proto.Field(
218
+ proto.STRING,
219
+ number=9,
220
+ optional=True,
221
+ )
222
+
223
+
224
class PrebuiltVoiceConfig(proto.Message):
    r"""The configuration for the prebuilt speaker to use.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        voice_name (str):
            The name of the preset voice to use.

            This field is a member of `oneof`_ ``_voice_name``.
    """

    # Proto field 1; optional=True gives it presence tracking (hence the
    # synthetic ``_voice_name`` oneof noted in the docstring).
    voice_name: str = proto.Field(
        proto.STRING,
        number=1,
        optional=True,
    )
241
+
242
+
243
class VoiceConfig(proto.Message):
    r"""The configuration for the voice to use.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        prebuilt_voice_config (google.ai.generativelanguage_v1beta.types.PrebuiltVoiceConfig):
            The configuration for the prebuilt voice to
            use.

            This field is a member of `oneof`_ ``voice_config``.
    """

    # Member of the real (explicit) ``voice_config`` oneof; currently the
    # only variant, but modeled as a oneof for forward compatibility.
    prebuilt_voice_config: "PrebuiltVoiceConfig" = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="voice_config",
        message="PrebuiltVoiceConfig",
    )
262
+
263
+
264
class SpeechConfig(proto.Message):
    r"""The speech generation config.

    Attributes:
        voice_config (google.ai.generativelanguage_v1beta.types.VoiceConfig):
            The configuration for the speaker to use.
    """

    voice_config: "VoiceConfig" = proto.Field(
        proto.MESSAGE,
        number=1,
        message="VoiceConfig",
    )
277
+
278
+
279
class GenerationConfig(proto.Message):
    r"""Configuration options for model generation and outputs. Not
    all parameters are configurable for every model.


    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        candidate_count (int):
            Optional. Number of generated responses to
            return.
            Currently, this value can only be set to 1. If
            unset, this will default to 1.

            This field is a member of `oneof`_ ``_candidate_count``.
        stop_sequences (MutableSequence[str]):
            Optional. The set of character sequences (up to 5) that will
            stop output generation. If specified, the API will stop at
            the first appearance of a ``stop_sequence``. The stop
            sequence will not be included as part of the response.
        max_output_tokens (int):
            Optional. The maximum number of tokens to include in a
            response candidate.

            Note: The default value varies by model, see the
            ``Model.output_token_limit`` attribute of the ``Model``
            returned from the ``getModel`` function.

            This field is a member of `oneof`_ ``_max_output_tokens``.
        temperature (float):
            Optional. Controls the randomness of the output.

            Note: The default value varies by model, see the
            ``Model.temperature`` attribute of the ``Model`` returned
            from the ``getModel`` function.

            Values can range from [0.0, 2.0].

            This field is a member of `oneof`_ ``_temperature``.
        top_p (float):
            Optional. The maximum cumulative probability of tokens to
            consider when sampling.

            The model uses combined Top-k and Top-p (nucleus) sampling.

            Tokens are sorted based on their assigned probabilities so
            that only the most likely tokens are considered. Top-k
            sampling directly limits the maximum number of tokens to
            consider, while Nucleus sampling limits the number of tokens
            based on the cumulative probability.

            Note: The default value varies by ``Model`` and is specified
            by the\ ``Model.top_p`` attribute returned from the
            ``getModel`` function. An empty ``top_k`` attribute
            indicates that the model doesn't apply top-k sampling and
            doesn't allow setting ``top_k`` on requests.

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            Optional. The maximum number of tokens to consider when
            sampling.

            Gemini models use Top-p (nucleus) sampling or a combination
            of Top-k and nucleus sampling. Top-k sampling considers the
            set of ``top_k`` most probable tokens. Models running with
            nucleus sampling don't allow top_k setting.

            Note: The default value varies by ``Model`` and is specified
            by the\ ``Model.top_p`` attribute returned from the
            ``getModel`` function. An empty ``top_k`` attribute
            indicates that the model doesn't apply top-k sampling and
            doesn't allow setting ``top_k`` on requests.

            This field is a member of `oneof`_ ``_top_k``.
        response_mime_type (str):
            Optional. MIME type of the generated candidate text.
            Supported MIME types are: ``text/plain``: (default) Text
            output. ``application/json``: JSON response in the response
            candidates. ``text/x.enum``: ENUM as a string response in
            the response candidates. Refer to the
            `docs <https://ai.google.dev/gemini-api/docs/prompting_with_media#plain_text_formats>`__
            for a list of all supported text MIME types.
        response_schema (google.ai.generativelanguage_v1beta.types.Schema):
            Optional. Output schema of the generated candidate text.
            Schemas must be a subset of the `OpenAPI
            schema <https://spec.openapis.org/oas/v3.0.3#schema>`__ and
            can be objects, primitives or arrays.

            If set, a compatible ``response_mime_type`` must also be
            set. Compatible MIME types: ``application/json``: Schema for
            JSON response. Refer to the `JSON text generation
            guide <https://ai.google.dev/gemini-api/docs/json-mode>`__
            for more details.
        presence_penalty (float):
            Optional. Presence penalty applied to the next token's
            logprobs if the token has already been seen in the response.

            This penalty is binary on/off and not dependent on the
            number of times the token is used (after the first). Use
            [frequency_penalty][google.ai.generativelanguage.v1beta.GenerationConfig.frequency_penalty]
            for a penalty that increases with each use.

            A positive penalty will discourage the use of tokens that
            have already been used in the response, increasing the
            vocabulary.

            A negative penalty will encourage the use of tokens that
            have already been used in the response, decreasing the
            vocabulary.

            This field is a member of `oneof`_ ``_presence_penalty``.
        frequency_penalty (float):
            Optional. Frequency penalty applied to the next token's
            logprobs, multiplied by the number of times each token has
            been seen in the response so far.

            A positive penalty will discourage the use of tokens that
            have already been used, proportional to the number of times
            the token has been used: The more a token is used, the more
            difficult it is for the model to use that token again
            increasing the vocabulary of responses.

            Caution: A *negative* penalty will encourage the model to
            reuse tokens proportional to the number of times the token
            has been used. Small negative values will reduce the
            vocabulary of a response. Larger negative values will cause
            the model to start repeating a common token until it hits
            the
            [max_output_tokens][google.ai.generativelanguage.v1beta.GenerationConfig.max_output_tokens]
            limit.

            This field is a member of `oneof`_ ``_frequency_penalty``.
        response_logprobs (bool):
            Optional. If true, export the logprobs
            results in response.

            This field is a member of `oneof`_ ``_response_logprobs``.
        logprobs (int):
            Optional. Only valid if
            [response_logprobs=True][google.ai.generativelanguage.v1beta.GenerationConfig.response_logprobs].
            This sets the number of top logprobs to return at each
            decoding step in the
            [Candidate.logprobs_result][google.ai.generativelanguage.v1beta.Candidate.logprobs_result].

            This field is a member of `oneof`_ ``_logprobs``.
        enable_enhanced_civic_answers (bool):
            Optional. Enables enhanced civic answers. It
            may not be available for all models.

            This field is a member of `oneof`_ ``_enable_enhanced_civic_answers``.
        response_modalities (MutableSequence[google.ai.generativelanguage_v1beta.types.GenerationConfig.Modality]):
            Optional. The requested modalities of the
            response. Represents the set of modalities that
            the model can return, and should be expected in
            the response. This is an exact match to the
            modalities of the response.

            A model may have multiple combinations of
            supported modalities. If the requested
            modalities do not match any of the supported
            combinations, an error will be returned.

            An empty list is equivalent to requesting only
            text.
        speech_config (google.ai.generativelanguage_v1beta.types.SpeechConfig):
            Optional. The speech generation config.

            This field is a member of `oneof`_ ``_speech_config``.
    """

    class Modality(proto.Enum):
        r"""Supported modalities of the response.

        Values:
            MODALITY_UNSPECIFIED (0):
                Default value.
            TEXT (1):
                Indicates the model should return text.
            IMAGE (2):
                Indicates the model should return images.
            AUDIO (3):
                Indicates the model should return audio.
        """
        MODALITY_UNSPECIFIED = 0
        TEXT = 1
        IMAGE = 2
        AUDIO = 3

    candidate_count: int = proto.Field(
        proto.INT32,
        number=1,
        optional=True,
    )
    stop_sequences: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=2,
    )
    # NOTE: field number 3 is not used here; numbering follows the proto
    # definition and must not be renumbered.
    max_output_tokens: int = proto.Field(
        proto.INT32,
        number=4,
        optional=True,
    )
    temperature: float = proto.Field(
        proto.FLOAT,
        number=5,
        optional=True,
    )
    top_p: float = proto.Field(
        proto.FLOAT,
        number=6,
        optional=True,
    )
    top_k: int = proto.Field(
        proto.INT32,
        number=7,
        optional=True,
    )
    response_mime_type: str = proto.Field(
        proto.STRING,
        number=13,
    )
    response_schema: gag_content.Schema = proto.Field(
        proto.MESSAGE,
        number=14,
        message=gag_content.Schema,
    )
    presence_penalty: float = proto.Field(
        proto.FLOAT,
        number=15,
        optional=True,
    )
    frequency_penalty: float = proto.Field(
        proto.FLOAT,
        number=16,
        optional=True,
    )
    response_logprobs: bool = proto.Field(
        proto.BOOL,
        number=17,
        optional=True,
    )
    logprobs: int = proto.Field(
        proto.INT32,
        number=18,
        optional=True,
    )
    enable_enhanced_civic_answers: bool = proto.Field(
        proto.BOOL,
        number=19,
        optional=True,
    )
    response_modalities: MutableSequence[Modality] = proto.RepeatedField(
        proto.ENUM,
        number=20,
        enum=Modality,
    )
    speech_config: "SpeechConfig" = proto.Field(
        proto.MESSAGE,
        number=21,
        optional=True,
        message="SpeechConfig",
    )
541
+
542
+
543
class SemanticRetrieverConfig(proto.Message):
    r"""Configuration for retrieving grounding content from a ``Corpus`` or
    ``Document`` created using the Semantic Retriever API.


    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        source (str):
            Required. Name of the resource for retrieval. Example:
            ``corpora/123`` or ``corpora/123/documents/abc``.
        query (google.ai.generativelanguage_v1beta.types.Content):
            Required. Query to use for matching ``Chunk``\ s in the
            given resource by similarity.
        metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]):
            Optional. Filters for selecting ``Document``\ s and/or
            ``Chunk``\ s from the resource.
        max_chunks_count (int):
            Optional. Maximum number of relevant ``Chunk``\ s to
            retrieve.

            This field is a member of `oneof`_ ``_max_chunks_count``.
        minimum_relevance_score (float):
            Optional. Minimum relevance score for retrieved relevant
            ``Chunk``\ s.

            This field is a member of `oneof`_ ``_minimum_relevance_score``.
    """

    source: str = proto.Field(
        proto.STRING,
        number=1,
    )
    query: gag_content.Content = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gag_content.Content,
    )
    metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=retriever.MetadataFilter,
    )
    max_chunks_count: int = proto.Field(
        proto.INT32,
        number=4,
        optional=True,
    )
    minimum_relevance_score: float = proto.Field(
        proto.FLOAT,
        number=5,
        optional=True,
    )
596
+
597
+
598
class GenerateContentResponse(proto.Message):
    r"""Response from the model supporting multiple candidate responses.

    Safety ratings and content filtering are reported for both prompt in
    ``GenerateContentResponse.prompt_feedback`` and for each candidate
    in ``finish_reason`` and in ``safety_ratings``. The API:

    -  Returns either all requested candidates or none of them
    -  Returns no candidates at all only if there was something wrong
       with the prompt (check ``prompt_feedback``)
    -  Reports feedback on each candidate in ``finish_reason`` and
       ``safety_ratings``.

    Attributes:
        candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.Candidate]):
            Candidate responses from the model.
        prompt_feedback (google.ai.generativelanguage_v1beta.types.GenerateContentResponse.PromptFeedback):
            Returns the prompt's feedback related to the
            content filters.
        usage_metadata (google.ai.generativelanguage_v1beta.types.GenerateContentResponse.UsageMetadata):
            Output only. Metadata on the generation
            requests' token usage.
        model_version (str):
            Output only. The model version used to
            generate the response.
    """

    class PromptFeedback(proto.Message):
        r"""A set of the feedback metadata the prompt specified in
        ``GenerateContentRequest.content``.

        Attributes:
            block_reason (google.ai.generativelanguage_v1beta.types.GenerateContentResponse.PromptFeedback.BlockReason):
                Optional. If set, the prompt was blocked and
                no candidates are returned. Rephrase the prompt.
            safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]):
                Ratings for safety of the prompt.
                There is at most one rating per category.
        """

        class BlockReason(proto.Enum):
            r"""Specifies the reason why the prompt was blocked.

            Values:
                BLOCK_REASON_UNSPECIFIED (0):
                    Default value. This value is unused.
                SAFETY (1):
                    Prompt was blocked due to safety reasons. Inspect
                    ``safety_ratings`` to understand which safety category
                    blocked it.
                OTHER (2):
                    Prompt was blocked due to unknown reasons.
                BLOCKLIST (3):
                    Prompt was blocked due to the terms which are
                    included from the terminology blocklist.
                PROHIBITED_CONTENT (4):
                    Prompt was blocked due to prohibited content.
                IMAGE_SAFETY (5):
                    Candidates blocked due to unsafe image
                    generation content.
            """
            BLOCK_REASON_UNSPECIFIED = 0
            SAFETY = 1
            OTHER = 2
            BLOCKLIST = 3
            PROHIBITED_CONTENT = 4
            IMAGE_SAFETY = 5

        block_reason: "GenerateContentResponse.PromptFeedback.BlockReason" = (
            proto.Field(
                proto.ENUM,
                number=1,
                enum="GenerateContentResponse.PromptFeedback.BlockReason",
            )
        )
        safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
            proto.MESSAGE,
            number=2,
            message=safety.SafetyRating,
        )

    class UsageMetadata(proto.Message):
        r"""Metadata on the generation request's token usage.

        Attributes:
            prompt_token_count (int):
                Number of tokens in the prompt. When ``cached_content`` is
                set, this is still the total effective prompt size meaning
                this includes the number of tokens in the cached content.
            cached_content_token_count (int):
                Number of tokens in the cached part of the
                prompt (the cached content)
            candidates_token_count (int):
                Total number of tokens across all the
                generated response candidates.
            total_token_count (int):
                Total token count for the generation request
                (prompt + response candidates).
        """

        prompt_token_count: int = proto.Field(
            proto.INT32,
            number=1,
        )
        # Note: declaration order differs from field-number order here;
        # the wire format depends only on the numbers, which match the proto.
        cached_content_token_count: int = proto.Field(
            proto.INT32,
            number=4,
        )
        candidates_token_count: int = proto.Field(
            proto.INT32,
            number=2,
        )
        total_token_count: int = proto.Field(
            proto.INT32,
            number=3,
        )

    candidates: MutableSequence["Candidate"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="Candidate",
    )
    prompt_feedback: PromptFeedback = proto.Field(
        proto.MESSAGE,
        number=2,
        message=PromptFeedback,
    )
    usage_metadata: UsageMetadata = proto.Field(
        proto.MESSAGE,
        number=3,
        message=UsageMetadata,
    )
    model_version: str = proto.Field(
        proto.STRING,
        number=4,
    )
734
+
735
+
736
class Candidate(proto.Message):
    r"""A response candidate generated from the model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        index (int):
            Output only. Index of the candidate in the
            list of response candidates.

            This field is a member of `oneof`_ ``_index``.
        content (google.ai.generativelanguage_v1beta.types.Content):
            Output only. Generated content returned from
            the model.
        finish_reason (google.ai.generativelanguage_v1beta.types.Candidate.FinishReason):
            Optional. Output only. The reason why the
            model stopped generating tokens.
            If empty, the model has not stopped generating
            tokens.
        safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]):
            List of ratings for the safety of a response
            candidate.
            There is at most one rating per category.
        citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata):
            Output only. Citation information for model-generated
            candidate.

            This field may be populated with recitation information for
            any text included in the ``content``. These are passages
            that are "recited" from copyrighted material in the
            foundational LLM's training data.
        token_count (int):
            Output only. Token count for this candidate.
        grounding_attributions (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingAttribution]):
            Output only. Attribution information for sources that
            contributed to a grounded answer.

            This field is populated for ``GenerateAnswer`` calls.
        grounding_metadata (google.ai.generativelanguage_v1beta.types.GroundingMetadata):
            Output only. Grounding metadata for the candidate.

            This field is populated for ``GenerateContent`` calls.
        avg_logprobs (float):
            Output only. Average log probability score of
            the candidate.
        logprobs_result (google.ai.generativelanguage_v1beta.types.LogprobsResult):
            Output only. Log-likelihood scores for the
            response tokens and top tokens
    """

    class FinishReason(proto.Enum):
        r"""Defines the reason why the model stopped generating tokens.

        Values:
            FINISH_REASON_UNSPECIFIED (0):
                Default value. This value is unused.
            STOP (1):
                Natural stop point of the model or provided
                stop sequence.
            MAX_TOKENS (2):
                The maximum number of tokens as specified in
                the request was reached.
            SAFETY (3):
                The response candidate content was flagged
                for safety reasons.
            RECITATION (4):
                The response candidate content was flagged
                for recitation reasons.
            LANGUAGE (6):
                The response candidate content was flagged
                for using an unsupported language.
            OTHER (5):
                Unknown reason.
            BLOCKLIST (7):
                Token generation stopped because the content
                contains forbidden terms.
            PROHIBITED_CONTENT (8):
                Token generation stopped for potentially
                containing prohibited content.
            SPII (9):
                Token generation stopped because the content
                potentially contains Sensitive Personally
                Identifiable Information (SPII).
            MALFORMED_FUNCTION_CALL (10):
                The function call generated by the model is
                invalid.
            IMAGE_SAFETY (11):
                Token generation stopped because generated
                images contain safety violations.
        """
        FINISH_REASON_UNSPECIFIED = 0
        STOP = 1
        MAX_TOKENS = 2
        SAFETY = 3
        RECITATION = 4
        LANGUAGE = 6
        OTHER = 5
        BLOCKLIST = 7
        PROHIBITED_CONTENT = 8
        SPII = 9
        MALFORMED_FUNCTION_CALL = 10
        IMAGE_SAFETY = 11

    index: int = proto.Field(
        proto.INT32,
        number=3,
        optional=True,
    )
    content: gag_content.Content = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gag_content.Content,
    )
    finish_reason: FinishReason = proto.Field(
        proto.ENUM,
        number=2,
        enum=FinishReason,
    )
    safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
        proto.MESSAGE,
        number=5,
        message=safety.SafetyRating,
    )
    citation_metadata: citation.CitationMetadata = proto.Field(
        proto.MESSAGE,
        number=6,
        message=citation.CitationMetadata,
    )
    token_count: int = proto.Field(
        proto.INT32,
        number=7,
    )
    grounding_attributions: MutableSequence[
        "GroundingAttribution"
    ] = proto.RepeatedField(
        proto.MESSAGE,
        number=8,
        message="GroundingAttribution",
    )
    grounding_metadata: "GroundingMetadata" = proto.Field(
        proto.MESSAGE,
        number=9,
        message="GroundingMetadata",
    )
    avg_logprobs: float = proto.Field(
        proto.DOUBLE,
        number=10,
    )
    logprobs_result: "LogprobsResult" = proto.Field(
        proto.MESSAGE,
        number=11,
        message="LogprobsResult",
    )
889
+
890
+
891
class LogprobsResult(proto.Message):
    r"""Logprobs Result

    Attributes:
        top_candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.LogprobsResult.TopCandidates]):
            Length = total number of decoding steps.
        chosen_candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.LogprobsResult.Candidate]):
            Length = total number of decoding steps. The chosen
            candidates may or may not be in top_candidates.
    """

    class Candidate(proto.Message):
        r"""Candidate for the logprobs token and score.

        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

        Attributes:
            token (str):
                The candidate’s token string value.

                This field is a member of `oneof`_ ``_token``.
            token_id (int):
                The candidate’s token id value.

                This field is a member of `oneof`_ ``_token_id``.
            log_probability (float):
                The candidate's log probability.

                This field is a member of `oneof`_ ``_log_probability``.
        """

        token: str = proto.Field(
            proto.STRING,
            number=1,
            optional=True,
        )
        token_id: int = proto.Field(
            proto.INT32,
            number=3,
            optional=True,
        )
        log_probability: float = proto.Field(
            proto.FLOAT,
            number=2,
            optional=True,
        )

    class TopCandidates(proto.Message):
        r"""Candidates with top log probabilities at each decoding step.

        Attributes:
            candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.LogprobsResult.Candidate]):
                Sorted by log probability in descending
                order.
        """

        candidates: MutableSequence["LogprobsResult.Candidate"] = proto.RepeatedField(
            proto.MESSAGE,
            number=1,
            message="LogprobsResult.Candidate",
        )

    top_candidates: MutableSequence[TopCandidates] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=TopCandidates,
    )
    chosen_candidates: MutableSequence[Candidate] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=Candidate,
    )
963
+
964
+
965
class AttributionSourceId(proto.Message):
    r"""Identifier for the source contributing to this attribution.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        grounding_passage (google.ai.generativelanguage_v1beta.types.AttributionSourceId.GroundingPassageId):
            Identifier for an inline passage.

            This field is a member of `oneof`_ ``source``.
        semantic_retriever_chunk (google.ai.generativelanguage_v1beta.types.AttributionSourceId.SemanticRetrieverChunk):
            Identifier for a ``Chunk`` fetched via Semantic Retriever.

            This field is a member of `oneof`_ ``source``.
    """

    class GroundingPassageId(proto.Message):
        r"""Identifier for a part within a ``GroundingPassage``.

        Attributes:
            passage_id (str):
                Output only. ID of the passage matching the
                ``GenerateAnswerRequest``'s ``GroundingPassage.id``.
            part_index (int):
                Output only. Index of the part within the
                ``GenerateAnswerRequest``'s ``GroundingPassage.content``.
        """

        passage_id: str = proto.Field(
            proto.STRING,
            number=1,
        )
        part_index: int = proto.Field(
            proto.INT32,
            number=2,
        )

    class SemanticRetrieverChunk(proto.Message):
        r"""Identifier for a ``Chunk`` retrieved via Semantic Retriever
        specified in the ``GenerateAnswerRequest`` using
        ``SemanticRetrieverConfig``.

        Attributes:
            source (str):
                Output only. Name of the source matching the request's
                ``SemanticRetrieverConfig.source``. Example: ``corpora/123``
                or ``corpora/123/documents/abc``
            chunk (str):
                Output only. Name of the ``Chunk`` containing the attributed
                text. Example: ``corpora/123/documents/abc/chunks/xyz``
        """

        source: str = proto.Field(
            proto.STRING,
            number=1,
        )
        chunk: str = proto.Field(
            proto.STRING,
            number=2,
        )

    grounding_passage: GroundingPassageId = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="source",
        message=GroundingPassageId,
    )
    semantic_retriever_chunk: SemanticRetrieverChunk = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="source",
        message=SemanticRetrieverChunk,
    )
1043
+
1044
+
1045
class GroundingAttribution(proto.Message):
    r"""Attribution for a source that contributed to an answer.

    Attributes:
        source_id (google.ai.generativelanguage_v1beta.types.AttributionSourceId):
            Output only. Identifier for the source
            contributing to this attribution.
        content (google.ai.generativelanguage_v1beta.types.Content):
            Grounding source content that makes up this
            attribution.
    """

    # Field numbers 3 and 2 come from the proto definition; declaration
    # order here does not affect the wire format.
    source_id: "AttributionSourceId" = proto.Field(
        proto.MESSAGE,
        number=3,
        message="AttributionSourceId",
    )
    content: gag_content.Content = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gag_content.Content,
    )
1067
+
1068
+
1069
class RetrievalMetadata(proto.Message):
    r"""Metadata related to retrieval in the grounding flow.

    Attributes:
        google_search_dynamic_retrieval_score (float):
            Optional. Score indicating how likely information from
            google search could help answer the prompt. The score is in
            the range [0, 1], where 0 is the least likely and 1 is the
            most likely. This score is only populated when google search
            grounding and dynamic retrieval is enabled. It will be
            compared to the threshold to determine whether to trigger
            google search.
    """

    google_search_dynamic_retrieval_score: float = proto.Field(
        proto.FLOAT,
        number=2,
    )
1087
+
1088
+
1089
class GroundingMetadata(proto.Message):
    r"""Metadata returned to client when grounding is enabled.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        search_entry_point (google.ai.generativelanguage_v1beta.types.SearchEntryPoint):
            Optional. Google search entry for the
            following-up web searches.

            This field is a member of `oneof`_ ``_search_entry_point``.
        grounding_chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingChunk]):
            List of supporting references retrieved from
            specified grounding source.
        grounding_supports (MutableSequence[google.ai.generativelanguage_v1beta.types.GroundingSupport]):
            List of grounding support.
        retrieval_metadata (google.ai.generativelanguage_v1beta.types.RetrievalMetadata):
            Metadata related to retrieval in the
            grounding flow.

            This field is a member of `oneof`_ ``_retrieval_metadata``.
        web_search_queries (MutableSequence[str]):
            Web search queries for the following-up web
            search.
    """

    search_entry_point: "SearchEntryPoint" = proto.Field(
        proto.MESSAGE,
        number=1,
        optional=True,
        message="SearchEntryPoint",
    )
    grounding_chunks: MutableSequence["GroundingChunk"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="GroundingChunk",
    )
    grounding_supports: MutableSequence["GroundingSupport"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="GroundingSupport",
    )
    retrieval_metadata: "RetrievalMetadata" = proto.Field(
        proto.MESSAGE,
        number=4,
        optional=True,
        message="RetrievalMetadata",
    )
    web_search_queries: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=5,
    )
1141
+
1142
+
1143
class SearchEntryPoint(proto.Message):
    r"""Google Search entry point.

    Attributes:
        rendered_content (str):
            Optional. Web content snippet that can be embedded in a web
            page or an app webview.
        sdk_blob (bytes):
            Optional. Base64-encoded JSON representing an array of
            <search term, search url> tuples.
    """

    rendered_content: str = proto.Field(proto.STRING, number=1)
    sdk_blob: bytes = proto.Field(proto.BYTES, number=2)
1163
+
1164
+
1165
class GroundingChunk(proto.Message):
    r"""Grounding chunk.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        web (google.ai.generativelanguage_v1beta.types.GroundingChunk.Web):
            Grounding chunk from the web.

            This field is a member of `oneof`_ ``chunk_type``.
    """

    class Web(proto.Message):
        r"""Chunk from the web.

        Attributes:
            uri (str):
                URI reference of the chunk.

                This field is a member of `oneof`_ ``_uri``.
            title (str):
                Title of the chunk.

                This field is a member of `oneof`_ ``_title``.
        """

        uri: str = proto.Field(proto.STRING, number=1, optional=True)
        title: str = proto.Field(proto.STRING, number=2, optional=True)

    # Only one chunk_type variant may be set at a time.
    web: Web = proto.Field(proto.MESSAGE, number=1, oneof="chunk_type", message=Web)
1210
+
1211
+
1212
class Segment(proto.Message):
    r"""Segment of the content.

    Attributes:
        part_index (int):
            Output only. Index of a Part object within its parent
            Content object.
        start_index (int):
            Output only. Start index in the given Part, measured in
            bytes. Offset from the start of the Part, inclusive,
            starting at zero.
        end_index (int):
            Output only. End index in the given Part, measured in
            bytes. Offset from the start of the Part, exclusive,
            starting at zero.
        text (str):
            Output only. The text corresponding to the segment from the
            response.
    """

    part_index: int = proto.Field(proto.INT32, number=1)
    start_index: int = proto.Field(proto.INT32, number=2)
    end_index: int = proto.Field(proto.INT32, number=3)
    text: str = proto.Field(proto.STRING, number=4)
1248
+
1249
+
1250
class GroundingSupport(proto.Message):
    r"""Grounding support.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        segment (google.ai.generativelanguage_v1beta.types.Segment):
            Segment of the content this support belongs to.

            This field is a member of `oneof`_ ``_segment``.
        grounding_chunk_indices (MutableSequence[int]):
            Indices (into ``grounding_chunk``) of the citations
            associated with the claim. For instance, ``[1, 3, 4]``
            means grounding_chunk[1], grounding_chunk[3] and
            grounding_chunk[4] are the retrieved content attributed to
            the claim.
        confidence_scores (MutableSequence[float]):
            Confidence scores of the support references, each in
            ``[0, 1]`` with 1 the most confident. Must have the same
            length as ``grounding_chunk_indices``.
    """

    segment: "Segment" = proto.Field(
        proto.MESSAGE, number=1, optional=True, message="Segment"
    )
    grounding_chunk_indices: MutableSequence[int] = proto.RepeatedField(
        proto.INT32, number=2
    )
    confidence_scores: MutableSequence[float] = proto.RepeatedField(
        proto.FLOAT, number=3
    )
1287
+
1288
+
1289
class GenerateAnswerRequest(proto.Message):
    r"""Request to generate a grounded answer from the ``Model``.

    This message has `oneof`_ fields (mutually exclusive fields): at
    most one member of each oneof can be set at a time, and setting any
    member clears the others.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        inline_passages (google.ai.generativelanguage_v1beta.types.GroundingPassages):
            Passages provided inline with the request.

            This field is a member of `oneof`_ ``grounding_source``.
        semantic_retriever (google.ai.generativelanguage_v1beta.types.SemanticRetrieverConfig):
            Content retrieved from resources created via the Semantic
            Retriever API.

            This field is a member of `oneof`_ ``grounding_source``.
        model (str):
            Required. The name of the ``Model`` to use for generating
            the grounded response.

            Format: ``model=models/{model}``.
        contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]):
            Required. The content of the current conversation with the
            ``Model``. For single-turn queries, a single question to
            answer. For multi-turn queries, a repeated field containing
            the conversation history, with the last ``Content`` holding
            the question.

            Note: ``GenerateAnswer`` only supports queries in English.
        answer_style (google.ai.generativelanguage_v1beta.types.GenerateAnswerRequest.AnswerStyle):
            Required. Style in which answers should be returned.
        safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]):
            Optional. Unique ``SafetySetting`` instances for blocking
            unsafe content, enforced on
            ``GenerateAnswerRequest.contents`` and
            ``GenerateAnswerResponse.candidate``. At most one setting
            per ``SafetyCategory``; entries override the default for
            that category, and categories without an entry use the API
            default. Supported categories:
            HARM_CATEGORY_HATE_SPEECH,
            HARM_CATEGORY_SEXUALLY_EXPLICIT,
            HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT.
            See the
            `guide <https://ai.google.dev/gemini-api/docs/safety-settings>`__
            and the `Safety
            guidance <https://ai.google.dev/gemini-api/docs/safety-guidance>`__
            for details.
        temperature (float):
            Optional. Controls the randomness of the output. Values
            range over ``[0.0, 1.0]``, inclusive; values near 1.0 give
            more varied and creative responses, values near 0.0 more
            straightforward ones. A low temperature (~0.2) is usually
            recommended for Attributed-Question-Answering use cases.

            This field is a member of `oneof`_ ``_temperature``.
    """

    class AnswerStyle(proto.Enum):
        r"""Style for grounded answers.

        Values:
            ANSWER_STYLE_UNSPECIFIED (0):
                Unspecified answer style.
            ABSTRACTIVE (1):
                Succinct but abstract style.
            EXTRACTIVE (2):
                Very brief and extractive style.
            VERBOSE (3):
                Verbose style including extra details; may be formatted
                as a sentence, paragraph, multiple paragraphs, bullet
                points, etc.
        """
        ANSWER_STYLE_UNSPECIFIED = 0
        ABSTRACTIVE = 1
        EXTRACTIVE = 2
        VERBOSE = 3

    # The two grounding_source variants are mutually exclusive.
    inline_passages: gag_content.GroundingPassages = proto.Field(
        proto.MESSAGE, number=6, oneof="grounding_source",
        message=gag_content.GroundingPassages,
    )
    semantic_retriever: "SemanticRetrieverConfig" = proto.Field(
        proto.MESSAGE, number=7, oneof="grounding_source",
        message="SemanticRetrieverConfig",
    )
    model: str = proto.Field(proto.STRING, number=1)
    contents: MutableSequence[gag_content.Content] = proto.RepeatedField(
        proto.MESSAGE, number=2, message=gag_content.Content
    )
    answer_style: AnswerStyle = proto.Field(proto.ENUM, number=5, enum=AnswerStyle)
    safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
        proto.MESSAGE, number=3, message=safety.SafetySetting
    )
    temperature: float = proto.Field(proto.FLOAT, number=4, optional=True)
1419
+
1420
+
1421
class GenerateAnswerResponse(proto.Message):
    r"""Response from the model for a grounded answer.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        answer (google.ai.generativelanguage_v1beta.types.Candidate):
            Candidate answer from the model.

            Note: The model *always* attempts to provide a grounded
            answer, even when the answer is unlikely to be answerable
            from the given passages. In that case, a low-quality or
            ungrounded answer may be provided, along with a low
            ``answerable_probability``.
        answerable_probability (float):
            Output only. The model's estimate of the probability that
            its answer is correct and grounded in the input passages. A
            low value indicates the answer might not be grounded in the
            sources; in that case you may want to display a "We
            couldn't answer that question" style message, or fall back
            to a general-purpose LLM (``0.5`` is a good starting
            threshold).

            This field is a member of `oneof`_ ``_answerable_probability``.
        input_feedback (google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse.InputFeedback):
            Output only. Feedback on the input data used to answer the
            question (as opposed to the model-generated response): the
            question and conversation history from
            ``GenerateAnswerRequest.content``, and the grounding
            sources (``GenerateAnswerRequest.semantic_retriever`` or
            ``GenerateAnswerRequest.inline_passages``).

            This field is a member of `oneof`_ ``_input_feedback``.
    """

    class InputFeedback(proto.Message):
        r"""Feedback related to the input data used to answer the
        question, as opposed to the model-generated response.

        Attributes:
            block_reason (google.ai.generativelanguage_v1beta.types.GenerateAnswerResponse.InputFeedback.BlockReason):
                Optional. If set, the input was blocked and no
                candidates are returned. Rephrase the input.

                This field is a member of `oneof`_ ``_block_reason``.
            safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]):
                Ratings for safety of the input; at most one rating
                per category.
        """

        class BlockReason(proto.Enum):
            r"""Specifies the reason the input was blocked.

            Values:
                BLOCK_REASON_UNSPECIFIED (0):
                    Default value. This value is unused.
                SAFETY (1):
                    Input was blocked due to safety reasons. Inspect
                    ``safety_ratings`` to understand which safety
                    category blocked it.
                OTHER (2):
                    Input was blocked due to other reasons.
            """
            BLOCK_REASON_UNSPECIFIED = 0
            SAFETY = 1
            OTHER = 2

        block_reason: "GenerateAnswerResponse.InputFeedback.BlockReason" = proto.Field(
            proto.ENUM, number=1, optional=True,
            enum="GenerateAnswerResponse.InputFeedback.BlockReason",
        )
        safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
            proto.MESSAGE, number=2, message=safety.SafetyRating
        )

    answer: "Candidate" = proto.Field(proto.MESSAGE, number=1, message="Candidate")
    answerable_probability: float = proto.Field(proto.FLOAT, number=2, optional=True)
    input_feedback: InputFeedback = proto.Field(
        proto.MESSAGE, number=3, optional=True, message=InputFeedback
    )
1534
+
1535
+
1536
class EmbedContentRequest(proto.Message):
    r"""Request containing the ``Content`` for the model to embed.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        model (str):
            Required. The model's resource name, serving as an ID for
            the Model to use. Should match a model name returned by the
            ``ListModels`` method.

            Format: ``models/{model}``
        content (google.ai.generativelanguage_v1beta.types.Content):
            Required. The content to embed. Only the ``parts.text``
            fields will be counted.
        task_type (google.ai.generativelanguage_v1beta.types.TaskType):
            Optional. Task type for which the embeddings will be used.
            Can only be set for ``models/embedding-001``.

            This field is a member of `oneof`_ ``_task_type``.
        title (str):
            Optional. Title for the text. Only applicable when TaskType
            is ``RETRIEVAL_DOCUMENT``; specifying a title there yields
            better-quality embeddings for retrieval.

            This field is a member of `oneof`_ ``_title``.
        output_dimensionality (int):
            Optional. Reduced dimension for the output embedding; if
            set, excess values are truncated from the end. Supported by
            newer models (2024+) only; cannot be set for
            ``models/embedding-001``.

            This field is a member of `oneof`_ ``_output_dimensionality``.
    """

    model: str = proto.Field(proto.STRING, number=1)
    content: gag_content.Content = proto.Field(
        proto.MESSAGE, number=2, message=gag_content.Content
    )
    task_type: "TaskType" = proto.Field(
        proto.ENUM, number=3, optional=True, enum="TaskType"
    )
    title: str = proto.Field(proto.STRING, number=4, optional=True)
    output_dimensionality: int = proto.Field(proto.INT32, number=5, optional=True)
1601
+
1602
+
1603
class ContentEmbedding(proto.Message):
    r"""A list of floats representing an embedding.

    Attributes:
        values (MutableSequence[float]):
            The embedding values.
    """

    values: MutableSequence[float] = proto.RepeatedField(proto.FLOAT, number=1)
1615
+
1616
+
1617
class EmbedContentResponse(proto.Message):
    r"""The response to an ``EmbedContentRequest``.

    Attributes:
        embedding (google.ai.generativelanguage_v1beta.types.ContentEmbedding):
            Output only. The embedding generated from the input
            content.
    """

    embedding: "ContentEmbedding" = proto.Field(
        proto.MESSAGE, number=1, message="ContentEmbedding"
    )
1631
+
1632
+
1633
class BatchEmbedContentsRequest(proto.Message):
    r"""Batch request to get embeddings from the model for a list of
    prompts.

    Attributes:
        model (str):
            Required. The model's resource name, serving as an ID for
            the Model to use. Should match a model name returned by the
            ``ListModels`` method.

            Format: ``models/{model}``
        requests (MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedContentRequest]):
            Required. Embed requests for the batch. The model in each
            request must match ``BatchEmbedContentsRequest.model``.
    """

    model: str = proto.Field(proto.STRING, number=1)
    requests: MutableSequence["EmbedContentRequest"] = proto.RepeatedField(
        proto.MESSAGE, number=2, message="EmbedContentRequest"
    )
1661
+
1662
+
1663
class BatchEmbedContentsResponse(proto.Message):
    r"""The response to a ``BatchEmbedContentsRequest``.

    Attributes:
        embeddings (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentEmbedding]):
            Output only. The embeddings for each request, in the same
            order as provided in the batch request.
    """

    embeddings: MutableSequence["ContentEmbedding"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="ContentEmbedding"
    )
1678
+
1679
+
1680
class CountTokensRequest(proto.Message):
    r"""Counts the number of tokens in the ``prompt`` sent to a model.

    Models may tokenize text differently, so each model may return a
    different ``token_count``.

    Attributes:
        model (str):
            Required. The model's resource name, serving as an ID for
            the Model to use. Should match a model name returned by the
            ``ListModels`` method.

            Format: ``models/{model}``
        contents (MutableSequence[google.ai.generativelanguage_v1beta.types.Content]):
            Optional. The input given to the model as a prompt. Ignored
            when ``generate_content_request`` is set.
        generate_content_request (google.ai.generativelanguage_v1beta.types.GenerateContentRequest):
            Optional. The overall input given to the ``Model``,
            including the prompt plus model steering information such
            as `system
            instructions <https://ai.google.dev/gemini-api/docs/system-instructions>`__
            and/or function declarations for `function
            calling <https://ai.google.dev/gemini-api/docs/function-calling>`__.
            ``Model``/``Content``\ s and ``generate_content_request``\ s
            are mutually exclusive: send one or the other, never both.
    """

    model: str = proto.Field(proto.STRING, number=1)
    contents: MutableSequence[gag_content.Content] = proto.RepeatedField(
        proto.MESSAGE, number=2, message=gag_content.Content
    )
    generate_content_request: "GenerateContentRequest" = proto.Field(
        proto.MESSAGE, number=3, message="GenerateContentRequest"
    )
1725
+
1726
+
1727
class CountTokensResponse(proto.Message):
    r"""A response from ``CountTokens``.

    It returns the model's ``token_count`` for the ``prompt``.

    Attributes:
        total_tokens (int):
            The number of tokens that the ``Model`` tokenizes the
            ``prompt`` into. Always non-negative.
        cached_content_token_count (int):
            Number of tokens in the cached part of the prompt (the
            cached content).
    """

    total_tokens: int = proto.Field(proto.INT32, number=1)
    # Field number 5 is fixed by the service proto; 2-4 are reserved/unused here.
    cached_content_token_count: int = proto.Field(proto.INT32, number=5)
1749
+
1750
+
1751
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/model.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta",
24
+ manifest={
25
+ "Model",
26
+ },
27
+ )
28
+
29
+
30
class Model(proto.Message):
    r"""Information about a Generative Language Model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        name (str):
            Required. The resource name of the ``Model``. Refer to
            `Model
            variants <https://ai.google.dev/gemini-api/docs/models/gemini#model-variations>`__
            for all allowed values.

            Format: ``models/{model}`` where ``{model}`` follows the
            ``"{base_model_id}-{version}"`` naming convention, e.g.
            ``models/gemini-1.5-flash-001``.
        base_model_id (str):
            Required. The name of the base model; pass this to the
            generation request. Example: ``gemini-1.5-flash``.
        version (str):
            Required. The version number of the model — the major
            version (``1.0`` or ``1.5``).
        display_name (str):
            Human-readable name of the model, e.g. "Gemini 1.5 Flash".
            Up to 128 characters of any UTF-8 text.
        description (str):
            A short description of the model.
        input_token_limit (int):
            Maximum number of input tokens allowed for this model.
        output_token_limit (int):
            Maximum number of output tokens available for this model.
        supported_generation_methods (MutableSequence[str]):
            The model's supported generation methods, given as Pascal
            case API method names such as ``generateMessage`` and
            ``generateContent``.
        temperature (float):
            Controls the randomness of the output. Ranges over
            ``[0.0, max_temperature]``, inclusive; higher values give
            more varied responses, values near ``0.0`` less surprising
            ones. This is the backend default used when calling the
            model.

            This field is a member of `oneof`_ ``_temperature``.
        max_temperature (float):
            The maximum temperature this model can use.

            This field is a member of `oneof`_ ``_max_temperature``.
        top_p (float):
            For `Nucleus
            sampling <https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p>`__:
            considers the smallest set of tokens whose probability sum
            is at least ``top_p``. Backend default used when calling
            the model.

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            For Top-k sampling: considers the ``top_k`` most probable
            tokens. Backend default used when calling the model. If
            empty, the model doesn't use top-k sampling and ``top_k``
            isn't allowed as a generation parameter.

            This field is a member of `oneof`_ ``_top_k``.
    """

    name: str = proto.Field(proto.STRING, number=1)
    base_model_id: str = proto.Field(proto.STRING, number=2)
    version: str = proto.Field(proto.STRING, number=3)
    display_name: str = proto.Field(proto.STRING, number=4)
    description: str = proto.Field(proto.STRING, number=5)
    input_token_limit: int = proto.Field(proto.INT32, number=6)
    output_token_limit: int = proto.Field(proto.INT32, number=7)
    supported_generation_methods: MutableSequence[str] = proto.RepeatedField(
        proto.STRING, number=8
    )
    temperature: float = proto.Field(proto.FLOAT, number=9, optional=True)
    # max_temperature was added later, hence the out-of-sequence field number.
    max_temperature: float = proto.Field(proto.FLOAT, number=13, optional=True)
    top_p: float = proto.Field(proto.FLOAT, number=10, optional=True)
    top_k: int = proto.Field(proto.INT32, number=11, optional=True)
169
+
170
+
171
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/model_service.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1beta.types import tuned_model as gag_tuned_model
24
+ from google.ai.generativelanguage_v1beta.types import model
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1beta",
28
+ manifest={
29
+ "GetModelRequest",
30
+ "ListModelsRequest",
31
+ "ListModelsResponse",
32
+ "GetTunedModelRequest",
33
+ "ListTunedModelsRequest",
34
+ "ListTunedModelsResponse",
35
+ "CreateTunedModelRequest",
36
+ "CreateTunedModelMetadata",
37
+ "UpdateTunedModelRequest",
38
+ "DeleteTunedModelRequest",
39
+ },
40
+ )
41
+
42
+
43
class GetModelRequest(proto.Message):
    r"""Request for getting information about a specific Model.

    Attributes:
        name (str):
            Required. The resource name of the model. Should match a
            model name returned by the ``ListModels`` method.

            Format: ``models/{model}``
    """

    name: str = proto.Field(proto.STRING, number=1)
+ )
60
+
61
+
62
class ListModelsRequest(proto.Message):
    r"""Request for listing all Models.

    Attributes:
        page_size (int):
            The maximum number of ``Models`` to return (per page). If
            unspecified, 50 models are returned per page; at most 1000
            models per page even if a larger page_size is passed.
        page_token (str):
            A page token received from a previous ``ListModels`` call.
            Provide the ``page_token`` returned by one request as an
            argument to the next to retrieve the next page. When
            paginating, all other parameters must match the call that
            produced the token.
    """

    # Field number 1 is unused in this request message.
    page_size: int = proto.Field(proto.INT32, number=2)
    page_token: str = proto.Field(proto.STRING, number=3)
+ )
91
+
92
+
93
class ListModelsResponse(proto.Message):
    r"""Response from ``ListModel`` containing a paginated list of Models.

    Attributes:
        models (MutableSequence[google.ai.generativelanguage_v1beta.types.Model]):
            The returned Models.
        next_page_token (str):
            A token which can be sent as ``page_token`` to retrieve the
            next page. Omitted when there are no more pages.
    """

    @property
    def raw_page(self):
        # Hook used by the client library's pager machinery.
        return self

    models: MutableSequence[model.Model] = proto.RepeatedField(
        proto.MESSAGE, number=1, message=model.Model
    )
    next_page_token: str = proto.Field(proto.STRING, number=2)
+ )
119
+
120
+
121
+ class GetTunedModelRequest(proto.Message):
122
+ r"""Request for getting information about a specific Model.
123
+
124
+ Attributes:
125
+ name (str):
126
+ Required. The resource name of the model.
127
+
128
+ Format: ``tunedModels/my-model-id``
129
+ """
130
+
131
+ name: str = proto.Field(
132
+ proto.STRING,
133
+ number=1,
134
+ )
135
+
136
+
137
+ class ListTunedModelsRequest(proto.Message):
138
+ r"""Request for listing TunedModels.
139
+
140
+ Attributes:
141
+ page_size (int):
142
+ Optional. The maximum number of ``TunedModels`` to return
143
+ (per page). The service may return fewer tuned models.
144
+
145
+ If unspecified, at most 10 tuned models will be returned.
146
+ This method returns at most 1000 models per page, even if
147
+ you pass a larger page_size.
148
+ page_token (str):
149
+ Optional. A page token, received from a previous
150
+ ``ListTunedModels`` call.
151
+
152
+ Provide the ``page_token`` returned by one request as an
153
+ argument to the next request to retrieve the next page.
154
+
155
+ When paginating, all other parameters provided to
156
+ ``ListTunedModels`` must match the call that provided the
157
+ page token.
158
+ filter (str):
159
+ Optional. A filter is a full text search over
160
+ the tuned model's description and display name.
161
+ By default, results will not include tuned
162
+ models shared with everyone.
163
+
164
+ Additional operators:
165
+
166
+ - owner:me
167
+ - writers:me
168
+ - readers:me
169
+ - readers:everyone
170
+
171
+ Examples:
172
+
173
+ "owner:me" returns all tuned models to which
174
+ caller has owner role "readers:me" returns all
175
+ tuned models to which caller has reader role
176
+ "readers:everyone" returns all tuned models that
177
+ are shared with everyone
178
+ """
179
+
180
+ page_size: int = proto.Field(
181
+ proto.INT32,
182
+ number=1,
183
+ )
184
+ page_token: str = proto.Field(
185
+ proto.STRING,
186
+ number=2,
187
+ )
188
+ filter: str = proto.Field(
189
+ proto.STRING,
190
+ number=3,
191
+ )
192
+
193
+
194
+ class ListTunedModelsResponse(proto.Message):
195
+ r"""Response from ``ListTunedModels`` containing a paginated list of
196
+ Models.
197
+
198
+ Attributes:
199
+ tuned_models (MutableSequence[google.ai.generativelanguage_v1beta.types.TunedModel]):
200
+ The returned Models.
201
+ next_page_token (str):
202
+ A token, which can be sent as ``page_token`` to retrieve the
203
+ next page.
204
+
205
+ If this field is omitted, there are no more pages.
206
+ """
207
+
208
+ @property
209
+ def raw_page(self):
210
+ return self
211
+
212
+ tuned_models: MutableSequence[gag_tuned_model.TunedModel] = proto.RepeatedField(
213
+ proto.MESSAGE,
214
+ number=1,
215
+ message=gag_tuned_model.TunedModel,
216
+ )
217
+ next_page_token: str = proto.Field(
218
+ proto.STRING,
219
+ number=2,
220
+ )
221
+
222
+
223
+ class CreateTunedModelRequest(proto.Message):
224
+ r"""Request to create a TunedModel.
225
+
226
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
227
+
228
+ Attributes:
229
+ tuned_model_id (str):
230
+ Optional. The unique id for the tuned model if specified.
231
+ This value should be up to 40 characters, the first
232
+ character must be a letter, the last could be a letter or a
233
+ number. The id must match the regular expression:
234
+ ``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
235
+
236
+ This field is a member of `oneof`_ ``_tuned_model_id``.
237
+ tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel):
238
+ Required. The tuned model to create.
239
+ """
240
+
241
+ tuned_model_id: str = proto.Field(
242
+ proto.STRING,
243
+ number=1,
244
+ optional=True,
245
+ )
246
+ tuned_model: gag_tuned_model.TunedModel = proto.Field(
247
+ proto.MESSAGE,
248
+ number=2,
249
+ message=gag_tuned_model.TunedModel,
250
+ )
251
+
252
+
253
+ class CreateTunedModelMetadata(proto.Message):
254
+ r"""Metadata about the state and progress of creating a tuned
255
+ model returned from the long-running operation
256
+
257
+ Attributes:
258
+ tuned_model (str):
259
+ Name of the tuned model associated with the
260
+ tuning operation.
261
+ total_steps (int):
262
+ The total number of tuning steps.
263
+ completed_steps (int):
264
+ The number of steps completed.
265
+ completed_percent (float):
266
+ The completed percentage for the tuning
267
+ operation.
268
+ snapshots (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningSnapshot]):
269
+ Metrics collected during tuning.
270
+ """
271
+
272
+ tuned_model: str = proto.Field(
273
+ proto.STRING,
274
+ number=5,
275
+ )
276
+ total_steps: int = proto.Field(
277
+ proto.INT32,
278
+ number=1,
279
+ )
280
+ completed_steps: int = proto.Field(
281
+ proto.INT32,
282
+ number=2,
283
+ )
284
+ completed_percent: float = proto.Field(
285
+ proto.FLOAT,
286
+ number=3,
287
+ )
288
+ snapshots: MutableSequence[gag_tuned_model.TuningSnapshot] = proto.RepeatedField(
289
+ proto.MESSAGE,
290
+ number=4,
291
+ message=gag_tuned_model.TuningSnapshot,
292
+ )
293
+
294
+
295
+ class UpdateTunedModelRequest(proto.Message):
296
+ r"""Request to update a TunedModel.
297
+
298
+ Attributes:
299
+ tuned_model (google.ai.generativelanguage_v1beta.types.TunedModel):
300
+ Required. The tuned model to update.
301
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
302
+ Optional. The list of fields to update.
303
+ """
304
+
305
+ tuned_model: gag_tuned_model.TunedModel = proto.Field(
306
+ proto.MESSAGE,
307
+ number=1,
308
+ message=gag_tuned_model.TunedModel,
309
+ )
310
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
311
+ proto.MESSAGE,
312
+ number=2,
313
+ message=field_mask_pb2.FieldMask,
314
+ )
315
+
316
+
317
+ class DeleteTunedModelRequest(proto.Message):
318
+ r"""Request to delete a TunedModel.
319
+
320
+ Attributes:
321
+ name (str):
322
+ Required. The resource name of the model. Format:
323
+ ``tunedModels/my-model-id``
324
+ """
325
+
326
+ name: str = proto.Field(
327
+ proto.STRING,
328
+ number=1,
329
+ )
330
+
331
+
332
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/permission.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

import proto  # type: ignore

# Registers the messages below under the v1beta proto package; the manifest
# drives both descriptor registration and this module's __all__.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "Permission",
    },
)


class Permission(proto.Message):
    r"""Permission resource grants user, group or the rest of the
    world access to the PaLM API resource (e.g. a tuned model,
    corpus).

    A role is a collection of permitted operations that allows users
    to perform specific actions on PaLM API resources. To make them
    available to users, groups, or service accounts, you assign
    roles. When you assign a role, you grant permissions that the
    role contains.

    There are three concentric roles. Each role is a superset of the
    previous role's permitted operations:

    -  reader can use the resource (e.g. tuned model, corpus) for
       inference
    -  writer has reader's permissions and additionally can edit and
       share
    -  owner has writer's permissions and additionally can delete


    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        name (str):
            Output only. Identifier. The permission name. A unique name
            will be generated on create. Examples:
            tunedModels/{tuned_model}/permissions/{permission}
            corpora/{corpus}/permissions/{permission} Output only.
        grantee_type (google.ai.generativelanguage_v1beta.types.Permission.GranteeType):
            Optional. Immutable. The type of the grantee.

            This field is a member of `oneof`_ ``_grantee_type``.
        email_address (str):
            Optional. Immutable. The email address of the
            user of group which this permission refers.
            Field is not set when permission's grantee type
            is EVERYONE.

            This field is a member of `oneof`_ ``_email_address``.
        role (google.ai.generativelanguage_v1beta.types.Permission.Role):
            Required. The role granted by this
            permission.

            This field is a member of `oneof`_ ``_role``.
    """

    class GranteeType(proto.Enum):
        r"""Defines types of the grantee of this permission.

        Values:
            GRANTEE_TYPE_UNSPECIFIED (0):
                The default value. This value is unused.
            USER (1):
                Represents a user. When set, you must provide email_address
                for the user.
            GROUP (2):
                Represents a group. When set, you must provide email_address
                for the group.
            EVERYONE (3):
                Represents access to everyone. No extra
                information is required.
        """
        GRANTEE_TYPE_UNSPECIFIED = 0
        USER = 1
        GROUP = 2
        EVERYONE = 3

    class Role(proto.Enum):
        r"""Defines the role granted by this permission.

        Values:
            ROLE_UNSPECIFIED (0):
                The default value. This value is unused.
            OWNER (1):
                Owner can use, update, share and delete the
                resource.
            WRITER (2):
                Writer can use, update and share the
                resource.
            READER (3):
                Reader can use the resource.
        """
        ROLE_UNSPECIFIED = 0
        OWNER = 1
        WRITER = 2
        READER = 3

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    # optional=True on the fields below marks proto3 explicit-presence
    # (synthetic oneof) fields: "unset" is distinguishable from the default.
    grantee_type: GranteeType = proto.Field(
        proto.ENUM,
        number=2,
        optional=True,
        enum=GranteeType,
    )
    email_address: str = proto.Field(
        proto.STRING,
        number=3,
        optional=True,
    )
    role: Role = proto.Field(
        proto.ENUM,
        number=4,
        optional=True,
        enum=Role,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/permission_service.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import field_mask_pb2  # type: ignore
import proto  # type: ignore

# Aliased import avoids clashing with this module's own `permission` field names.
from google.ai.generativelanguage_v1beta.types import permission as gag_permission

# Registers the request/response messages for the PermissionService RPCs.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "CreatePermissionRequest",
        "GetPermissionRequest",
        "ListPermissionsRequest",
        "ListPermissionsResponse",
        "UpdatePermissionRequest",
        "DeletePermissionRequest",
        "TransferOwnershipRequest",
        "TransferOwnershipResponse",
    },
)


class CreatePermissionRequest(proto.Message):
    r"""Request to create a ``Permission``.

    Attributes:
        parent (str):
            Required. The parent resource of the ``Permission``.
            Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}``
        permission (google.ai.generativelanguage_v1beta.types.Permission):
            Required. The permission to create.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    permission: gag_permission.Permission = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gag_permission.Permission,
    )


class GetPermissionRequest(proto.Message):
    r"""Request for getting information about a specific ``Permission``.

    Attributes:
        name (str):
            Required. The resource name of the permission.

            Formats:
            ``tunedModels/{tuned_model}/permissions/{permission}``
            ``corpora/{corpus}/permissions/{permission}``
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class ListPermissionsRequest(proto.Message):
    r"""Request for listing permissions.

    Attributes:
        parent (str):
            Required. The parent resource of the permissions. Formats:
            ``tunedModels/{tuned_model}`` ``corpora/{corpus}``
        page_size (int):
            Optional. The maximum number of ``Permission``\ s to return
            (per page). The service may return fewer permissions.

            If unspecified, at most 10 permissions will be returned.
            This method returns at most 1000 permissions per page, even
            if you pass larger page_size.
        page_token (str):
            Optional. A page token, received from a previous
            ``ListPermissions`` call.

            Provide the ``page_token`` returned by one request as an
            argument to the next request to retrieve the next page.

            When paginating, all other parameters provided to
            ``ListPermissions`` must match the call that provided the
            page token.
    """

    parent: str = proto.Field(
        proto.STRING,
        number=1,
    )
    page_size: int = proto.Field(
        proto.INT32,
        number=2,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=3,
    )


class ListPermissionsResponse(proto.Message):
    r"""Response from ``ListPermissions`` containing a paginated list of
    permissions.

    Attributes:
        permissions (MutableSequence[google.ai.generativelanguage_v1beta.types.Permission]):
            Returned permissions.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page.

            If this field is omitted, there are no more pages.
    """

    @property
    def raw_page(self):
        # Hook used by the generated pagers to expose the raw response object.
        return self

    permissions: MutableSequence[gag_permission.Permission] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gag_permission.Permission,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )


class UpdatePermissionRequest(proto.Message):
    r"""Request to update the ``Permission``.

    Attributes:
        permission (google.ai.generativelanguage_v1beta.types.Permission):
            Required. The permission to update.

            The permission's ``name`` field is used to identify the
            permission to update.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to update. Accepted ones:

            -  role (``Permission.role`` field)
    """

    permission: gag_permission.Permission = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gag_permission.Permission,
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )


class DeletePermissionRequest(proto.Message):
    r"""Request to delete the ``Permission``.

    Attributes:
        name (str):
            Required. The resource name of the permission. Formats:
            ``tunedModels/{tuned_model}/permissions/{permission}``
            ``corpora/{corpus}/permissions/{permission}``
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )


class TransferOwnershipRequest(proto.Message):
    r"""Request to transfer the ownership of the tuned model.

    Attributes:
        name (str):
            Required. The resource name of the tuned model to transfer
            ownership.

            Format: ``tunedModels/my-model-id``
        email_address (str):
            Required. The email address of the user to
            whom the tuned model is being transferred to.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    email_address: str = proto.Field(
        proto.STRING,
        number=2,
    )


class TransferOwnershipResponse(proto.Message):
    # Intentionally empty: success is conveyed by the RPC completing.
    r"""Response from ``TransferOwnership``."""


__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/prediction_service.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import struct_pb2  # type: ignore
import proto  # type: ignore

# Registers the request/response messages for the PredictionService RPCs.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "PredictRequest",
        "PredictResponse",
    },
)


class PredictRequest(proto.Message):
    r"""Request message for
    [PredictionService.Predict][google.ai.generativelanguage.v1beta.PredictionService.Predict].

    Attributes:
        model (str):
            Required. The name of the model for prediction. Format:
            ``name=models/{model}``.
        instances (MutableSequence[google.protobuf.struct_pb2.Value]):
            Required. The instances that are the input to
            the prediction call.
        parameters (google.protobuf.struct_pb2.Value):
            Optional. The parameters that govern the
            prediction call.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    # google.protobuf.Value is schemaless JSON-like data; the accepted shape
    # is model-specific and validated server-side.
    instances: MutableSequence[struct_pb2.Value] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=struct_pb2.Value,
    )
    parameters: struct_pb2.Value = proto.Field(
        proto.MESSAGE,
        number=3,
        message=struct_pb2.Value,
    )


class PredictResponse(proto.Message):
    r"""Response message for [PredictionService.Predict].

    Attributes:
        predictions (MutableSequence[google.protobuf.struct_pb2.Value]):
            The outputs of the prediction call.
    """

    predictions: MutableSequence[struct_pb2.Value] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=struct_pb2.Value,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/retriever.py ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import timestamp_pb2  # type: ignore
import proto  # type: ignore

# Core resource messages of the Semantic Retriever API:
# Corpus > Document > Chunk hierarchy, plus metadata/filter helpers.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "Corpus",
        "Document",
        "StringList",
        "CustomMetadata",
        "MetadataFilter",
        "Condition",
        "Chunk",
        "ChunkData",
    },
)


class Corpus(proto.Message):
    r"""A ``Corpus`` is a collection of ``Document``\ s. A project can
    create up to 5 corpora.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Corpus`` resource name. The ID
            (name excluding the "corpora/" prefix) can contain up to 40
            characters that are lowercase alphanumeric or dashes (-).
            The ID cannot start or end with a dash. If the name is empty
            on create, a unique name will be derived from
            ``display_name`` along with a 12 character random suffix.
            Example: ``corpora/my-awesome-corpora-123a456b789c``
        display_name (str):
            Optional. The human-readable display name for the
            ``Corpus``. The display name must be no more than 512
            characters in length, including spaces. Example: "Docs on
            Semantic Retriever".
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Corpus`` was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Corpus`` was last
            updated.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=2,
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )


class Document(proto.Message):
    r"""A ``Document`` is a collection of ``Chunk``\ s. A ``Corpus`` can
    have a maximum of 10,000 ``Document``\ s.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Document`` resource name. The
            ID (name excluding the `corpora/*/documents/` prefix) can
            contain up to 40 characters that are lowercase alphanumeric
            or dashes (-). The ID cannot start or end with a dash. If
            the name is empty on create, a unique name will be derived
            from ``display_name`` along with a 12 character random
            suffix. Example:
            ``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c``
        display_name (str):
            Optional. The human-readable display name for the
            ``Document``. The display name must be no more than 512
            characters in length, including spaces. Example: "Semantic
            Retriever Documentation".
        custom_metadata (MutableSequence[google.ai.generativelanguage_v1beta.types.CustomMetadata]):
            Optional. User provided custom metadata stored as key-value
            pairs used for querying. A ``Document`` can have a maximum
            of 20 ``CustomMetadata``.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Document`` was last
            updated.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Document`` was
            created.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=2,
    )
    # String reference ("CustomMetadata") is a lazy forward reference resolved
    # by proto-plus within this proto module.
    custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="CustomMetadata",
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )


class StringList(proto.Message):
    r"""User provided string values assigned to a single metadata
    key.

    Attributes:
        values (MutableSequence[str]):
            The string values of the metadata to store.
    """

    values: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=1,
    )


class CustomMetadata(proto.Message):
    r"""User provided metadata stored as key-value pairs.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The string value of the metadata to store.

            This field is a member of `oneof`_ ``value``.
        string_list_value (google.ai.generativelanguage_v1beta.types.StringList):
            The StringList value of the metadata to
            store.

            This field is a member of `oneof`_ ``value``.
        numeric_value (float):
            The numeric value of the metadata to store.

            This field is a member of `oneof`_ ``value``.
        key (str):
            Required. The key of the metadata to store.
    """

    # The three value fields share the "value" oneof; only one can be set.
    string_value: str = proto.Field(
        proto.STRING,
        number=2,
        oneof="value",
    )
    string_list_value: "StringList" = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof="value",
        message="StringList",
    )
    numeric_value: float = proto.Field(
        proto.FLOAT,
        number=7,
        oneof="value",
    )
    key: str = proto.Field(
        proto.STRING,
        number=1,
    )


class MetadataFilter(proto.Message):
    r"""User provided filter to limit retrieval based on ``Chunk`` or
    ``Document`` level metadata values. Example (genre = drama OR genre
    = action): key = "document.custom_metadata.genre" conditions =
    [{string_value = "drama", operation = EQUAL}, {string_value =
    "action", operation = EQUAL}]

    Attributes:
        key (str):
            Required. The key of the metadata to filter
            on.
        conditions (MutableSequence[google.ai.generativelanguage_v1beta.types.Condition]):
            Required. The ``Condition``\ s for the given key that will
            trigger this filter. Multiple ``Condition``\ s are joined by
            logical ORs.
    """

    key: str = proto.Field(
        proto.STRING,
        number=1,
    )
    conditions: MutableSequence["Condition"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="Condition",
    )


class Condition(proto.Message):
    r"""Filter condition applicable to a single key.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The string value to filter the metadata on.

            This field is a member of `oneof`_ ``value``.
        numeric_value (float):
            The numeric value to filter the metadata on.

            This field is a member of `oneof`_ ``value``.
        operation (google.ai.generativelanguage_v1beta.types.Condition.Operator):
            Required. Operator applied to the given
            key-value pair to trigger the condition.
    """

    class Operator(proto.Enum):
        r"""Defines the valid operators that can be applied to a
        key-value pair.

        Values:
            OPERATOR_UNSPECIFIED (0):
                The default value. This value is unused.
            LESS (1):
                Supported by numeric.
            LESS_EQUAL (2):
                Supported by numeric.
            EQUAL (3):
                Supported by numeric & string.
            GREATER_EQUAL (4):
                Supported by numeric.
            GREATER (5):
                Supported by numeric.
            NOT_EQUAL (6):
                Supported by numeric & string.
            INCLUDES (7):
                Supported by string only when ``CustomMetadata`` value type
                for the given key has a ``string_list_value``.
            EXCLUDES (8):
                Supported by string only when ``CustomMetadata`` value type
                for the given key has a ``string_list_value``.
        """
        OPERATOR_UNSPECIFIED = 0
        LESS = 1
        LESS_EQUAL = 2
        EQUAL = 3
        GREATER_EQUAL = 4
        GREATER = 5
        NOT_EQUAL = 6
        INCLUDES = 7
        EXCLUDES = 8

    string_value: str = proto.Field(
        proto.STRING,
        number=1,
        oneof="value",
    )
    numeric_value: float = proto.Field(
        proto.FLOAT,
        number=6,
        oneof="value",
    )
    operation: Operator = proto.Field(
        proto.ENUM,
        number=5,
        enum=Operator,
    )


class Chunk(proto.Message):
    r"""A ``Chunk`` is a subpart of a ``Document`` that is treated as an
    independent unit for the purposes of vector representation and
    storage. A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Chunk`` resource name. The ID
            (name excluding the `corpora/*/documents/*/chunks/` prefix)
            can contain up to 40 characters that are lowercase
            alphanumeric or dashes (-). The ID cannot start or end with
            a dash. If the name is empty on create, a random
            12-character unique ID will be generated. Example:
            ``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c``
        data (google.ai.generativelanguage_v1beta.types.ChunkData):
            Required. The content for the ``Chunk``, such as the text
            string. The maximum number of tokens per chunk is 2043.
        custom_metadata (MutableSequence[google.ai.generativelanguage_v1beta.types.CustomMetadata]):
            Optional. User provided custom metadata stored as key-value
            pairs. The maximum number of ``CustomMetadata`` per chunk is
            20.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Chunk`` was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Chunk`` was last
            updated.
        state (google.ai.generativelanguage_v1beta.types.Chunk.State):
            Output only. Current state of the ``Chunk``.
    """

    class State(proto.Enum):
        r"""States for the lifecycle of a ``Chunk``.

        Values:
            STATE_UNSPECIFIED (0):
                The default value. This value is used if the
                state is omitted.
            STATE_PENDING_PROCESSING (1):
                ``Chunk`` is being processed (embedding and vector storage).
            STATE_ACTIVE (2):
                ``Chunk`` is processed and available for querying.
            STATE_FAILED (10):
                ``Chunk`` failed processing.
        """
        STATE_UNSPECIFIED = 0
        STATE_PENDING_PROCESSING = 1
        STATE_ACTIVE = 2
        # Value 10 (not 3) mirrors the wire enum; do not renumber.
        STATE_FAILED = 10

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    data: "ChunkData" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="ChunkData",
    )
    custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="CustomMetadata",
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    state: State = proto.Field(
        proto.ENUM,
        number=6,
        enum=State,
    )


class ChunkData(proto.Message):
    r"""Extracted data that represents the ``Chunk`` content.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The ``Chunk`` content as a string. The maximum number of
            tokens per chunk is 2043.

            This field is a member of `oneof`_ ``data``.
    """

    string_value: str = proto.Field(
        proto.STRING,
        number=1,
        oneof="data",
    )


__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/retriever_service.py ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1beta.types import retriever
24
+
25
+ __protobuf__ = proto.module(
26
+ package="google.ai.generativelanguage.v1beta",
27
+ manifest={
28
+ "CreateCorpusRequest",
29
+ "GetCorpusRequest",
30
+ "UpdateCorpusRequest",
31
+ "DeleteCorpusRequest",
32
+ "ListCorporaRequest",
33
+ "ListCorporaResponse",
34
+ "QueryCorpusRequest",
35
+ "QueryCorpusResponse",
36
+ "RelevantChunk",
37
+ "CreateDocumentRequest",
38
+ "GetDocumentRequest",
39
+ "UpdateDocumentRequest",
40
+ "DeleteDocumentRequest",
41
+ "ListDocumentsRequest",
42
+ "ListDocumentsResponse",
43
+ "QueryDocumentRequest",
44
+ "QueryDocumentResponse",
45
+ "CreateChunkRequest",
46
+ "BatchCreateChunksRequest",
47
+ "BatchCreateChunksResponse",
48
+ "GetChunkRequest",
49
+ "UpdateChunkRequest",
50
+ "BatchUpdateChunksRequest",
51
+ "BatchUpdateChunksResponse",
52
+ "DeleteChunkRequest",
53
+ "BatchDeleteChunksRequest",
54
+ "ListChunksRequest",
55
+ "ListChunksResponse",
56
+ },
57
+ )
58
+
59
+
60
+ class CreateCorpusRequest(proto.Message):
61
+ r"""Request to create a ``Corpus``.
62
+
63
+ Attributes:
64
+ corpus (google.ai.generativelanguage_v1beta.types.Corpus):
65
+ Required. The ``Corpus`` to create.
66
+ """
67
+
68
+ corpus: retriever.Corpus = proto.Field(
69
+ proto.MESSAGE,
70
+ number=1,
71
+ message=retriever.Corpus,
72
+ )
73
+
74
+
75
+ class GetCorpusRequest(proto.Message):
76
+ r"""Request for getting information about a specific ``Corpus``.
77
+
78
+ Attributes:
79
+ name (str):
80
+ Required. The name of the ``Corpus``. Example:
81
+ ``corpora/my-corpus-123``
82
+ """
83
+
84
+ name: str = proto.Field(
85
+ proto.STRING,
86
+ number=1,
87
+ )
88
+
89
+
90
+ class UpdateCorpusRequest(proto.Message):
91
+ r"""Request to update a ``Corpus``.
92
+
93
+ Attributes:
94
+ corpus (google.ai.generativelanguage_v1beta.types.Corpus):
95
+ Required. The ``Corpus`` to update.
96
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
97
+ Required. The list of fields to update. Currently, this only
98
+ supports updating ``display_name``.
99
+ """
100
+
101
+ corpus: retriever.Corpus = proto.Field(
102
+ proto.MESSAGE,
103
+ number=1,
104
+ message=retriever.Corpus,
105
+ )
106
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
107
+ proto.MESSAGE,
108
+ number=2,
109
+ message=field_mask_pb2.FieldMask,
110
+ )
111
+
112
+
113
+ class DeleteCorpusRequest(proto.Message):
114
+ r"""Request to delete a ``Corpus``.
115
+
116
+ Attributes:
117
+ name (str):
118
+ Required. The resource name of the ``Corpus``. Example:
119
+ ``corpora/my-corpus-123``
120
+ force (bool):
121
+ Optional. If set to true, any ``Document``\ s and objects
122
+ related to this ``Corpus`` will also be deleted.
123
+
124
+ If false (the default), a ``FAILED_PRECONDITION`` error will
125
+ be returned if ``Corpus`` contains any ``Document``\ s.
126
+ """
127
+
128
+ name: str = proto.Field(
129
+ proto.STRING,
130
+ number=1,
131
+ )
132
+ force: bool = proto.Field(
133
+ proto.BOOL,
134
+ number=2,
135
+ )
136
+
137
+
138
+ class ListCorporaRequest(proto.Message):
139
+ r"""Request for listing ``Corpora``.
140
+
141
+ Attributes:
142
+ page_size (int):
143
+ Optional. The maximum number of ``Corpora`` to return (per
144
+ page). The service may return fewer ``Corpora``.
145
+
146
+ If unspecified, at most 10 ``Corpora`` will be returned. The
147
+ maximum size limit is 20 ``Corpora`` per page.
148
+ page_token (str):
149
+ Optional. A page token, received from a previous
150
+ ``ListCorpora`` call.
151
+
152
+ Provide the ``next_page_token`` returned in the response as
153
+ an argument to the next request to retrieve the next page.
154
+
155
+ When paginating, all other parameters provided to
156
+ ``ListCorpora`` must match the call that provided the page
157
+ token.
158
+ """
159
+
160
+ page_size: int = proto.Field(
161
+ proto.INT32,
162
+ number=1,
163
+ )
164
+ page_token: str = proto.Field(
165
+ proto.STRING,
166
+ number=2,
167
+ )
168
+
169
+
170
+ class ListCorporaResponse(proto.Message):
171
+ r"""Response from ``ListCorpora`` containing a paginated list of
172
+ ``Corpora``. The results are sorted by ascending
173
+ ``corpus.create_time``.
174
+
175
+ Attributes:
176
+ corpora (MutableSequence[google.ai.generativelanguage_v1beta.types.Corpus]):
177
+ The returned corpora.
178
+ next_page_token (str):
179
+ A token, which can be sent as ``page_token`` to retrieve the
180
+ next page. If this field is omitted, there are no more
181
+ pages.
182
+ """
183
+
184
+ @property
185
+ def raw_page(self):
186
+ return self
187
+
188
+ corpora: MutableSequence[retriever.Corpus] = proto.RepeatedField(
189
+ proto.MESSAGE,
190
+ number=1,
191
+ message=retriever.Corpus,
192
+ )
193
+ next_page_token: str = proto.Field(
194
+ proto.STRING,
195
+ number=2,
196
+ )
197
+
198
+
199
+ class QueryCorpusRequest(proto.Message):
200
+ r"""Request for querying a ``Corpus``.
201
+
202
+ Attributes:
203
+ name (str):
204
+ Required. The name of the ``Corpus`` to query. Example:
205
+ ``corpora/my-corpus-123``
206
+ query (str):
207
+ Required. Query string to perform semantic
208
+ search.
209
+ metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]):
210
+ Optional. Filter for ``Chunk`` and ``Document`` metadata.
211
+ Each ``MetadataFilter`` object should correspond to a unique
212
+ key. Multiple ``MetadataFilter`` objects are joined by
213
+ logical "AND"s.
214
+
215
+ Example query at document level: (year >= 2020 OR year <
216
+ 2010) AND (genre = drama OR genre = action)
217
+
218
+ ``MetadataFilter`` object list: metadata_filters = [ {key =
219
+ "document.custom_metadata.year" conditions = [{int_value =
220
+ 2020, operation = GREATER_EQUAL}, {int_value = 2010,
221
+ operation = LESS}]}, {key = "document.custom_metadata.year"
222
+ conditions = [{int_value = 2020, operation = GREATER_EQUAL},
223
+ {int_value = 2010, operation = LESS}]}, {key =
224
+ "document.custom_metadata.genre" conditions = [{string_value
225
+ = "drama", operation = EQUAL}, {string_value = "action",
226
+ operation = EQUAL}]}]
227
+
228
+ Example query at chunk level for a numeric range of values:
229
+ (year > 2015 AND year <= 2020)
230
+
231
+ ``MetadataFilter`` object list: metadata_filters = [ {key =
232
+ "chunk.custom_metadata.year" conditions = [{int_value =
233
+ 2015, operation = GREATER}]}, {key =
234
+ "chunk.custom_metadata.year" conditions = [{int_value =
235
+ 2020, operation = LESS_EQUAL}]}]
236
+
237
+ Note: "AND"s for the same key are only supported for numeric
238
+ values. String values only support "OR"s for the same key.
239
+ results_count (int):
240
+ Optional. The maximum number of ``Chunk``\ s to return. The
241
+ service may return fewer ``Chunk``\ s.
242
+
243
+ If unspecified, at most 10 ``Chunk``\ s will be returned.
244
+ The maximum specified result count is 100.
245
+ """
246
+
247
+ name: str = proto.Field(
248
+ proto.STRING,
249
+ number=1,
250
+ )
251
+ query: str = proto.Field(
252
+ proto.STRING,
253
+ number=2,
254
+ )
255
+ metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField(
256
+ proto.MESSAGE,
257
+ number=3,
258
+ message=retriever.MetadataFilter,
259
+ )
260
+ results_count: int = proto.Field(
261
+ proto.INT32,
262
+ number=4,
263
+ )
264
+
265
+
266
+ class QueryCorpusResponse(proto.Message):
267
+ r"""Response from ``QueryCorpus`` containing a list of relevant chunks.
268
+
269
+ Attributes:
270
+ relevant_chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.RelevantChunk]):
271
+ The relevant chunks.
272
+ """
273
+
274
+ relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField(
275
+ proto.MESSAGE,
276
+ number=1,
277
+ message="RelevantChunk",
278
+ )
279
+
280
+
281
+ class RelevantChunk(proto.Message):
282
+ r"""The information for a chunk relevant to a query.
283
+
284
+ Attributes:
285
+ chunk_relevance_score (float):
286
+ ``Chunk`` relevance to the query.
287
+ chunk (google.ai.generativelanguage_v1beta.types.Chunk):
288
+ ``Chunk`` associated with the query.
289
+ """
290
+
291
+ chunk_relevance_score: float = proto.Field(
292
+ proto.FLOAT,
293
+ number=1,
294
+ )
295
+ chunk: retriever.Chunk = proto.Field(
296
+ proto.MESSAGE,
297
+ number=2,
298
+ message=retriever.Chunk,
299
+ )
300
+
301
+
302
+ class CreateDocumentRequest(proto.Message):
303
+ r"""Request to create a ``Document``.
304
+
305
+ Attributes:
306
+ parent (str):
307
+ Required. The name of the ``Corpus`` where this ``Document``
308
+ will be created. Example: ``corpora/my-corpus-123``
309
+ document (google.ai.generativelanguage_v1beta.types.Document):
310
+ Required. The ``Document`` to create.
311
+ """
312
+
313
+ parent: str = proto.Field(
314
+ proto.STRING,
315
+ number=1,
316
+ )
317
+ document: retriever.Document = proto.Field(
318
+ proto.MESSAGE,
319
+ number=2,
320
+ message=retriever.Document,
321
+ )
322
+
323
+
324
+ class GetDocumentRequest(proto.Message):
325
+ r"""Request for getting information about a specific ``Document``.
326
+
327
+ Attributes:
328
+ name (str):
329
+ Required. The name of the ``Document`` to retrieve. Example:
330
+ ``corpora/my-corpus-123/documents/the-doc-abc``
331
+ """
332
+
333
+ name: str = proto.Field(
334
+ proto.STRING,
335
+ number=1,
336
+ )
337
+
338
+
339
+ class UpdateDocumentRequest(proto.Message):
340
+ r"""Request to update a ``Document``.
341
+
342
+ Attributes:
343
+ document (google.ai.generativelanguage_v1beta.types.Document):
344
+ Required. The ``Document`` to update.
345
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
346
+ Required. The list of fields to update. Currently, this only
347
+ supports updating ``display_name`` and ``custom_metadata``.
348
+ """
349
+
350
+ document: retriever.Document = proto.Field(
351
+ proto.MESSAGE,
352
+ number=1,
353
+ message=retriever.Document,
354
+ )
355
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
356
+ proto.MESSAGE,
357
+ number=2,
358
+ message=field_mask_pb2.FieldMask,
359
+ )
360
+
361
+
362
+ class DeleteDocumentRequest(proto.Message):
363
+ r"""Request to delete a ``Document``.
364
+
365
+ Attributes:
366
+ name (str):
367
+ Required. The resource name of the ``Document`` to delete.
368
+ Example: ``corpora/my-corpus-123/documents/the-doc-abc``
369
+ force (bool):
370
+ Optional. If set to true, any ``Chunk``\ s and objects
371
+ related to this ``Document`` will also be deleted.
372
+
373
+ If false (the default), a ``FAILED_PRECONDITION`` error will
374
+ be returned if ``Document`` contains any ``Chunk``\ s.
375
+ """
376
+
377
+ name: str = proto.Field(
378
+ proto.STRING,
379
+ number=1,
380
+ )
381
+ force: bool = proto.Field(
382
+ proto.BOOL,
383
+ number=2,
384
+ )
385
+
386
+
387
+ class ListDocumentsRequest(proto.Message):
388
+ r"""Request for listing ``Document``\ s.
389
+
390
+ Attributes:
391
+ parent (str):
392
+ Required. The name of the ``Corpus`` containing
393
+ ``Document``\ s. Example: ``corpora/my-corpus-123``
394
+ page_size (int):
395
+ Optional. The maximum number of ``Document``\ s to return
396
+ (per page). The service may return fewer ``Document``\ s.
397
+
398
+ If unspecified, at most 10 ``Document``\ s will be returned.
399
+ The maximum size limit is 20 ``Document``\ s per page.
400
+ page_token (str):
401
+ Optional. A page token, received from a previous
402
+ ``ListDocuments`` call.
403
+
404
+ Provide the ``next_page_token`` returned in the response as
405
+ an argument to the next request to retrieve the next page.
406
+
407
+ When paginating, all other parameters provided to
408
+ ``ListDocuments`` must match the call that provided the page
409
+ token.
410
+ """
411
+
412
+ parent: str = proto.Field(
413
+ proto.STRING,
414
+ number=1,
415
+ )
416
+ page_size: int = proto.Field(
417
+ proto.INT32,
418
+ number=2,
419
+ )
420
+ page_token: str = proto.Field(
421
+ proto.STRING,
422
+ number=3,
423
+ )
424
+
425
+
426
+ class ListDocumentsResponse(proto.Message):
427
+ r"""Response from ``ListDocuments`` containing a paginated list of
428
+ ``Document``\ s. The ``Document``\ s are sorted by ascending
429
+ ``document.create_time``.
430
+
431
+ Attributes:
432
+ documents (MutableSequence[google.ai.generativelanguage_v1beta.types.Document]):
433
+ The returned ``Document``\ s.
434
+ next_page_token (str):
435
+ A token, which can be sent as ``page_token`` to retrieve the
436
+ next page. If this field is omitted, there are no more
437
+ pages.
438
+ """
439
+
440
+ @property
441
+ def raw_page(self):
442
+ return self
443
+
444
+ documents: MutableSequence[retriever.Document] = proto.RepeatedField(
445
+ proto.MESSAGE,
446
+ number=1,
447
+ message=retriever.Document,
448
+ )
449
+ next_page_token: str = proto.Field(
450
+ proto.STRING,
451
+ number=2,
452
+ )
453
+
454
+
455
+ class QueryDocumentRequest(proto.Message):
456
+ r"""Request for querying a ``Document``.
457
+
458
+ Attributes:
459
+ name (str):
460
+ Required. The name of the ``Document`` to query. Example:
461
+ ``corpora/my-corpus-123/documents/the-doc-abc``
462
+ query (str):
463
+ Required. Query string to perform semantic
464
+ search.
465
+ results_count (int):
466
+ Optional. The maximum number of ``Chunk``\ s to return. The
467
+ service may return fewer ``Chunk``\ s.
468
+
469
+ If unspecified, at most 10 ``Chunk``\ s will be returned.
470
+ The maximum specified result count is 100.
471
+ metadata_filters (MutableSequence[google.ai.generativelanguage_v1beta.types.MetadataFilter]):
472
+ Optional. Filter for ``Chunk`` metadata. Each
473
+ ``MetadataFilter`` object should correspond to a unique key.
474
+ Multiple ``MetadataFilter`` objects are joined by logical
475
+ "AND"s.
476
+
477
+ Note: ``Document``-level filtering is not supported for this
478
+ request because a ``Document`` name is already specified.
479
+
480
+ Example query: (year >= 2020 OR year < 2010) AND (genre =
481
+ drama OR genre = action)
482
+
483
+ ``MetadataFilter`` object list: metadata_filters = [ {key =
484
+ "chunk.custom_metadata.year" conditions = [{int_value =
485
+ 2020, operation = GREATER_EQUAL}, {int_value = 2010,
486
+ operation = LESS}}, {key = "chunk.custom_metadata.genre"
487
+ conditions = [{string_value = "drama", operation = EQUAL},
488
+ {string_value = "action", operation = EQUAL}}]
489
+
490
+ Example query for a numeric range of values: (year > 2015
491
+ AND year <= 2020)
492
+
493
+ ``MetadataFilter`` object list: metadata_filters = [ {key =
494
+ "chunk.custom_metadata.year" conditions = [{int_value =
495
+ 2015, operation = GREATER}]}, {key =
496
+ "chunk.custom_metadata.year" conditions = [{int_value =
497
+ 2020, operation = LESS_EQUAL}]}]
498
+
499
+ Note: "AND"s for the same key are only supported for numeric
500
+ values. String values only support "OR"s for the same key.
501
+ """
502
+
503
+ name: str = proto.Field(
504
+ proto.STRING,
505
+ number=1,
506
+ )
507
+ query: str = proto.Field(
508
+ proto.STRING,
509
+ number=2,
510
+ )
511
+ results_count: int = proto.Field(
512
+ proto.INT32,
513
+ number=3,
514
+ )
515
+ metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField(
516
+ proto.MESSAGE,
517
+ number=4,
518
+ message=retriever.MetadataFilter,
519
+ )
520
+
521
+
522
+ class QueryDocumentResponse(proto.Message):
523
+ r"""Response from ``QueryDocument`` containing a list of relevant
524
+ chunks.
525
+
526
+ Attributes:
527
+ relevant_chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.RelevantChunk]):
528
+ The returned relevant chunks.
529
+ """
530
+
531
+ relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField(
532
+ proto.MESSAGE,
533
+ number=1,
534
+ message="RelevantChunk",
535
+ )
536
+
537
+
538
+ class CreateChunkRequest(proto.Message):
539
+ r"""Request to create a ``Chunk``.
540
+
541
+ Attributes:
542
+ parent (str):
543
+ Required. The name of the ``Document`` where this ``Chunk``
544
+ will be created. Example:
545
+ ``corpora/my-corpus-123/documents/the-doc-abc``
546
+ chunk (google.ai.generativelanguage_v1beta.types.Chunk):
547
+ Required. The ``Chunk`` to create.
548
+ """
549
+
550
+ parent: str = proto.Field(
551
+ proto.STRING,
552
+ number=1,
553
+ )
554
+ chunk: retriever.Chunk = proto.Field(
555
+ proto.MESSAGE,
556
+ number=2,
557
+ message=retriever.Chunk,
558
+ )
559
+
560
+
561
+ class BatchCreateChunksRequest(proto.Message):
562
+ r"""Request to batch create ``Chunk``\ s.
563
+
564
+ Attributes:
565
+ parent (str):
566
+ Optional. The name of the ``Document`` where this batch of
567
+ ``Chunk``\ s will be created. The parent field in every
568
+ ``CreateChunkRequest`` must match this value. Example:
569
+ ``corpora/my-corpus-123/documents/the-doc-abc``
570
+ requests (MutableSequence[google.ai.generativelanguage_v1beta.types.CreateChunkRequest]):
571
+ Required. The request messages specifying the ``Chunk``\ s
572
+ to create. A maximum of 100 ``Chunk``\ s can be created in a
573
+ batch.
574
+ """
575
+
576
+ parent: str = proto.Field(
577
+ proto.STRING,
578
+ number=1,
579
+ )
580
+ requests: MutableSequence["CreateChunkRequest"] = proto.RepeatedField(
581
+ proto.MESSAGE,
582
+ number=2,
583
+ message="CreateChunkRequest",
584
+ )
585
+
586
+
587
+ class BatchCreateChunksResponse(proto.Message):
588
+ r"""Response from ``BatchCreateChunks`` containing a list of created
589
+ ``Chunk``\ s.
590
+
591
+ Attributes:
592
+ chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]):
593
+ ``Chunk``\ s created.
594
+ """
595
+
596
+ chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
597
+ proto.MESSAGE,
598
+ number=1,
599
+ message=retriever.Chunk,
600
+ )
601
+
602
+
603
+ class GetChunkRequest(proto.Message):
604
+ r"""Request for getting information about a specific ``Chunk``.
605
+
606
+ Attributes:
607
+ name (str):
608
+ Required. The name of the ``Chunk`` to retrieve. Example:
609
+ ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
610
+ """
611
+
612
+ name: str = proto.Field(
613
+ proto.STRING,
614
+ number=1,
615
+ )
616
+
617
+
618
+ class UpdateChunkRequest(proto.Message):
619
+ r"""Request to update a ``Chunk``.
620
+
621
+ Attributes:
622
+ chunk (google.ai.generativelanguage_v1beta.types.Chunk):
623
+ Required. The ``Chunk`` to update.
624
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
625
+ Required. The list of fields to update. Currently, this only
626
+ supports updating ``custom_metadata`` and ``data``.
627
+ """
628
+
629
+ chunk: retriever.Chunk = proto.Field(
630
+ proto.MESSAGE,
631
+ number=1,
632
+ message=retriever.Chunk,
633
+ )
634
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
635
+ proto.MESSAGE,
636
+ number=2,
637
+ message=field_mask_pb2.FieldMask,
638
+ )
639
+
640
+
641
+ class BatchUpdateChunksRequest(proto.Message):
642
+ r"""Request to batch update ``Chunk``\ s.
643
+
644
+ Attributes:
645
+ parent (str):
646
+ Optional. The name of the ``Document`` containing the
647
+ ``Chunk``\ s to update. The parent field in every
648
+ ``UpdateChunkRequest`` must match this value. Example:
649
+ ``corpora/my-corpus-123/documents/the-doc-abc``
650
+ requests (MutableSequence[google.ai.generativelanguage_v1beta.types.UpdateChunkRequest]):
651
+ Required. The request messages specifying the ``Chunk``\ s
652
+ to update. A maximum of 100 ``Chunk``\ s can be updated in a
653
+ batch.
654
+ """
655
+
656
+ parent: str = proto.Field(
657
+ proto.STRING,
658
+ number=1,
659
+ )
660
+ requests: MutableSequence["UpdateChunkRequest"] = proto.RepeatedField(
661
+ proto.MESSAGE,
662
+ number=2,
663
+ message="UpdateChunkRequest",
664
+ )
665
+
666
+
667
+ class BatchUpdateChunksResponse(proto.Message):
668
+ r"""Response from ``BatchUpdateChunks`` containing a list of updated
669
+ ``Chunk``\ s.
670
+
671
+ Attributes:
672
+ chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]):
673
+ ``Chunk``\ s updated.
674
+ """
675
+
676
+ chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
677
+ proto.MESSAGE,
678
+ number=1,
679
+ message=retriever.Chunk,
680
+ )
681
+
682
+
683
+ class DeleteChunkRequest(proto.Message):
684
+ r"""Request to delete a ``Chunk``.
685
+
686
+ Attributes:
687
+ name (str):
688
+ Required. The resource name of the ``Chunk`` to delete.
689
+ Example:
690
+ ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
691
+ """
692
+
693
+ name: str = proto.Field(
694
+ proto.STRING,
695
+ number=1,
696
+ )
697
+
698
+
699
+ class BatchDeleteChunksRequest(proto.Message):
700
+ r"""Request to batch delete ``Chunk``\ s.
701
+
702
+ Attributes:
703
+ parent (str):
704
+ Optional. The name of the ``Document`` containing the
705
+ ``Chunk``\ s to delete. The parent field in every
706
+ ``DeleteChunkRequest`` must match this value. Example:
707
+ ``corpora/my-corpus-123/documents/the-doc-abc``
708
+ requests (MutableSequence[google.ai.generativelanguage_v1beta.types.DeleteChunkRequest]):
709
+ Required. The request messages specifying the ``Chunk``\ s
710
+ to delete.
711
+ """
712
+
713
+ parent: str = proto.Field(
714
+ proto.STRING,
715
+ number=1,
716
+ )
717
+ requests: MutableSequence["DeleteChunkRequest"] = proto.RepeatedField(
718
+ proto.MESSAGE,
719
+ number=2,
720
+ message="DeleteChunkRequest",
721
+ )
722
+
723
+
724
+ class ListChunksRequest(proto.Message):
725
+ r"""Request for listing ``Chunk``\ s.
726
+
727
+ Attributes:
728
+ parent (str):
729
+ Required. The name of the ``Document`` containing
730
+ ``Chunk``\ s. Example:
731
+ ``corpora/my-corpus-123/documents/the-doc-abc``
732
+ page_size (int):
733
+ Optional. The maximum number of ``Chunk``\ s to return (per
734
+ page). The service may return fewer ``Chunk``\ s.
735
+
736
+ If unspecified, at most 10 ``Chunk``\ s will be returned.
737
+ The maximum size limit is 100 ``Chunk``\ s per page.
738
+ page_token (str):
739
+ Optional. A page token, received from a previous
740
+ ``ListChunks`` call.
741
+
742
+ Provide the ``next_page_token`` returned in the response as
743
+ an argument to the next request to retrieve the next page.
744
+
745
+ When paginating, all other parameters provided to
746
+ ``ListChunks`` must match the call that provided the page
747
+ token.
748
+ """
749
+
750
+ parent: str = proto.Field(
751
+ proto.STRING,
752
+ number=1,
753
+ )
754
+ page_size: int = proto.Field(
755
+ proto.INT32,
756
+ number=2,
757
+ )
758
+ page_token: str = proto.Field(
759
+ proto.STRING,
760
+ number=3,
761
+ )
762
+
763
+
764
+ class ListChunksResponse(proto.Message):
765
+ r"""Response from ``ListChunks`` containing a paginated list of
766
+ ``Chunk``\ s. The ``Chunk``\ s are sorted by ascending
767
+ ``chunk.create_time``.
768
+
769
+ Attributes:
770
+ chunks (MutableSequence[google.ai.generativelanguage_v1beta.types.Chunk]):
771
+ The returned ``Chunk``\ s.
772
+ next_page_token (str):
773
+ A token, which can be sent as ``page_token`` to retrieve the
774
+ next page. If this field is omitted, there are no more
775
+ pages.
776
+ """
777
+
778
+ @property
779
+ def raw_page(self):
780
+ return self
781
+
782
+ chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
783
+ proto.MESSAGE,
784
+ number=1,
785
+ message=retriever.Chunk,
786
+ )
787
+ next_page_token: str = proto.Field(
788
+ proto.STRING,
789
+ number=2,
790
+ )
791
+
792
+
793
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/safety.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1beta",
24
+ manifest={
25
+ "HarmCategory",
26
+ "ContentFilter",
27
+ "SafetyFeedback",
28
+ "SafetyRating",
29
+ "SafetySetting",
30
+ },
31
+ )
32
+
33
+
34
+ class HarmCategory(proto.Enum):
35
+ r"""The category of a rating.
36
+
37
+ These categories cover various kinds of harms that developers
38
+ may wish to adjust.
39
+
40
+ Values:
41
+ HARM_CATEGORY_UNSPECIFIED (0):
42
+ Category is unspecified.
43
+ HARM_CATEGORY_DEROGATORY (1):
44
+ **PaLM** - Negative or harmful comments targeting identity
45
+ and/or protected attribute.
46
+ HARM_CATEGORY_TOXICITY (2):
47
+ **PaLM** - Content that is rude, disrespectful, or profane.
48
+ HARM_CATEGORY_VIOLENCE (3):
49
+ **PaLM** - Describes scenarios depicting violence against an
50
+ individual or group, or general descriptions of gore.
51
+ HARM_CATEGORY_SEXUAL (4):
52
+ **PaLM** - Contains references to sexual acts or other lewd
53
+ content.
54
+ HARM_CATEGORY_MEDICAL (5):
55
+ **PaLM** - Promotes unchecked medical advice.
56
+ HARM_CATEGORY_DANGEROUS (6):
57
+ **PaLM** - Dangerous content that promotes, facilitates, or
58
+ encourages harmful acts.
59
+ HARM_CATEGORY_HARASSMENT (7):
60
+ **Gemini** - Harassment content.
61
+ HARM_CATEGORY_HATE_SPEECH (8):
62
+ **Gemini** - Hate speech and content.
63
+ HARM_CATEGORY_SEXUALLY_EXPLICIT (9):
64
+ **Gemini** - Sexually explicit content.
65
+ HARM_CATEGORY_DANGEROUS_CONTENT (10):
66
+ **Gemini** - Dangerous content.
67
+ HARM_CATEGORY_CIVIC_INTEGRITY (11):
68
+ **Gemini** - Content that may be used to harm civic
69
+ integrity.
70
+ """
71
+ HARM_CATEGORY_UNSPECIFIED = 0
72
+ HARM_CATEGORY_DEROGATORY = 1
73
+ HARM_CATEGORY_TOXICITY = 2
74
+ HARM_CATEGORY_VIOLENCE = 3
75
+ HARM_CATEGORY_SEXUAL = 4
76
+ HARM_CATEGORY_MEDICAL = 5
77
+ HARM_CATEGORY_DANGEROUS = 6
78
+ HARM_CATEGORY_HARASSMENT = 7
79
+ HARM_CATEGORY_HATE_SPEECH = 8
80
+ HARM_CATEGORY_SEXUALLY_EXPLICIT = 9
81
+ HARM_CATEGORY_DANGEROUS_CONTENT = 10
82
+ HARM_CATEGORY_CIVIC_INTEGRITY = 11
83
+
84
+
85
+ class ContentFilter(proto.Message):
86
+ r"""Content filtering metadata associated with processing a
87
+ single request.
88
+ ContentFilter contains a reason and an optional supporting
89
+ string. The reason may be unspecified.
90
+
91
+
92
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
93
+
94
+ Attributes:
95
+ reason (google.ai.generativelanguage_v1beta.types.ContentFilter.BlockedReason):
96
+ The reason content was blocked during request
97
+ processing.
98
+ message (str):
99
+ A string that describes the filtering
100
+ behavior in more detail.
101
+
102
+ This field is a member of `oneof`_ ``_message``.
103
+ """
104
+
105
+ class BlockedReason(proto.Enum):
106
+ r"""A list of reasons why content may have been blocked.
107
+
108
+ Values:
109
+ BLOCKED_REASON_UNSPECIFIED (0):
110
+ A blocked reason was not specified.
111
+ SAFETY (1):
112
+ Content was blocked by safety settings.
113
+ OTHER (2):
114
+ Content was blocked, but the reason is
115
+ uncategorized.
116
+ """
117
+ BLOCKED_REASON_UNSPECIFIED = 0
118
+ SAFETY = 1
119
+ OTHER = 2
120
+
121
+ reason: BlockedReason = proto.Field(
122
+ proto.ENUM,
123
+ number=1,
124
+ enum=BlockedReason,
125
+ )
126
+ message: str = proto.Field(
127
+ proto.STRING,
128
+ number=2,
129
+ optional=True,
130
+ )
131
+
132
+
133
+ class SafetyFeedback(proto.Message):
134
+ r"""Safety feedback for an entire request.
135
+
136
+ This field is populated if content in the input and/or response
137
+ is blocked due to safety settings. SafetyFeedback may not exist
138
+ for every HarmCategory. Each SafetyFeedback will return the
139
+ safety settings used by the request as well as the lowest
140
+ HarmProbability that should be allowed in order to return a
141
+ result.
142
+
143
+ Attributes:
144
+ rating (google.ai.generativelanguage_v1beta.types.SafetyRating):
145
+ Safety rating evaluated from content.
146
+ setting (google.ai.generativelanguage_v1beta.types.SafetySetting):
147
+ Safety settings applied to the request.
148
+ """
149
+
150
+ rating: "SafetyRating" = proto.Field(
151
+ proto.MESSAGE,
152
+ number=1,
153
+ message="SafetyRating",
154
+ )
155
+ setting: "SafetySetting" = proto.Field(
156
+ proto.MESSAGE,
157
+ number=2,
158
+ message="SafetySetting",
159
+ )
160
+
161
+
162
+ class SafetyRating(proto.Message):
163
+ r"""Safety rating for a piece of content.
164
+
165
+ The safety rating contains the category of harm and the harm
166
+ probability level in that category for a piece of content.
167
+ Content is classified for safety across a number of harm
168
+ categories and the probability of the harm classification is
169
+ included here.
170
+
171
+ Attributes:
172
+ category (google.ai.generativelanguage_v1beta.types.HarmCategory):
173
+ Required. The category for this rating.
174
+ probability (google.ai.generativelanguage_v1beta.types.SafetyRating.HarmProbability):
175
+ Required. The probability of harm for this
176
+ content.
177
+ blocked (bool):
178
+ Was this content blocked because of this
179
+ rating?
180
+ """
181
+
182
+ class HarmProbability(proto.Enum):
183
+ r"""The probability that a piece of content is harmful.
184
+
185
+ The classification system gives the probability of the content
186
+ being unsafe. This does not indicate the severity of harm for a
187
+ piece of content.
188
+
189
+ Values:
190
+ HARM_PROBABILITY_UNSPECIFIED (0):
191
+ Probability is unspecified.
192
+ NEGLIGIBLE (1):
193
+ Content has a negligible chance of being
194
+ unsafe.
195
+ LOW (2):
196
+ Content has a low chance of being unsafe.
197
+ MEDIUM (3):
198
+ Content has a medium chance of being unsafe.
199
+ HIGH (4):
200
+ Content has a high chance of being unsafe.
201
+ """
202
+ HARM_PROBABILITY_UNSPECIFIED = 0
203
+ NEGLIGIBLE = 1
204
+ LOW = 2
205
+ MEDIUM = 3
206
+ HIGH = 4
207
+
208
+ category: "HarmCategory" = proto.Field(
209
+ proto.ENUM,
210
+ number=3,
211
+ enum="HarmCategory",
212
+ )
213
+ probability: HarmProbability = proto.Field(
214
+ proto.ENUM,
215
+ number=4,
216
+ enum=HarmProbability,
217
+ )
218
+ blocked: bool = proto.Field(
219
+ proto.BOOL,
220
+ number=5,
221
+ )
222
+
223
+
224
+ class SafetySetting(proto.Message):
225
+ r"""Safety setting, affecting the safety-blocking behavior.
226
+
227
+ Passing a safety setting for a category changes the allowed
228
+ probability that content is blocked.
229
+
230
+ Attributes:
231
+ category (google.ai.generativelanguage_v1beta.types.HarmCategory):
232
+ Required. The category for this setting.
233
+ threshold (google.ai.generativelanguage_v1beta.types.SafetySetting.HarmBlockThreshold):
234
+ Required. Controls the probability threshold
235
+ at which harm is blocked.
236
+ """
237
+
238
+ class HarmBlockThreshold(proto.Enum):
239
+ r"""Block at and beyond a specified harm probability.
240
+
241
+ Values:
242
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED (0):
243
+ Threshold is unspecified.
244
+ BLOCK_LOW_AND_ABOVE (1):
245
+ Content with NEGLIGIBLE will be allowed.
246
+ BLOCK_MEDIUM_AND_ABOVE (2):
247
+ Content with NEGLIGIBLE and LOW will be
248
+ allowed.
249
+ BLOCK_ONLY_HIGH (3):
250
+ Content with NEGLIGIBLE, LOW, and MEDIUM will
251
+ be allowed.
252
+ BLOCK_NONE (4):
253
+ All content will be allowed.
254
+ OFF (5):
255
+ Turn off the safety filter.
256
+ """
257
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0
258
+ BLOCK_LOW_AND_ABOVE = 1
259
+ BLOCK_MEDIUM_AND_ABOVE = 2
260
+ BLOCK_ONLY_HIGH = 3
261
+ BLOCK_NONE = 4
262
+ OFF = 5
263
+
264
+ category: "HarmCategory" = proto.Field(
265
+ proto.ENUM,
266
+ number=3,
267
+ enum="HarmCategory",
268
+ )
269
+ threshold: HarmBlockThreshold = proto.Field(
270
+ proto.ENUM,
271
+ number=4,
272
+ enum=HarmBlockThreshold,
273
+ )
274
+
275
+
276
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/text_service.py ADDED
@@ -0,0 +1,441 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1beta.types import citation, safety
23
+
24
# Registers every message in this module with proto-plus under the
# google.ai.generativelanguage.v1beta proto package; the manifest drives
# __all__ at the bottom of the file.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "GenerateTextRequest",
        "GenerateTextResponse",
        "TextPrompt",
        "TextCompletion",
        "EmbedTextRequest",
        "EmbedTextResponse",
        "BatchEmbedTextRequest",
        "BatchEmbedTextResponse",
        "Embedding",
        "CountTextTokensRequest",
        "CountTextTokensResponse",
    },
)
40
+
41
+
42
class GenerateTextRequest(proto.Message):
    r"""Request to generate a text completion response from the
    model.


    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        model (str):
            Required. The name of the ``Model`` or ``TunedModel`` to use
            for generating the completion. Examples:
            models/text-bison-001 tunedModels/sentence-translator-u3b7m
        prompt (google.ai.generativelanguage_v1beta.types.TextPrompt):
            Required. The free-form input text given to
            the model as a prompt.
            Given a prompt, the model will generate a
            TextCompletion response it predicts as the
            completion of the input text.
        temperature (float):
            Optional. Controls the randomness of the output. Note: The
            default value varies by model, see the ``Model.temperature``
            attribute of the ``Model`` returned the ``getModel``
            function.

            Values can range from [0.0,1.0], inclusive. A value closer
            to 1.0 will produce responses that are more varied and
            creative, while a value closer to 0.0 will typically result
            in more straightforward responses from the model.

            This field is a member of `oneof`_ ``_temperature``.
        candidate_count (int):
            Optional. Number of generated responses to return.

            This value must be between [1, 8], inclusive. If unset, this
            will default to 1.

            This field is a member of `oneof`_ ``_candidate_count``.
        max_output_tokens (int):
            Optional. The maximum number of tokens to include in a
            candidate.

            If unset, this will default to output_token_limit specified
            in the ``Model`` specification.

            This field is a member of `oneof`_ ``_max_output_tokens``.
        top_p (float):
            Optional. The maximum cumulative probability of tokens to
            consider when sampling.

            The model uses combined Top-k and nucleus sampling.

            Tokens are sorted based on their assigned probabilities so
            that only the most likely tokens are considered. Top-k
            sampling directly limits the maximum number of tokens to
            consider, while Nucleus sampling limits number of tokens
            based on the cumulative probability.

            Note: The default value varies by model, see the
            ``Model.top_p`` attribute of the ``Model`` returned the
            ``getModel`` function.

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            Optional. The maximum number of tokens to consider when
            sampling.

            The model uses combined Top-k and nucleus sampling.

            Top-k sampling considers the set of ``top_k`` most probable
            tokens. Defaults to 40.

            Note: The default value varies by model, see the
            ``Model.top_k`` attribute of the ``Model`` returned the
            ``getModel`` function.

            This field is a member of `oneof`_ ``_top_k``.
        safety_settings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetySetting]):
            Optional. A list of unique ``SafetySetting`` instances for
            blocking unsafe content.

            that will be enforced on the ``GenerateTextRequest.prompt``
            and ``GenerateTextResponse.candidates``. There should not be
            more than one setting for each ``SafetyCategory`` type. The
            API will block any prompts and responses that fail to meet
            the thresholds set by these settings. This list overrides
            the default settings for each ``SafetyCategory`` specified
            in the safety_settings. If there is no ``SafetySetting`` for
            a given ``SafetyCategory`` provided in the list, the API
            will use the default safety setting for that category. Harm
            categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
            HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
            HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported
            in text service.
        stop_sequences (MutableSequence[str]):
            The set of character sequences (up to 5) that
            will stop output generation. If specified, the
            API will stop at the first appearance of a stop
            sequence. The stop sequence will not be included
            as part of the response.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    prompt: "TextPrompt" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="TextPrompt",
    )
    # optional=True marks these scalars as proto3 `optional` (oneof-backed),
    # so an unset field is distinguishable from its zero value.
    temperature: float = proto.Field(
        proto.FLOAT,
        number=3,
        optional=True,
    )
    candidate_count: int = proto.Field(
        proto.INT32,
        number=4,
        optional=True,
    )
    max_output_tokens: int = proto.Field(
        proto.INT32,
        number=5,
        optional=True,
    )
    top_p: float = proto.Field(
        proto.FLOAT,
        number=6,
        optional=True,
    )
    top_k: int = proto.Field(
        proto.INT32,
        number=7,
        optional=True,
    )
    safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
        proto.MESSAGE,
        number=8,
        message=safety.SafetySetting,
    )
    stop_sequences: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=9,
    )
186
+
187
+
188
class GenerateTextResponse(proto.Message):
    r"""The response from the model, including candidate completions.

    Attributes:
        candidates (MutableSequence[google.ai.generativelanguage_v1beta.types.TextCompletion]):
            Candidate responses from the model.
        filters (MutableSequence[google.ai.generativelanguage_v1beta.types.ContentFilter]):
            A set of content filtering metadata for the prompt and
            response text.

            This indicates which ``SafetyCategory``\ (s) blocked a
            candidate from this response, the lowest ``HarmProbability``
            that triggered a block, and the HarmThreshold setting for
            that category. This indicates the smallest change to the
            ``SafetySettings`` that would be necessary to unblock at
            least 1 response.

            The blocking is configured by the ``SafetySettings`` in the
            request (or the default ``SafetySettings`` of the API).
        safety_feedback (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyFeedback]):
            Returns any safety feedback related to
            content filtering.
    """

    candidates: MutableSequence["TextCompletion"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="TextCompletion",
    )
    # NOTE(review): field number 2 is skipped here — presumably reserved in the
    # upstream proto; do not reuse it.
    filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=safety.ContentFilter,
    )
    safety_feedback: MutableSequence[safety.SafetyFeedback] = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message=safety.SafetyFeedback,
    )
227
+
228
+
229
class TextPrompt(proto.Message):
    r"""Text given to the model as a prompt.

    The Model will use this TextPrompt to Generate a text
    completion.

    Attributes:
        text (str):
            Required. The prompt text.
    """

    text: str = proto.Field(
        proto.STRING,
        number=1,
    )
244
+
245
+
246
class TextCompletion(proto.Message):
    r"""Output text returned from a model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        output (str):
            Output only. The generated text returned from
            the model.
        safety_ratings (MutableSequence[google.ai.generativelanguage_v1beta.types.SafetyRating]):
            Ratings for the safety of a response.

            There is at most one rating per category.
        citation_metadata (google.ai.generativelanguage_v1beta.types.CitationMetadata):
            Output only. Citation information for model-generated
            ``output`` in this ``TextCompletion``.

            This field may be populated with attribution information for
            any text included in the ``output``.

            This field is a member of `oneof`_ ``_citation_metadata``.
    """

    output: str = proto.Field(
        proto.STRING,
        number=1,
    )
    safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=safety.SafetyRating,
    )
    # optional=True: presence-tracked, so callers can tell "no citations"
    # apart from an empty CitationMetadata message.
    citation_metadata: citation.CitationMetadata = proto.Field(
        proto.MESSAGE,
        number=3,
        optional=True,
        message=citation.CitationMetadata,
    )
284
+
285
+
286
class EmbedTextRequest(proto.Message):
    r"""Request to get a text embedding from the model.

    Attributes:
        model (str):
            Required. The model name to use with the
            format model=models/{model}.
        text (str):
            Optional. The free-form input text that the
            model will turn into an embedding.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    text: str = proto.Field(
        proto.STRING,
        number=2,
    )
306
+
307
+
308
class EmbedTextResponse(proto.Message):
    r"""The response to a EmbedTextRequest.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        embedding (google.ai.generativelanguage_v1beta.types.Embedding):
            Output only. The embedding generated from the
            input text.

            This field is a member of `oneof`_ ``_embedding``.
    """

    embedding: "Embedding" = proto.Field(
        proto.MESSAGE,
        number=1,
        optional=True,
        message="Embedding",
    )
327
+
328
+
329
class BatchEmbedTextRequest(proto.Message):
    r"""Batch request to get a text embedding from the model.

    Attributes:
        model (str):
            Required. The name of the ``Model`` to use for generating
            the embedding. Examples: models/embedding-gecko-001
        texts (MutableSequence[str]):
            Optional. The free-form input texts that the
            model will turn into an embedding. The current
            limit is 100 texts, over which an error will be
            thrown.
        requests (MutableSequence[google.ai.generativelanguage_v1beta.types.EmbedTextRequest]):
            Optional. Embed requests for the batch. Only one of
            ``texts`` or ``requests`` can be set.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    # NOTE(review): `texts` and `requests` are documented as mutually
    # exclusive, but this is enforced server-side, not by a proto oneof.
    texts: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=2,
    )
    requests: MutableSequence["EmbedTextRequest"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="EmbedTextRequest",
    )
359
+
360
+
361
class BatchEmbedTextResponse(proto.Message):
    r"""The response to a EmbedTextRequest.

    Attributes:
        embeddings (MutableSequence[google.ai.generativelanguage_v1beta.types.Embedding]):
            Output only. The embeddings generated from
            the input text.
    """

    embeddings: MutableSequence["Embedding"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="Embedding",
    )
375
+
376
+
377
class Embedding(proto.Message):
    r"""A list of floats representing the embedding.

    Attributes:
        value (MutableSequence[float]):
            The embedding values.
    """

    value: MutableSequence[float] = proto.RepeatedField(
        proto.FLOAT,
        number=1,
    )
389
+
390
+
391
class CountTextTokensRequest(proto.Message):
    r"""Counts the number of tokens in the ``prompt`` sent to a model.

    Models may tokenize text differently, so each model may return a
    different ``token_count``.

    Attributes:
        model (str):
            Required. The model's resource name. This serves as an ID
            for the Model to use.

            This name should match a model name returned by the
            ``ListModels`` method.

            Format: ``models/{model}``
        prompt (google.ai.generativelanguage_v1beta.types.TextPrompt):
            Required. The free-form input text given to
            the model as a prompt.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    prompt: "TextPrompt" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="TextPrompt",
    )
420
+
421
+
422
class CountTextTokensResponse(proto.Message):
    r"""A response from ``CountTextTokens``.

    It returns the model's ``token_count`` for the ``prompt``.

    Attributes:
        token_count (int):
            The number of tokens that the ``model`` tokenizes the
            ``prompt`` into.

            Always non-negative.
    """

    token_count: int = proto.Field(
        proto.INT32,
        number=1,
    )
439
+
440
+
441
# Public API of this module is exactly the set of names registered in the
# proto.module manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/types/tuned_model.py ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import timestamp_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
# Registers every message in this module with proto-plus under the
# google.ai.generativelanguage.v1beta proto package; the manifest drives
# __all__ at the bottom of the file.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1beta",
    manifest={
        "TunedModel",
        "TunedModelSource",
        "TuningTask",
        "Hyperparameters",
        "Dataset",
        "TuningExamples",
        "TuningExample",
        "TuningSnapshot",
    },
)
36
+
37
+
38
class TunedModel(proto.Message):
    r"""A fine-tuned model created using
    ModelService.CreateTunedModel.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        tuned_model_source (google.ai.generativelanguage_v1beta.types.TunedModelSource):
            Optional. TunedModel to use as the starting
            point for training the new model.

            This field is a member of `oneof`_ ``source_model``.
        base_model (str):
            Immutable. The name of the ``Model`` to tune. Example:
            ``models/gemini-1.5-flash-001``

            This field is a member of `oneof`_ ``source_model``.
        name (str):
            Output only. The tuned model name. A unique name will be
            generated on create. Example: ``tunedModels/az2mb0bpw6i`` If
            display_name is set on create, the id portion of the name
            will be set by concatenating the words of the display_name
            with hyphens and adding a random portion for uniqueness.

            Example:

            - display_name = ``Sentence Translator``
            - name = ``tunedModels/sentence-translator-u3b7m``
        display_name (str):
            Optional. The name to display for this model
            in user interfaces. The display name must be up
            to 40 characters including spaces.
        description (str):
            Optional. A short description of this model.
        temperature (float):
            Optional. Controls the randomness of the output.

            Values can range over ``[0.0,1.0]``, inclusive. A value
            closer to ``1.0`` will produce responses that are more
            varied, while a value closer to ``0.0`` will typically
            result in less surprising responses from the model.

            This value specifies default to be the one used by the base
            model while creating the model.

            This field is a member of `oneof`_ ``_temperature``.
        top_p (float):
            Optional. For Nucleus sampling.

            Nucleus sampling considers the smallest set of tokens whose
            probability sum is at least ``top_p``.

            This value specifies default to be the one used by the base
            model while creating the model.

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            Optional. For Top-k sampling.

            Top-k sampling considers the set of ``top_k`` most probable
            tokens. This value specifies default to be used by the
            backend while making the call to the model.

            This value specifies default to be the one used by the base
            model while creating the model.

            This field is a member of `oneof`_ ``_top_k``.
        state (google.ai.generativelanguage_v1beta.types.TunedModel.State):
            Output only. The state of the tuned model.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when this model
            was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when this model
            was updated.
        tuning_task (google.ai.generativelanguage_v1beta.types.TuningTask):
            Required. The tuning task that creates the
            tuned model.
        reader_project_numbers (MutableSequence[int]):
            Optional. List of project numbers that have
            read access to the tuned model.
    """

    class State(proto.Enum):
        r"""The state of the tuned model.

        Values:
            STATE_UNSPECIFIED (0):
                The default value. This value is unused.
            CREATING (1):
                The model is being created.
            ACTIVE (2):
                The model is ready to be used.
            FAILED (3):
                The model failed to be created.
        """
        STATE_UNSPECIFIED = 0
        CREATING = 1
        ACTIVE = 2
        FAILED = 3

    # The two `source_model` oneof members are declared first; exactly one of
    # tuned_model_source / base_model may be set at a time.
    tuned_model_source: "TunedModelSource" = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="source_model",
        message="TunedModelSource",
    )
    base_model: str = proto.Field(
        proto.STRING,
        number=4,
        oneof="source_model",
    )
    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=5,
    )
    description: str = proto.Field(
        proto.STRING,
        number=6,
    )
    temperature: float = proto.Field(
        proto.FLOAT,
        number=11,
        optional=True,
    )
    top_p: float = proto.Field(
        proto.FLOAT,
        number=12,
        optional=True,
    )
    top_k: int = proto.Field(
        proto.INT32,
        number=13,
        optional=True,
    )
    state: State = proto.Field(
        proto.ENUM,
        number=7,
        enum=State,
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=8,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=9,
        message=timestamp_pb2.Timestamp,
    )
    tuning_task: "TuningTask" = proto.Field(
        proto.MESSAGE,
        number=10,
        message="TuningTask",
    )
    reader_project_numbers: MutableSequence[int] = proto.RepeatedField(
        proto.INT64,
        number=14,
    )
206
+
207
+
208
class TunedModelSource(proto.Message):
    r"""Tuned model as a source for training a new model.

    Attributes:
        tuned_model (str):
            Immutable. The name of the ``TunedModel`` to use as the
            starting point for training the new model. Example:
            ``tunedModels/my-tuned-model``
        base_model (str):
            Output only. The name of the base ``Model`` this
            ``TunedModel`` was tuned from. Example:
            ``models/gemini-1.5-flash-001``
    """

    tuned_model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    base_model: str = proto.Field(
        proto.STRING,
        number=2,
    )
230
+
231
+
232
class TuningTask(proto.Message):
    r"""Tuning tasks that create tuned models.

    Attributes:
        start_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when tuning this
            model started.
        complete_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when tuning this
            model completed.
        snapshots (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningSnapshot]):
            Output only. Metrics collected during tuning.
        training_data (google.ai.generativelanguage_v1beta.types.Dataset):
            Required. Input only. Immutable. The model
            training data.
        hyperparameters (google.ai.generativelanguage_v1beta.types.Hyperparameters):
            Immutable. Hyperparameters controlling the
            tuning process. If not provided, default values
            will be used.
    """

    start_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=1,
        message=timestamp_pb2.Timestamp,
    )
    complete_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=2,
        message=timestamp_pb2.Timestamp,
    )
    snapshots: MutableSequence["TuningSnapshot"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="TuningSnapshot",
    )
    training_data: "Dataset" = proto.Field(
        proto.MESSAGE,
        number=4,
        message="Dataset",
    )
    hyperparameters: "Hyperparameters" = proto.Field(
        proto.MESSAGE,
        number=5,
        message="Hyperparameters",
    )
278
+
279
+
280
class Hyperparameters(proto.Message):
    r"""Hyperparameters controlling the tuning process. Read more at
    https://ai.google.dev/docs/model_tuning_guidance

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        learning_rate (float):
            Optional. Immutable. The learning rate
            hyperparameter for tuning. If not set, a default
            of 0.001 or 0.0002 will be calculated based on
            the number of training examples.

            This field is a member of `oneof`_ ``learning_rate_option``.
        learning_rate_multiplier (float):
            Optional. Immutable. The learning rate multiplier is used to
            calculate a final learning_rate based on the default
            (recommended) value. Actual learning rate :=
            learning_rate_multiplier \* default learning rate Default
            learning rate is dependent on base model and dataset size.
            If not set, a default of 1.0 will be used.

            This field is a member of `oneof`_ ``learning_rate_option``.
        epoch_count (int):
            Immutable. The number of training epochs. An
            epoch is one pass through the training data. If
            not set, a default of 5 will be used.

            This field is a member of `oneof`_ ``_epoch_count``.
        batch_size (int):
            Immutable. The batch size hyperparameter for
            tuning. If not set, a default of 4 or 16 will be
            used based on the number of training examples.

            This field is a member of `oneof`_ ``_batch_size``.
    """

    # learning_rate and learning_rate_multiplier share a real proto oneof
    # (`learning_rate_option`); epoch_count / batch_size are independent
    # proto3-optional scalars.
    learning_rate: float = proto.Field(
        proto.FLOAT,
        number=16,
        oneof="learning_rate_option",
    )
    learning_rate_multiplier: float = proto.Field(
        proto.FLOAT,
        number=17,
        oneof="learning_rate_option",
    )
    epoch_count: int = proto.Field(
        proto.INT32,
        number=14,
        optional=True,
    )
    batch_size: int = proto.Field(
        proto.INT32,
        number=15,
        optional=True,
    )
342
+
343
+
344
class Dataset(proto.Message):
    r"""Dataset for training or validation.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        examples (google.ai.generativelanguage_v1beta.types.TuningExamples):
            Optional. Inline examples with simple
            input/output text.

            This field is a member of `oneof`_ ``dataset``.
    """

    examples: "TuningExamples" = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="dataset",
        message="TuningExamples",
    )
363
+
364
+
365
class TuningExamples(proto.Message):
    r"""A set of tuning examples. Can be training or validation data.

    Attributes:
        examples (MutableSequence[google.ai.generativelanguage_v1beta.types.TuningExample]):
            The examples. Example input can be for text
            or discuss, but all examples in a set must be of
            the same type.
    """

    examples: MutableSequence["TuningExample"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="TuningExample",
    )
380
+
381
+
382
class TuningExample(proto.Message):
    r"""A single example for tuning.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        text_input (str):
            Optional. Text model input.

            This field is a member of `oneof`_ ``model_input``.
        output (str):
            Required. The expected model output.
    """

    text_input: str = proto.Field(
        proto.STRING,
        number=1,
        oneof="model_input",
    )
    output: str = proto.Field(
        proto.STRING,
        number=3,
    )
405
+
406
+
407
class TuningSnapshot(proto.Message):
    r"""Record for a single tuning step.

    Attributes:
        step (int):
            Output only. The tuning step.
        epoch (int):
            Output only. The epoch this step was part of.
        mean_loss (float):
            Output only. The mean loss of the training
            examples for this step.
        compute_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when this metric
            was computed.
    """

    step: int = proto.Field(
        proto.INT32,
        number=1,
    )
    epoch: int = proto.Field(
        proto.INT32,
        number=2,
    )
    mean_loss: float = proto.Field(
        proto.FLOAT,
        number=3,
    )
    compute_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
440
+
441
+
442
# Public API of this module is exactly the set of names registered in the
# proto.module manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (2.04 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/__pycache__/gapic_version.cpython-311.pyc ADDED
Binary file (237 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
# Re-export the sync and async client classes as the package's public surface.
from .async_client import DiscussServiceAsyncClient
from .client import DiscussServiceClient

__all__ = (
    "DiscussServiceClient",
    "DiscussServiceAsyncClient",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (409 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (26.7 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (43.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/async_client.py ADDED
@@ -0,0 +1,628 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1beta2 import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.ai.generativelanguage_v1beta2.types import discuss_service, safety
47
+
48
+ from .client import DiscussServiceClient
49
+ from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
50
+ from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+
62
+ class DiscussServiceAsyncClient:
63
+ """An API for using Generative Language Models (GLMs) in dialog
64
+ applications.
65
+ Also known as large language models (LLMs), this API provides
66
+ models that are trained for multi-turn dialog.
67
+ """
68
+
69
+ _client: DiscussServiceClient
70
+
71
+ # Copy defaults from the synchronous client for use here.
72
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
73
+ DEFAULT_ENDPOINT = DiscussServiceClient.DEFAULT_ENDPOINT
74
+ DEFAULT_MTLS_ENDPOINT = DiscussServiceClient.DEFAULT_MTLS_ENDPOINT
75
+ _DEFAULT_ENDPOINT_TEMPLATE = DiscussServiceClient._DEFAULT_ENDPOINT_TEMPLATE
76
+ _DEFAULT_UNIVERSE = DiscussServiceClient._DEFAULT_UNIVERSE
77
+
78
+ model_path = staticmethod(DiscussServiceClient.model_path)
79
+ parse_model_path = staticmethod(DiscussServiceClient.parse_model_path)
80
+ common_billing_account_path = staticmethod(
81
+ DiscussServiceClient.common_billing_account_path
82
+ )
83
+ parse_common_billing_account_path = staticmethod(
84
+ DiscussServiceClient.parse_common_billing_account_path
85
+ )
86
+ common_folder_path = staticmethod(DiscussServiceClient.common_folder_path)
87
+ parse_common_folder_path = staticmethod(
88
+ DiscussServiceClient.parse_common_folder_path
89
+ )
90
+ common_organization_path = staticmethod(
91
+ DiscussServiceClient.common_organization_path
92
+ )
93
+ parse_common_organization_path = staticmethod(
94
+ DiscussServiceClient.parse_common_organization_path
95
+ )
96
+ common_project_path = staticmethod(DiscussServiceClient.common_project_path)
97
+ parse_common_project_path = staticmethod(
98
+ DiscussServiceClient.parse_common_project_path
99
+ )
100
+ common_location_path = staticmethod(DiscussServiceClient.common_location_path)
101
+ parse_common_location_path = staticmethod(
102
+ DiscussServiceClient.parse_common_location_path
103
+ )
104
+
105
+ @classmethod
106
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
107
+ """Creates an instance of this client using the provided credentials
108
+ info.
109
+
110
+ Args:
111
+ info (dict): The service account private key info.
112
+ args: Additional arguments to pass to the constructor.
113
+ kwargs: Additional arguments to pass to the constructor.
114
+
115
+ Returns:
116
+ DiscussServiceAsyncClient: The constructed client.
117
+ """
118
+ return DiscussServiceClient.from_service_account_info.__func__(DiscussServiceAsyncClient, info, *args, **kwargs) # type: ignore
119
+
120
+ @classmethod
121
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
122
+ """Creates an instance of this client using the provided credentials
123
+ file.
124
+
125
+ Args:
126
+ filename (str): The path to the service account private key json
127
+ file.
128
+ args: Additional arguments to pass to the constructor.
129
+ kwargs: Additional arguments to pass to the constructor.
130
+
131
+ Returns:
132
+ DiscussServiceAsyncClient: The constructed client.
133
+ """
134
+ return DiscussServiceClient.from_service_account_file.__func__(DiscussServiceAsyncClient, filename, *args, **kwargs) # type: ignore
135
+
136
+ from_service_account_json = from_service_account_file
137
+
138
+ @classmethod
139
+ def get_mtls_endpoint_and_cert_source(
140
+ cls, client_options: Optional[ClientOptions] = None
141
+ ):
142
+ """Return the API endpoint and client cert source for mutual TLS.
143
+
144
+ The client cert source is determined in the following order:
145
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
146
+ client cert source is None.
147
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
148
+ default client cert source exists, use the default one; otherwise the client cert
149
+ source is None.
150
+
151
+ The API endpoint is determined in the following order:
152
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
153
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
154
+ default mTLS endpoint; if the environment variable is "never", use the default API
155
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
156
+ use the default API endpoint.
157
+
158
+ More details can be found at https://google.aip.dev/auth/4114.
159
+
160
+ Args:
161
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
162
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
163
+ in this method.
164
+
165
+ Returns:
166
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
167
+ client cert source to use.
168
+
169
+ Raises:
170
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
171
+ """
172
+ return DiscussServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
173
+
174
+ @property
175
+ def transport(self) -> DiscussServiceTransport:
176
+ """Returns the transport used by the client instance.
177
+
178
+ Returns:
179
+ DiscussServiceTransport: The transport used by the client instance.
180
+ """
181
+ return self._client.transport
182
+
183
+ @property
184
+ def api_endpoint(self):
185
+ """Return the API endpoint used by the client instance.
186
+
187
+ Returns:
188
+ str: The API endpoint used by the client instance.
189
+ """
190
+ return self._client._api_endpoint
191
+
192
+ @property
193
+ def universe_domain(self) -> str:
194
+ """Return the universe domain used by the client instance.
195
+
196
+ Returns:
197
+ str: The universe domain used
198
+ by the client instance.
199
+ """
200
+ return self._client._universe_domain
201
+
202
+ get_transport_class = DiscussServiceClient.get_transport_class
203
+
204
+ def __init__(
205
+ self,
206
+ *,
207
+ credentials: Optional[ga_credentials.Credentials] = None,
208
+ transport: Optional[
209
+ Union[str, DiscussServiceTransport, Callable[..., DiscussServiceTransport]]
210
+ ] = "grpc_asyncio",
211
+ client_options: Optional[ClientOptions] = None,
212
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
213
+ ) -> None:
214
+ """Instantiates the discuss service async client.
215
+
216
+ Args:
217
+ credentials (Optional[google.auth.credentials.Credentials]): The
218
+ authorization credentials to attach to requests. These
219
+ credentials identify the application to the service; if none
220
+ are specified, the client will attempt to ascertain the
221
+ credentials from the environment.
222
+ transport (Optional[Union[str,DiscussServiceTransport,Callable[..., DiscussServiceTransport]]]):
223
+ The transport to use, or a Callable that constructs and returns a new transport to use.
224
+ If a Callable is given, it will be called with the same set of initialization
225
+ arguments as used in the DiscussServiceTransport constructor.
226
+ If set to None, a transport is chosen automatically.
227
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
228
+ Custom options for the client.
229
+
230
+ 1. The ``api_endpoint`` property can be used to override the
231
+ default endpoint provided by the client when ``transport`` is
232
+ not explicitly provided. Only if this property is not set and
233
+ ``transport`` was not explicitly provided, the endpoint is
234
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
235
+ variable, which have one of the following values:
236
+ "always" (always use the default mTLS endpoint), "never" (always
237
+ use the default regular endpoint) and "auto" (auto-switch to the
238
+ default mTLS endpoint if client certificate is present; this is
239
+ the default value).
240
+
241
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
242
+ is "true", then the ``client_cert_source`` property can be used
243
+ to provide a client certificate for mTLS transport. If
244
+ not provided, the default SSL client certificate will be used if
245
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
246
+ set, no client certificate will be used.
247
+
248
+ 3. The ``universe_domain`` property can be used to override the
249
+ default "googleapis.com" universe. Note that ``api_endpoint``
250
+ property still takes precedence; and ``universe_domain`` is
251
+ currently not supported for mTLS.
252
+
253
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
254
+ The client info used to send a user-agent string along with
255
+ API requests. If ``None``, then default info will be used.
256
+ Generally, you only need to set this if you're developing
257
+ your own client library.
258
+
259
+ Raises:
260
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
261
+ creation failed for any reason.
262
+ """
263
+ self._client = DiscussServiceClient(
264
+ credentials=credentials,
265
+ transport=transport,
266
+ client_options=client_options,
267
+ client_info=client_info,
268
+ )
269
+
270
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
271
+ std_logging.DEBUG
272
+ ): # pragma: NO COVER
273
+ _LOGGER.debug(
274
+ "Created client `google.ai.generativelanguage_v1beta2.DiscussServiceAsyncClient`.",
275
+ extra={
276
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
277
+ "universeDomain": getattr(
278
+ self._client._transport._credentials, "universe_domain", ""
279
+ ),
280
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
281
+ "credentialsInfo": getattr(
282
+ self.transport._credentials, "get_cred_info", lambda: None
283
+ )(),
284
+ }
285
+ if hasattr(self._client._transport, "_credentials")
286
+ else {
287
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
288
+ "credentialsType": None,
289
+ },
290
+ )
291
+
292
+ async def generate_message(
293
+ self,
294
+ request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None,
295
+ *,
296
+ model: Optional[str] = None,
297
+ prompt: Optional[discuss_service.MessagePrompt] = None,
298
+ temperature: Optional[float] = None,
299
+ candidate_count: Optional[int] = None,
300
+ top_p: Optional[float] = None,
301
+ top_k: Optional[int] = None,
302
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
303
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
304
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
305
+ ) -> discuss_service.GenerateMessageResponse:
306
+ r"""Generates a response from the model given an input
307
+ ``MessagePrompt``.
308
+
309
+ .. code-block:: python
310
+
311
+ # This snippet has been automatically generated and should be regarded as a
312
+ # code template only.
313
+ # It will require modifications to work:
314
+ # - It may require correct/in-range values for request initialization.
315
+ # - It may require specifying regional endpoints when creating the service
316
+ # client as shown in:
317
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
318
+ from google.ai import generativelanguage_v1beta2
319
+
320
+ async def sample_generate_message():
321
+ # Create a client
322
+ client = generativelanguage_v1beta2.DiscussServiceAsyncClient()
323
+
324
+ # Initialize request argument(s)
325
+ prompt = generativelanguage_v1beta2.MessagePrompt()
326
+ prompt.messages.content = "content_value"
327
+
328
+ request = generativelanguage_v1beta2.GenerateMessageRequest(
329
+ model="model_value",
330
+ prompt=prompt,
331
+ )
332
+
333
+ # Make the request
334
+ response = await client.generate_message(request=request)
335
+
336
+ # Handle the response
337
+ print(response)
338
+
339
+ Args:
340
+ request (Optional[Union[google.ai.generativelanguage_v1beta2.types.GenerateMessageRequest, dict]]):
341
+ The request object. Request to generate a message
342
+ response from the model.
343
+ model (:class:`str`):
344
+ Required. The name of the model to use.
345
+
346
+ Format: ``name=models/{model}``.
347
+
348
+ This corresponds to the ``model`` field
349
+ on the ``request`` instance; if ``request`` is provided, this
350
+ should not be set.
351
+ prompt (:class:`google.ai.generativelanguage_v1beta2.types.MessagePrompt`):
352
+ Required. The structured textual
353
+ input given to the model as a prompt.
354
+ Given a
355
+ prompt, the model will return what it
356
+ predicts is the next message in the
357
+ discussion.
358
+
359
+ This corresponds to the ``prompt`` field
360
+ on the ``request`` instance; if ``request`` is provided, this
361
+ should not be set.
362
+ temperature (:class:`float`):
363
+ Optional. Controls the randomness of the output.
364
+
365
+ Values can range over ``[0.0,1.0]``, inclusive. A value
366
+ closer to ``1.0`` will produce responses that are more
367
+ varied, while a value closer to ``0.0`` will typically
368
+ result in less surprising responses from the model.
369
+
370
+ This corresponds to the ``temperature`` field
371
+ on the ``request`` instance; if ``request`` is provided, this
372
+ should not be set.
373
+ candidate_count (:class:`int`):
374
+ Optional. The number of generated response messages to
375
+ return.
376
+
377
+ This value must be between ``[1, 8]``, inclusive. If
378
+ unset, this will default to ``1``.
379
+
380
+ This corresponds to the ``candidate_count`` field
381
+ on the ``request`` instance; if ``request`` is provided, this
382
+ should not be set.
383
+ top_p (:class:`float`):
384
+ Optional. The maximum cumulative probability of tokens
385
+ to consider when sampling.
386
+
387
+ The model uses combined Top-k and nucleus sampling.
388
+
389
+ Nucleus sampling considers the smallest set of tokens
390
+ whose probability sum is at least ``top_p``.
391
+
392
+ This corresponds to the ``top_p`` field
393
+ on the ``request`` instance; if ``request`` is provided, this
394
+ should not be set.
395
+ top_k (:class:`int`):
396
+ Optional. The maximum number of tokens to consider when
397
+ sampling.
398
+
399
+ The model uses combined Top-k and nucleus sampling.
400
+
401
+ Top-k sampling considers the set of ``top_k`` most
402
+ probable tokens.
403
+
404
+ This corresponds to the ``top_k`` field
405
+ on the ``request`` instance; if ``request`` is provided, this
406
+ should not be set.
407
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
408
+ should be retried.
409
+ timeout (float): The timeout for this request.
410
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
411
+ sent along with the request as metadata. Normally, each value must be of type `str`,
412
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
413
+ be of type `bytes`.
414
+
415
+ Returns:
416
+ google.ai.generativelanguage_v1beta2.types.GenerateMessageResponse:
417
+ The response from the model.
418
+
419
+ This includes candidate messages and
420
+ conversation history in the form of
421
+ chronologically-ordered messages.
422
+
423
+ """
424
+ # Create or coerce a protobuf request object.
425
+ # - Quick check: If we got a request object, we should *not* have
426
+ # gotten any keyword arguments that map to the request.
427
+ has_flattened_params = any(
428
+ [model, prompt, temperature, candidate_count, top_p, top_k]
429
+ )
430
+ if request is not None and has_flattened_params:
431
+ raise ValueError(
432
+ "If the `request` argument is set, then none of "
433
+ "the individual field arguments should be set."
434
+ )
435
+
436
+ # - Use the request object if provided (there's no risk of modifying the input as
437
+ # there are no flattened fields), or create one.
438
+ if not isinstance(request, discuss_service.GenerateMessageRequest):
439
+ request = discuss_service.GenerateMessageRequest(request)
440
+
441
+ # If we have keyword arguments corresponding to fields on the
442
+ # request, apply these.
443
+ if model is not None:
444
+ request.model = model
445
+ if prompt is not None:
446
+ request.prompt = prompt
447
+ if temperature is not None:
448
+ request.temperature = temperature
449
+ if candidate_count is not None:
450
+ request.candidate_count = candidate_count
451
+ if top_p is not None:
452
+ request.top_p = top_p
453
+ if top_k is not None:
454
+ request.top_k = top_k
455
+
456
+ # Wrap the RPC method; this adds retry and timeout information,
457
+ # and friendly error handling.
458
+ rpc = self._client._transport._wrapped_methods[
459
+ self._client._transport.generate_message
460
+ ]
461
+
462
+ # Certain fields should be provided within the metadata header;
463
+ # add these here.
464
+ metadata = tuple(metadata) + (
465
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
466
+ )
467
+
468
+ # Validate the universe domain.
469
+ self._client._validate_universe_domain()
470
+
471
+ # Send the request.
472
+ response = await rpc(
473
+ request,
474
+ retry=retry,
475
+ timeout=timeout,
476
+ metadata=metadata,
477
+ )
478
+
479
+ # Done; return the response.
480
+ return response
481
+
482
+ async def count_message_tokens(
483
+ self,
484
+ request: Optional[
485
+ Union[discuss_service.CountMessageTokensRequest, dict]
486
+ ] = None,
487
+ *,
488
+ model: Optional[str] = None,
489
+ prompt: Optional[discuss_service.MessagePrompt] = None,
490
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
491
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
492
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
493
+ ) -> discuss_service.CountMessageTokensResponse:
494
+ r"""Runs a model's tokenizer on a string and returns the
495
+ token count.
496
+
497
+ .. code-block:: python
498
+
499
+ # This snippet has been automatically generated and should be regarded as a
500
+ # code template only.
501
+ # It will require modifications to work:
502
+ # - It may require correct/in-range values for request initialization.
503
+ # - It may require specifying regional endpoints when creating the service
504
+ # client as shown in:
505
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
506
+ from google.ai import generativelanguage_v1beta2
507
+
508
+ async def sample_count_message_tokens():
509
+ # Create a client
510
+ client = generativelanguage_v1beta2.DiscussServiceAsyncClient()
511
+
512
+ # Initialize request argument(s)
513
+ prompt = generativelanguage_v1beta2.MessagePrompt()
514
+ prompt.messages.content = "content_value"
515
+
516
+ request = generativelanguage_v1beta2.CountMessageTokensRequest(
517
+ model="model_value",
518
+ prompt=prompt,
519
+ )
520
+
521
+ # Make the request
522
+ response = await client.count_message_tokens(request=request)
523
+
524
+ # Handle the response
525
+ print(response)
526
+
527
+ Args:
528
+ request (Optional[Union[google.ai.generativelanguage_v1beta2.types.CountMessageTokensRequest, dict]]):
529
+ The request object. Counts the number of tokens in the ``prompt`` sent to a
530
+ model.
531
+
532
+ Models may tokenize text differently, so each model may
533
+ return a different ``token_count``.
534
+ model (:class:`str`):
535
+ Required. The model's resource name. This serves as an
536
+ ID for the Model to use.
537
+
538
+ This name should match a model name returned by the
539
+ ``ListModels`` method.
540
+
541
+ Format: ``models/{model}``
542
+
543
+ This corresponds to the ``model`` field
544
+ on the ``request`` instance; if ``request`` is provided, this
545
+ should not be set.
546
+ prompt (:class:`google.ai.generativelanguage_v1beta2.types.MessagePrompt`):
547
+ Required. The prompt, whose token
548
+ count is to be returned.
549
+
550
+ This corresponds to the ``prompt`` field
551
+ on the ``request`` instance; if ``request`` is provided, this
552
+ should not be set.
553
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
554
+ should be retried.
555
+ timeout (float): The timeout for this request.
556
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
557
+ sent along with the request as metadata. Normally, each value must be of type `str`,
558
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
559
+ be of type `bytes`.
560
+
561
+ Returns:
562
+ google.ai.generativelanguage_v1beta2.types.CountMessageTokensResponse:
563
+ A response from CountMessageTokens.
564
+
565
+ It returns the model's token_count for the prompt.
566
+
567
+ """
568
+ # Create or coerce a protobuf request object.
569
+ # - Quick check: If we got a request object, we should *not* have
570
+ # gotten any keyword arguments that map to the request.
571
+ has_flattened_params = any([model, prompt])
572
+ if request is not None and has_flattened_params:
573
+ raise ValueError(
574
+ "If the `request` argument is set, then none of "
575
+ "the individual field arguments should be set."
576
+ )
577
+
578
+ # - Use the request object if provided (there's no risk of modifying the input as
579
+ # there are no flattened fields), or create one.
580
+ if not isinstance(request, discuss_service.CountMessageTokensRequest):
581
+ request = discuss_service.CountMessageTokensRequest(request)
582
+
583
+ # If we have keyword arguments corresponding to fields on the
584
+ # request, apply these.
585
+ if model is not None:
586
+ request.model = model
587
+ if prompt is not None:
588
+ request.prompt = prompt
589
+
590
+ # Wrap the RPC method; this adds retry and timeout information,
591
+ # and friendly error handling.
592
+ rpc = self._client._transport._wrapped_methods[
593
+ self._client._transport.count_message_tokens
594
+ ]
595
+
596
+ # Certain fields should be provided within the metadata header;
597
+ # add these here.
598
+ metadata = tuple(metadata) + (
599
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
600
+ )
601
+
602
+ # Validate the universe domain.
603
+ self._client._validate_universe_domain()
604
+
605
+ # Send the request.
606
+ response = await rpc(
607
+ request,
608
+ retry=retry,
609
+ timeout=timeout,
610
+ metadata=metadata,
611
+ )
612
+
613
+ # Done; return the response.
614
+ return response
615
+
616
+ async def __aenter__(self) -> "DiscussServiceAsyncClient":
617
+ return self
618
+
619
+ async def __aexit__(self, exc_type, exc, tb):
620
+ await self.transport.close()
621
+
622
+
623
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
624
+ gapic_version=package_version.__version__
625
+ )
626
+
627
+
628
+ __all__ = ("DiscussServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/client.py ADDED
@@ -0,0 +1,1016 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1beta2 import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.ai.generativelanguage_v1beta2.types import discuss_service, safety
62
+
63
+ from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
64
+ from .transports.grpc import DiscussServiceGrpcTransport
65
+ from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
66
+ from .transports.rest import DiscussServiceRestTransport
67
+
68
+
69
+ class DiscussServiceClientMeta(type):
70
+ """Metaclass for the DiscussService client.
71
+
72
+ This provides class-level methods for building and retrieving
73
+ support objects (e.g. transport) without polluting the client instance
74
+ objects.
75
+ """
76
+
77
+ _transport_registry = (
78
+ OrderedDict()
79
+ ) # type: Dict[str, Type[DiscussServiceTransport]]
80
+ _transport_registry["grpc"] = DiscussServiceGrpcTransport
81
+ _transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport
82
+ _transport_registry["rest"] = DiscussServiceRestTransport
83
+
84
+ def get_transport_class(
85
+ cls,
86
+ label: Optional[str] = None,
87
+ ) -> Type[DiscussServiceTransport]:
88
+ """Returns an appropriate transport class.
89
+
90
+ Args:
91
+ label: The name of the desired transport. If none is
92
+ provided, then the first transport in the registry is used.
93
+
94
+ Returns:
95
+ The transport class to use.
96
+ """
97
+ # If a specific transport is requested, return that one.
98
+ if label:
99
+ return cls._transport_registry[label]
100
+
101
+ # No transport is requested; return the default (that is, the first one
102
+ # in the dictionary).
103
+ return next(iter(cls._transport_registry.values()))
104
+
105
+
106
class DiscussServiceClient(metaclass=DiscussServiceClientMeta):
    """An API for using Generative Language Models (GLMs) in dialog
    applications.
    Also known as large language models (LLMs), this API provides
    models that are trained for multi-turn dialog.
    """
    # NOTE(review): this looks like GAPIC generator output — changes here
    # are normally made in the generator config, not by hand; confirm
    # before hand-editing.
112
+
113
+ @staticmethod
114
+ def _get_default_mtls_endpoint(api_endpoint):
115
+ """Converts api endpoint to mTLS endpoint.
116
+
117
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
118
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
119
+ Args:
120
+ api_endpoint (Optional[str]): the api endpoint to convert.
121
+ Returns:
122
+ str: converted mTLS api endpoint.
123
+ """
124
+ if not api_endpoint:
125
+ return api_endpoint
126
+
127
+ mtls_endpoint_re = re.compile(
128
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
129
+ )
130
+
131
+ m = mtls_endpoint_re.match(api_endpoint)
132
+ name, mtls, sandbox, googledomain = m.groups()
133
+ if mtls or not googledomain:
134
+ return api_endpoint
135
+
136
+ if sandbox:
137
+ return api_endpoint.replace(
138
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
139
+ )
140
+
141
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
142
+
143
    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
    # Computed once at class-creation time from DEFAULT_ENDPOINT; the
    # __func__ unwrap is needed because the staticmethod descriptor is
    # not yet callable inside the class body.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    # Endpoint template parameterized by universe domain; the default
    # universe is the public googleapis.com.
    _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
    _DEFAULT_UNIVERSE = "googleapis.com"
151
+
152
+ @classmethod
153
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
154
+ """Creates an instance of this client using the provided credentials
155
+ info.
156
+
157
+ Args:
158
+ info (dict): The service account private key info.
159
+ args: Additional arguments to pass to the constructor.
160
+ kwargs: Additional arguments to pass to the constructor.
161
+
162
+ Returns:
163
+ DiscussServiceClient: The constructed client.
164
+ """
165
+ credentials = service_account.Credentials.from_service_account_info(info)
166
+ kwargs["credentials"] = credentials
167
+ return cls(*args, **kwargs)
168
+
169
+ @classmethod
170
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
171
+ """Creates an instance of this client using the provided credentials
172
+ file.
173
+
174
+ Args:
175
+ filename (str): The path to the service account private key json
176
+ file.
177
+ args: Additional arguments to pass to the constructor.
178
+ kwargs: Additional arguments to pass to the constructor.
179
+
180
+ Returns:
181
+ DiscussServiceClient: The constructed client.
182
+ """
183
+ credentials = service_account.Credentials.from_service_account_file(filename)
184
+ kwargs["credentials"] = credentials
185
+ return cls(*args, **kwargs)
186
+
187
+ from_service_account_json = from_service_account_file
188
+
189
+ @property
190
+ def transport(self) -> DiscussServiceTransport:
191
+ """Returns the transport used by the client instance.
192
+
193
+ Returns:
194
+ DiscussServiceTransport: The transport used by the client
195
+ instance.
196
+ """
197
+ return self._transport
198
+
199
+ @staticmethod
200
+ def model_path(
201
+ model: str,
202
+ ) -> str:
203
+ """Returns a fully-qualified model string."""
204
+ return "models/{model}".format(
205
+ model=model,
206
+ )
207
+
208
+ @staticmethod
209
+ def parse_model_path(path: str) -> Dict[str, str]:
210
+ """Parses a model path into its component segments."""
211
+ m = re.match(r"^models/(?P<model>.+?)$", path)
212
+ return m.groupdict() if m else {}
213
+
214
+ @staticmethod
215
+ def common_billing_account_path(
216
+ billing_account: str,
217
+ ) -> str:
218
+ """Returns a fully-qualified billing_account string."""
219
+ return "billingAccounts/{billing_account}".format(
220
+ billing_account=billing_account,
221
+ )
222
+
223
+ @staticmethod
224
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
225
+ """Parse a billing_account path into its component segments."""
226
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
227
+ return m.groupdict() if m else {}
228
+
229
+ @staticmethod
230
+ def common_folder_path(
231
+ folder: str,
232
+ ) -> str:
233
+ """Returns a fully-qualified folder string."""
234
+ return "folders/{folder}".format(
235
+ folder=folder,
236
+ )
237
+
238
+ @staticmethod
239
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
240
+ """Parse a folder path into its component segments."""
241
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
242
+ return m.groupdict() if m else {}
243
+
244
+ @staticmethod
245
+ def common_organization_path(
246
+ organization: str,
247
+ ) -> str:
248
+ """Returns a fully-qualified organization string."""
249
+ return "organizations/{organization}".format(
250
+ organization=organization,
251
+ )
252
+
253
+ @staticmethod
254
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
255
+ """Parse a organization path into its component segments."""
256
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
257
+ return m.groupdict() if m else {}
258
+
259
+ @staticmethod
260
+ def common_project_path(
261
+ project: str,
262
+ ) -> str:
263
+ """Returns a fully-qualified project string."""
264
+ return "projects/{project}".format(
265
+ project=project,
266
+ )
267
+
268
+ @staticmethod
269
+ def parse_common_project_path(path: str) -> Dict[str, str]:
270
+ """Parse a project path into its component segments."""
271
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
272
+ return m.groupdict() if m else {}
273
+
274
+ @staticmethod
275
+ def common_location_path(
276
+ project: str,
277
+ location: str,
278
+ ) -> str:
279
+ """Returns a fully-qualified location string."""
280
+ return "projects/{project}/locations/{location}".format(
281
+ project=project,
282
+ location=location,
283
+ )
284
+
285
+ @staticmethod
286
+ def parse_common_location_path(path: str) -> Dict[str, str]:
287
+ """Parse a location path into its component segments."""
288
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
289
+ return m.groupdict() if m else {}
290
+
291
+ @classmethod
292
+ def get_mtls_endpoint_and_cert_source(
293
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
294
+ ):
295
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
296
+
297
+ The client cert source is determined in the following order:
298
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
299
+ client cert source is None.
300
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
301
+ default client cert source exists, use the default one; otherwise the client cert
302
+ source is None.
303
+
304
+ The API endpoint is determined in the following order:
305
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
306
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
307
+ default mTLS endpoint; if the environment variable is "never", use the default API
308
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
309
+ use the default API endpoint.
310
+
311
+ More details can be found at https://google.aip.dev/auth/4114.
312
+
313
+ Args:
314
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
315
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
316
+ in this method.
317
+
318
+ Returns:
319
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
320
+ client cert source to use.
321
+
322
+ Raises:
323
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
324
+ """
325
+
326
+ warnings.warn(
327
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
328
+ DeprecationWarning,
329
+ )
330
+ if client_options is None:
331
+ client_options = client_options_lib.ClientOptions()
332
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
333
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
334
+ if use_client_cert not in ("true", "false"):
335
+ raise ValueError(
336
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
337
+ )
338
+ if use_mtls_endpoint not in ("auto", "never", "always"):
339
+ raise MutualTLSChannelError(
340
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
341
+ )
342
+
343
+ # Figure out the client cert source to use.
344
+ client_cert_source = None
345
+ if use_client_cert == "true":
346
+ if client_options.client_cert_source:
347
+ client_cert_source = client_options.client_cert_source
348
+ elif mtls.has_default_client_cert_source():
349
+ client_cert_source = mtls.default_client_cert_source()
350
+
351
+ # Figure out which api endpoint to use.
352
+ if client_options.api_endpoint is not None:
353
+ api_endpoint = client_options.api_endpoint
354
+ elif use_mtls_endpoint == "always" or (
355
+ use_mtls_endpoint == "auto" and client_cert_source
356
+ ):
357
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
358
+ else:
359
+ api_endpoint = cls.DEFAULT_ENDPOINT
360
+
361
+ return api_endpoint, client_cert_source
362
+
363
+ @staticmethod
364
+ def _read_environment_variables():
365
+ """Returns the environment variables used by the client.
366
+
367
+ Returns:
368
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
369
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
370
+
371
+ Raises:
372
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
373
+ any of ["true", "false"].
374
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
375
+ is not any of ["auto", "never", "always"].
376
+ """
377
+ use_client_cert = os.getenv(
378
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
379
+ ).lower()
380
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
381
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
382
+ if use_client_cert not in ("true", "false"):
383
+ raise ValueError(
384
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
385
+ )
386
+ if use_mtls_endpoint not in ("auto", "never", "always"):
387
+ raise MutualTLSChannelError(
388
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
389
+ )
390
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
391
+
392
+ @staticmethod
393
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
394
+ """Return the client cert source to be used by the client.
395
+
396
+ Args:
397
+ provided_cert_source (bytes): The client certificate source provided.
398
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
399
+
400
+ Returns:
401
+ bytes or None: The client cert source to be used by the client.
402
+ """
403
+ client_cert_source = None
404
+ if use_cert_flag:
405
+ if provided_cert_source:
406
+ client_cert_source = provided_cert_source
407
+ elif mtls.has_default_client_cert_source():
408
+ client_cert_source = mtls.default_client_cert_source()
409
+ return client_cert_source
410
+
411
+ @staticmethod
412
+ def _get_api_endpoint(
413
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
414
+ ):
415
+ """Return the API endpoint used by the client.
416
+
417
+ Args:
418
+ api_override (str): The API endpoint override. If specified, this is always
419
+ the return value of this function and the other arguments are not used.
420
+ client_cert_source (bytes): The client certificate source used by the client.
421
+ universe_domain (str): The universe domain used by the client.
422
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
423
+ Possible values are "always", "auto", or "never".
424
+
425
+ Returns:
426
+ str: The API endpoint to be used by the client.
427
+ """
428
+ if api_override is not None:
429
+ api_endpoint = api_override
430
+ elif use_mtls_endpoint == "always" or (
431
+ use_mtls_endpoint == "auto" and client_cert_source
432
+ ):
433
+ _default_universe = DiscussServiceClient._DEFAULT_UNIVERSE
434
+ if universe_domain != _default_universe:
435
+ raise MutualTLSChannelError(
436
+ f"mTLS is not supported in any universe other than {_default_universe}."
437
+ )
438
+ api_endpoint = DiscussServiceClient.DEFAULT_MTLS_ENDPOINT
439
+ else:
440
+ api_endpoint = DiscussServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
441
+ UNIVERSE_DOMAIN=universe_domain
442
+ )
443
+ return api_endpoint
444
+
445
+ @staticmethod
446
+ def _get_universe_domain(
447
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
448
+ ) -> str:
449
+ """Return the universe domain used by the client.
450
+
451
+ Args:
452
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
453
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
454
+
455
+ Returns:
456
+ str: The universe domain to be used by the client.
457
+
458
+ Raises:
459
+ ValueError: If the universe domain is an empty string.
460
+ """
461
+ universe_domain = DiscussServiceClient._DEFAULT_UNIVERSE
462
+ if client_universe_domain is not None:
463
+ universe_domain = client_universe_domain
464
+ elif universe_domain_env is not None:
465
+ universe_domain = universe_domain_env
466
+ if len(universe_domain.strip()) == 0:
467
+ raise ValueError("Universe Domain cannot be an empty string.")
468
+ return universe_domain
469
+
470
+ def _validate_universe_domain(self):
471
+ """Validates client's and credentials' universe domains are consistent.
472
+
473
+ Returns:
474
+ bool: True iff the configured universe domain is valid.
475
+
476
+ Raises:
477
+ ValueError: If the configured universe domain is not valid.
478
+ """
479
+
480
+ # NOTE (b/349488459): universe validation is disabled until further notice.
481
+ return True
482
+
483
+ @property
484
+ def api_endpoint(self):
485
+ """Return the API endpoint used by the client instance.
486
+
487
+ Returns:
488
+ str: The API endpoint used by the client instance.
489
+ """
490
+ return self._api_endpoint
491
+
492
+ @property
493
+ def universe_domain(self) -> str:
494
+ """Return the universe domain used by the client instance.
495
+
496
+ Returns:
497
+ str: The universe domain used by the client instance.
498
+ """
499
+ return self._universe_domain
500
+
501
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[
            Union[str, DiscussServiceTransport, Callable[..., DiscussServiceTransport]]
        ] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the discuss service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. If none are
                given, the client attempts to ascertain credentials from the
                environment.
            transport (Optional[Union[str,DiscussServiceTransport,Callable[..., DiscussServiceTransport]]]):
                The transport to use: a registry label ("grpc",
                "grpc_asyncio", "rest"), a ready-made transport instance, or
                a callable constructing one (called with the same arguments
                as the DiscussServiceTransport constructor). If None, a
                transport is chosen automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                Custom options for the client. ``api_endpoint`` overrides
                the default endpoint (only honored when ``transport`` is not
                an instance); otherwise the endpoint follows the
                GOOGLE_API_USE_MTLS_ENDPOINT environment variable
                ("always" / "never" / "auto", default "auto"). If
                GOOGLE_API_USE_CLIENT_CERTIFICATE is "true",
                ``client_cert_source`` supplies a client certificate for
                mTLS, falling back to the default one if present.
                ``universe_domain`` overrides the default "googleapis.com"
                universe (``api_endpoint`` still takes precedence;
                ``universe_domain`` is currently not supported for mTLS).
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, None, or ClientOptions.
        self._client_options = client_options
        if isinstance(self._client_options, dict):
            self._client_options = client_options_lib.from_dict(self._client_options)
        if self._client_options is None:
            self._client_options = client_options_lib.ClientOptions()
        self._client_options = cast(
            client_options_lib.ClientOptions, self._client_options
        )

        universe_domain_opt = getattr(self._client_options, "universe_domain", None)

        (
            self._use_client_cert,
            self._use_mtls_endpoint,
            self._universe_domain_env,
        ) = DiscussServiceClient._read_environment_variables()
        self._client_cert_source = DiscussServiceClient._get_client_cert_source(
            self._client_options.client_cert_source, self._use_client_cert
        )
        self._universe_domain = DiscussServiceClient._get_universe_domain(
            universe_domain_opt, self._universe_domain_env
        )
        self._api_endpoint = None  # updated below, depending on `transport`

        # Initialize the universe domain validation.
        self._is_universe_domain_valid = False

        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
            # Setup logging.
            client_logging.initialize_logging()

        api_key_value = getattr(self._client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        transport_provided = isinstance(transport, DiscussServiceTransport)
        if transport_provided:
            # transport is a DiscussServiceTransport instance.
            # A transport instance carries its own credentials/scopes, so
            # passing them here as well is ambiguous and rejected.
            if credentials or self._client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if self._client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = cast(DiscussServiceTransport, transport)
            self._api_endpoint = self._transport.host

        # Transport host (if any) wins; otherwise derive the endpoint from
        # options, cert source, universe domain and the mTLS policy.
        self._api_endpoint = (
            self._api_endpoint
            or DiscussServiceClient._get_api_endpoint(
                self._client_options.api_endpoint,
                self._client_cert_source,
                self._universe_domain,
                self._use_mtls_endpoint,
            )
        )

        if not transport_provided:
            import google.auth._default  # type: ignore

            # Exchange an API key for credentials when supported by the
            # installed google-auth version.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            transport_init: Union[
                Type[DiscussServiceTransport], Callable[..., DiscussServiceTransport]
            ] = (
                DiscussServiceClient.get_transport_class(transport)
                if isinstance(transport, str) or transport is None
                else cast(Callable[..., DiscussServiceTransport], transport)
            )
            # initialize with the provided callable or the passed in class
            self._transport = transport_init(
                credentials=credentials,
                credentials_file=self._client_options.credentials_file,
                host=self._api_endpoint,
                scopes=self._client_options.scopes,
                client_cert_source_for_mtls=self._client_cert_source,
                quota_project_id=self._client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=self._client_options.api_audience,
            )

        # Emit a one-time DEBUG record describing the constructed client
        # (sync transports only, and only when client logging is enabled).
        if "async" not in str(self._transport):
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                std_logging.DEBUG
            ):  # pragma: NO COVER
                _LOGGER.debug(
                    "Created client `google.ai.generativelanguage_v1beta2.DiscussServiceClient`.",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
                        "universeDomain": getattr(
                            self._transport._credentials, "universe_domain", ""
                        ),
                        "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
                        "credentialsInfo": getattr(
                            self.transport._credentials, "get_cred_info", lambda: None
                        )(),
                    }
                    if hasattr(self._transport, "_credentials")
                    else {
                        "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
                        "credentialsType": None,
                    },
                )
678
+
679
+ def generate_message(
680
+ self,
681
+ request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None,
682
+ *,
683
+ model: Optional[str] = None,
684
+ prompt: Optional[discuss_service.MessagePrompt] = None,
685
+ temperature: Optional[float] = None,
686
+ candidate_count: Optional[int] = None,
687
+ top_p: Optional[float] = None,
688
+ top_k: Optional[int] = None,
689
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
690
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
691
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
692
+ ) -> discuss_service.GenerateMessageResponse:
693
+ r"""Generates a response from the model given an input
694
+ ``MessagePrompt``.
695
+
696
+ .. code-block:: python
697
+
698
+ # This snippet has been automatically generated and should be regarded as a
699
+ # code template only.
700
+ # It will require modifications to work:
701
+ # - It may require correct/in-range values for request initialization.
702
+ # - It may require specifying regional endpoints when creating the service
703
+ # client as shown in:
704
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
705
+ from google.ai import generativelanguage_v1beta2
706
+
707
+ def sample_generate_message():
708
+ # Create a client
709
+ client = generativelanguage_v1beta2.DiscussServiceClient()
710
+
711
+ # Initialize request argument(s)
712
+ prompt = generativelanguage_v1beta2.MessagePrompt()
713
+ prompt.messages.content = "content_value"
714
+
715
+ request = generativelanguage_v1beta2.GenerateMessageRequest(
716
+ model="model_value",
717
+ prompt=prompt,
718
+ )
719
+
720
+ # Make the request
721
+ response = client.generate_message(request=request)
722
+
723
+ # Handle the response
724
+ print(response)
725
+
726
+ Args:
727
+ request (Union[google.ai.generativelanguage_v1beta2.types.GenerateMessageRequest, dict]):
728
+ The request object. Request to generate a message
729
+ response from the model.
730
+ model (str):
731
+ Required. The name of the model to use.
732
+
733
+ Format: ``name=models/{model}``.
734
+
735
+ This corresponds to the ``model`` field
736
+ on the ``request`` instance; if ``request`` is provided, this
737
+ should not be set.
738
+ prompt (google.ai.generativelanguage_v1beta2.types.MessagePrompt):
739
+ Required. The structured textual
740
+ input given to the model as a prompt.
741
+ Given a
742
+ prompt, the model will return what it
743
+ predicts is the next message in the
744
+ discussion.
745
+
746
+ This corresponds to the ``prompt`` field
747
+ on the ``request`` instance; if ``request`` is provided, this
748
+ should not be set.
749
+ temperature (float):
750
+ Optional. Controls the randomness of the output.
751
+
752
+ Values can range over ``[0.0,1.0]``, inclusive. A value
753
+ closer to ``1.0`` will produce responses that are more
754
+ varied, while a value closer to ``0.0`` will typically
755
+ result in less surprising responses from the model.
756
+
757
+ This corresponds to the ``temperature`` field
758
+ on the ``request`` instance; if ``request`` is provided, this
759
+ should not be set.
760
+ candidate_count (int):
761
+ Optional. The number of generated response messages to
762
+ return.
763
+
764
+ This value must be between ``[1, 8]``, inclusive. If
765
+ unset, this will default to ``1``.
766
+
767
+ This corresponds to the ``candidate_count`` field
768
+ on the ``request`` instance; if ``request`` is provided, this
769
+ should not be set.
770
+ top_p (float):
771
+ Optional. The maximum cumulative probability of tokens
772
+ to consider when sampling.
773
+
774
+ The model uses combined Top-k and nucleus sampling.
775
+
776
+ Nucleus sampling considers the smallest set of tokens
777
+ whose probability sum is at least ``top_p``.
778
+
779
+ This corresponds to the ``top_p`` field
780
+ on the ``request`` instance; if ``request`` is provided, this
781
+ should not be set.
782
+ top_k (int):
783
+ Optional. The maximum number of tokens to consider when
784
+ sampling.
785
+
786
+ The model uses combined Top-k and nucleus sampling.
787
+
788
+ Top-k sampling considers the set of ``top_k`` most
789
+ probable tokens.
790
+
791
+ This corresponds to the ``top_k`` field
792
+ on the ``request`` instance; if ``request`` is provided, this
793
+ should not be set.
794
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
795
+ should be retried.
796
+ timeout (float): The timeout for this request.
797
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
798
+ sent along with the request as metadata. Normally, each value must be of type `str`,
799
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
800
+ be of type `bytes`.
801
+
802
+ Returns:
803
+ google.ai.generativelanguage_v1beta2.types.GenerateMessageResponse:
804
+ The response from the model.
805
+
806
+ This includes candidate messages and
807
+ conversation history in the form of
808
+ chronologically-ordered messages.
809
+
810
+ """
811
+ # Create or coerce a protobuf request object.
812
+ # - Quick check: If we got a request object, we should *not* have
813
+ # gotten any keyword arguments that map to the request.
814
+ has_flattened_params = any(
815
+ [model, prompt, temperature, candidate_count, top_p, top_k]
816
+ )
817
+ if request is not None and has_flattened_params:
818
+ raise ValueError(
819
+ "If the `request` argument is set, then none of "
820
+ "the individual field arguments should be set."
821
+ )
822
+
823
+ # - Use the request object if provided (there's no risk of modifying the input as
824
+ # there are no flattened fields), or create one.
825
+ if not isinstance(request, discuss_service.GenerateMessageRequest):
826
+ request = discuss_service.GenerateMessageRequest(request)
827
+ # If we have keyword arguments corresponding to fields on the
828
+ # request, apply these.
829
+ if model is not None:
830
+ request.model = model
831
+ if prompt is not None:
832
+ request.prompt = prompt
833
+ if temperature is not None:
834
+ request.temperature = temperature
835
+ if candidate_count is not None:
836
+ request.candidate_count = candidate_count
837
+ if top_p is not None:
838
+ request.top_p = top_p
839
+ if top_k is not None:
840
+ request.top_k = top_k
841
+
842
+ # Wrap the RPC method; this adds retry and timeout information,
843
+ # and friendly error handling.
844
+ rpc = self._transport._wrapped_methods[self._transport.generate_message]
845
+
846
+ # Certain fields should be provided within the metadata header;
847
+ # add these here.
848
+ metadata = tuple(metadata) + (
849
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
850
+ )
851
+
852
+ # Validate the universe domain.
853
+ self._validate_universe_domain()
854
+
855
+ # Send the request.
856
+ response = rpc(
857
+ request,
858
+ retry=retry,
859
+ timeout=timeout,
860
+ metadata=metadata,
861
+ )
862
+
863
+ # Done; return the response.
864
+ return response
865
+
866
+ def count_message_tokens(
867
+ self,
868
+ request: Optional[
869
+ Union[discuss_service.CountMessageTokensRequest, dict]
870
+ ] = None,
871
+ *,
872
+ model: Optional[str] = None,
873
+ prompt: Optional[discuss_service.MessagePrompt] = None,
874
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
875
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
876
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
877
+ ) -> discuss_service.CountMessageTokensResponse:
878
+ r"""Runs a model's tokenizer on a string and returns the
879
+ token count.
880
+
881
+ .. code-block:: python
882
+
883
+ # This snippet has been automatically generated and should be regarded as a
884
+ # code template only.
885
+ # It will require modifications to work:
886
+ # - It may require correct/in-range values for request initialization.
887
+ # - It may require specifying regional endpoints when creating the service
888
+ # client as shown in:
889
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
890
+ from google.ai import generativelanguage_v1beta2
891
+
892
+ def sample_count_message_tokens():
893
+ # Create a client
894
+ client = generativelanguage_v1beta2.DiscussServiceClient()
895
+
896
+ # Initialize request argument(s)
897
+ prompt = generativelanguage_v1beta2.MessagePrompt()
898
+ prompt.messages.content = "content_value"
899
+
900
+ request = generativelanguage_v1beta2.CountMessageTokensRequest(
901
+ model="model_value",
902
+ prompt=prompt,
903
+ )
904
+
905
+ # Make the request
906
+ response = client.count_message_tokens(request=request)
907
+
908
+ # Handle the response
909
+ print(response)
910
+
911
+ Args:
912
+ request (Union[google.ai.generativelanguage_v1beta2.types.CountMessageTokensRequest, dict]):
913
+ The request object. Counts the number of tokens in the ``prompt`` sent to a
914
+ model.
915
+
916
+ Models may tokenize text differently, so each model may
917
+ return a different ``token_count``.
918
+ model (str):
919
+ Required. The model's resource name. This serves as an
920
+ ID for the Model to use.
921
+
922
+ This name should match a model name returned by the
923
+ ``ListModels`` method.
924
+
925
+ Format: ``models/{model}``
926
+
927
+ This corresponds to the ``model`` field
928
+ on the ``request`` instance; if ``request`` is provided, this
929
+ should not be set.
930
+ prompt (google.ai.generativelanguage_v1beta2.types.MessagePrompt):
931
+ Required. The prompt, whose token
932
+ count is to be returned.
933
+
934
+ This corresponds to the ``prompt`` field
935
+ on the ``request`` instance; if ``request`` is provided, this
936
+ should not be set.
937
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
938
+ should be retried.
939
+ timeout (float): The timeout for this request.
940
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
941
+ sent along with the request as metadata. Normally, each value must be of type `str`,
942
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
943
+ be of type `bytes`.
944
+
945
+ Returns:
946
+ google.ai.generativelanguage_v1beta2.types.CountMessageTokensResponse:
947
+ A response from CountMessageTokens.
948
+
949
+ It returns the model's token_count for the prompt.
950
+
951
+ """
952
+ # Create or coerce a protobuf request object.
953
+ # - Quick check: If we got a request object, we should *not* have
954
+ # gotten any keyword arguments that map to the request.
955
+ has_flattened_params = any([model, prompt])
956
+ if request is not None and has_flattened_params:
957
+ raise ValueError(
958
+ "If the `request` argument is set, then none of "
959
+ "the individual field arguments should be set."
960
+ )
961
+
962
+ # - Use the request object if provided (there's no risk of modifying the input as
963
+ # there are no flattened fields), or create one.
964
+ if not isinstance(request, discuss_service.CountMessageTokensRequest):
965
+ request = discuss_service.CountMessageTokensRequest(request)
966
+ # If we have keyword arguments corresponding to fields on the
967
+ # request, apply these.
968
+ if model is not None:
969
+ request.model = model
970
+ if prompt is not None:
971
+ request.prompt = prompt
972
+
973
+ # Wrap the RPC method; this adds retry and timeout information,
974
+ # and friendly error handling.
975
+ rpc = self._transport._wrapped_methods[self._transport.count_message_tokens]
976
+
977
+ # Certain fields should be provided within the metadata header;
978
+ # add these here.
979
+ metadata = tuple(metadata) + (
980
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
981
+ )
982
+
983
+ # Validate the universe domain.
984
+ self._validate_universe_domain()
985
+
986
+ # Send the request.
987
+ response = rpc(
988
+ request,
989
+ retry=retry,
990
+ timeout=timeout,
991
+ metadata=metadata,
992
+ )
993
+
994
+ # Done; return the response.
995
+ return response
996
+
997
+ def __enter__(self) -> "DiscussServiceClient":
998
+ return self
999
+
1000
+ def __exit__(self, type, value, traceback):
1001
+ """Releases underlying transport's resources.
1002
+
1003
+ .. warning::
1004
+ ONLY use as a context manager if the transport is NOT shared
1005
+ with other clients! Exiting the with block will CLOSE the transport
1006
+ and may cause errors in other clients!
1007
+ """
1008
+ self.transport.close()
1009
+
1010
+
1011
# Default client info advertises this package's version in the
# user-agent-style headers sent with each request.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


__all__ = ("DiscussServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ from typing import Dict, Type
18
+
19
+ from .base import DiscussServiceTransport
20
+ from .grpc import DiscussServiceGrpcTransport
21
+ from .grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
22
+ from .rest import DiscussServiceRestInterceptor, DiscussServiceRestTransport
23
+
24
+ # Compile a registry of transports.
25
+ _transport_registry = OrderedDict() # type: Dict[str, Type[DiscussServiceTransport]]
26
+ _transport_registry["grpc"] = DiscussServiceGrpcTransport
27
+ _transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport
28
+ _transport_registry["rest"] = DiscussServiceRestTransport
29
+
30
+ __all__ = (
31
+ "DiscussServiceTransport",
32
+ "DiscussServiceGrpcTransport",
33
+ "DiscussServiceGrpcAsyncIOTransport",
34
+ "DiscussServiceRestTransport",
35
+ "DiscussServiceRestInterceptor",
36
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (908 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (7.88 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (17.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (20.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (23.5 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (9.75 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/base.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.oauth2 import service_account # type: ignore
26
+
27
+ from google.ai.generativelanguage_v1beta2 import gapic_version as package_version
28
+ from google.ai.generativelanguage_v1beta2.types import discuss_service
29
+
30
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
31
+ gapic_version=package_version.__version__
32
+ )
33
+
34
+
35
+ class DiscussServiceTransport(abc.ABC):
36
+ """Abstract transport class for DiscussService."""
37
+
38
+ AUTH_SCOPES = ()
39
+
40
+ DEFAULT_HOST: str = "generativelanguage.googleapis.com"
41
+
42
+ def __init__(
43
+ self,
44
+ *,
45
+ host: str = DEFAULT_HOST,
46
+ credentials: Optional[ga_credentials.Credentials] = None,
47
+ credentials_file: Optional[str] = None,
48
+ scopes: Optional[Sequence[str]] = None,
49
+ quota_project_id: Optional[str] = None,
50
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
51
+ always_use_jwt_access: Optional[bool] = False,
52
+ api_audience: Optional[str] = None,
53
+ **kwargs,
54
+ ) -> None:
55
+ """Instantiate the transport.
56
+
57
+ Args:
58
+ host (Optional[str]):
59
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
60
+ credentials (Optional[google.auth.credentials.Credentials]): The
61
+ authorization credentials to attach to requests. These
62
+ credentials identify the application to the service; if none
63
+ are specified, the client will attempt to ascertain the
64
+ credentials from the environment.
65
+ credentials_file (Optional[str]): A file with credentials that can
66
+ be loaded with :func:`google.auth.load_credentials_from_file`.
67
+ This argument is mutually exclusive with credentials.
68
+ scopes (Optional[Sequence[str]]): A list of scopes.
69
+ quota_project_id (Optional[str]): An optional project to use for billing
70
+ and quota.
71
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
72
+ The client info used to send a user-agent string along with
73
+ API requests. If ``None``, then default info will be used.
74
+ Generally, you only need to set this if you're developing
75
+ your own client library.
76
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
77
+ be used for service account credentials.
78
+ """
79
+
80
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
81
+
82
+ # Save the scopes.
83
+ self._scopes = scopes
84
+ if not hasattr(self, "_ignore_credentials"):
85
+ self._ignore_credentials: bool = False
86
+
87
+ # If no credentials are provided, then determine the appropriate
88
+ # defaults.
89
+ if credentials and credentials_file:
90
+ raise core_exceptions.DuplicateCredentialArgs(
91
+ "'credentials_file' and 'credentials' are mutually exclusive"
92
+ )
93
+
94
+ if credentials_file is not None:
95
+ credentials, _ = google.auth.load_credentials_from_file(
96
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
97
+ )
98
+ elif credentials is None and not self._ignore_credentials:
99
+ credentials, _ = google.auth.default(
100
+ **scopes_kwargs, quota_project_id=quota_project_id
101
+ )
102
+ # Don't apply audience if the credentials file passed from user.
103
+ if hasattr(credentials, "with_gdch_audience"):
104
+ credentials = credentials.with_gdch_audience(
105
+ api_audience if api_audience else host
106
+ )
107
+
108
+ # If the credentials are service account credentials, then always try to use self signed JWT.
109
+ if (
110
+ always_use_jwt_access
111
+ and isinstance(credentials, service_account.Credentials)
112
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
113
+ ):
114
+ credentials = credentials.with_always_use_jwt_access(True)
115
+
116
+ # Save the credentials.
117
+ self._credentials = credentials
118
+
119
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
120
+ if ":" not in host:
121
+ host += ":443"
122
+ self._host = host
123
+
124
+ @property
125
+ def host(self):
126
+ return self._host
127
+
128
+ def _prep_wrapped_messages(self, client_info):
129
+ # Precompute the wrapped methods.
130
+ self._wrapped_methods = {
131
+ self.generate_message: gapic_v1.method.wrap_method(
132
+ self.generate_message,
133
+ default_retry=retries.Retry(
134
+ initial=1.0,
135
+ maximum=10.0,
136
+ multiplier=1.3,
137
+ predicate=retries.if_exception_type(
138
+ core_exceptions.ServiceUnavailable,
139
+ ),
140
+ deadline=60.0,
141
+ ),
142
+ default_timeout=60.0,
143
+ client_info=client_info,
144
+ ),
145
+ self.count_message_tokens: gapic_v1.method.wrap_method(
146
+ self.count_message_tokens,
147
+ default_retry=retries.Retry(
148
+ initial=1.0,
149
+ maximum=10.0,
150
+ multiplier=1.3,
151
+ predicate=retries.if_exception_type(
152
+ core_exceptions.ServiceUnavailable,
153
+ ),
154
+ deadline=60.0,
155
+ ),
156
+ default_timeout=60.0,
157
+ client_info=client_info,
158
+ ),
159
+ }
160
+
161
+ def close(self):
162
+ """Closes resources associated with the transport.
163
+
164
+ .. warning::
165
+ Only call this method if the transport is NOT shared
166
+ with other clients - this may cause errors in other clients!
167
+ """
168
+ raise NotImplementedError()
169
+
170
+ @property
171
+ def generate_message(
172
+ self,
173
+ ) -> Callable[
174
+ [discuss_service.GenerateMessageRequest],
175
+ Union[
176
+ discuss_service.GenerateMessageResponse,
177
+ Awaitable[discuss_service.GenerateMessageResponse],
178
+ ],
179
+ ]:
180
+ raise NotImplementedError()
181
+
182
+ @property
183
+ def count_message_tokens(
184
+ self,
185
+ ) -> Callable[
186
+ [discuss_service.CountMessageTokensRequest],
187
+ Union[
188
+ discuss_service.CountMessageTokensResponse,
189
+ Awaitable[discuss_service.CountMessageTokensResponse],
190
+ ],
191
+ ]:
192
+ raise NotImplementedError()
193
+
194
+ @property
195
+ def kind(self) -> str:
196
+ raise NotImplementedError()
197
+
198
+
199
+ __all__ = ("DiscussServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/grpc.py ADDED
@@ -0,0 +1,394 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.protobuf.json_format import MessageToJson
27
+ import google.protobuf.message
28
+ import grpc # type: ignore
29
+ import proto # type: ignore
30
+
31
+ from google.ai.generativelanguage_v1beta2.types import discuss_service
32
+
33
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
34
+
35
+ try:
36
+ from google.api_core import client_logging # type: ignore
37
+
38
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
39
+ except ImportError: # pragma: NO COVER
40
+ CLIENT_LOGGING_SUPPORTED = False
41
+
42
+ _LOGGER = std_logging.getLogger(__name__)
43
+
44
+
45
+ class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
46
+ def intercept_unary_unary(self, continuation, client_call_details, request):
47
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
48
+ std_logging.DEBUG
49
+ )
50
+ if logging_enabled: # pragma: NO COVER
51
+ request_metadata = client_call_details.metadata
52
+ if isinstance(request, proto.Message):
53
+ request_payload = type(request).to_json(request)
54
+ elif isinstance(request, google.protobuf.message.Message):
55
+ request_payload = MessageToJson(request)
56
+ else:
57
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
58
+
59
+ request_metadata = {
60
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
61
+ for key, value in request_metadata
62
+ }
63
+ grpc_request = {
64
+ "payload": request_payload,
65
+ "requestMethod": "grpc",
66
+ "metadata": dict(request_metadata),
67
+ }
68
+ _LOGGER.debug(
69
+ f"Sending request for {client_call_details.method}",
70
+ extra={
71
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
72
+ "rpcName": client_call_details.method,
73
+ "request": grpc_request,
74
+ "metadata": grpc_request["metadata"],
75
+ },
76
+ )
77
+
78
+ response = continuation(client_call_details, request)
79
+ if logging_enabled: # pragma: NO COVER
80
+ response_metadata = response.trailing_metadata()
81
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
82
+ metadata = (
83
+ dict([(k, str(v)) for k, v in response_metadata])
84
+ if response_metadata
85
+ else None
86
+ )
87
+ result = response.result()
88
+ if isinstance(result, proto.Message):
89
+ response_payload = type(result).to_json(result)
90
+ elif isinstance(result, google.protobuf.message.Message):
91
+ response_payload = MessageToJson(result)
92
+ else:
93
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
94
+ grpc_response = {
95
+ "payload": response_payload,
96
+ "metadata": metadata,
97
+ "status": "OK",
98
+ }
99
+ _LOGGER.debug(
100
+ f"Received response for {client_call_details.method}.",
101
+ extra={
102
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
103
+ "rpcName": client_call_details.method,
104
+ "response": grpc_response,
105
+ "metadata": grpc_response["metadata"],
106
+ },
107
+ )
108
+ return response
109
+
110
+
111
+ class DiscussServiceGrpcTransport(DiscussServiceTransport):
112
+ """gRPC backend transport for DiscussService.
113
+
114
+ An API for using Generative Language Models (GLMs) in dialog
115
+ applications.
116
+ Also known as large language models (LLMs), this API provides
117
+ models that are trained for multi-turn dialog.
118
+
119
+ This class defines the same methods as the primary client, so the
120
+ primary client can load the underlying transport implementation
121
+ and call it.
122
+
123
+ It sends protocol buffers over the wire using gRPC (which is built on
124
+ top of HTTP/2); the ``grpcio`` package must be installed.
125
+ """
126
+
127
+ _stubs: Dict[str, Callable]
128
+
129
+ def __init__(
130
+ self,
131
+ *,
132
+ host: str = "generativelanguage.googleapis.com",
133
+ credentials: Optional[ga_credentials.Credentials] = None,
134
+ credentials_file: Optional[str] = None,
135
+ scopes: Optional[Sequence[str]] = None,
136
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
137
+ api_mtls_endpoint: Optional[str] = None,
138
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
139
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
140
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
141
+ quota_project_id: Optional[str] = None,
142
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
143
+ always_use_jwt_access: Optional[bool] = False,
144
+ api_audience: Optional[str] = None,
145
+ ) -> None:
146
+ """Instantiate the transport.
147
+
148
+ Args:
149
+ host (Optional[str]):
150
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
151
+ credentials (Optional[google.auth.credentials.Credentials]): The
152
+ authorization credentials to attach to requests. These
153
+ credentials identify the application to the service; if none
154
+ are specified, the client will attempt to ascertain the
155
+ credentials from the environment.
156
+ This argument is ignored if a ``channel`` instance is provided.
157
+ credentials_file (Optional[str]): A file with credentials that can
158
+ be loaded with :func:`google.auth.load_credentials_from_file`.
159
+ This argument is ignored if a ``channel`` instance is provided.
160
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
161
+ ignored if a ``channel`` instance is provided.
162
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
163
+ A ``Channel`` instance through which to make calls, or a Callable
164
+ that constructs and returns one. If set to None, ``self.create_channel``
165
+ is used to create the channel. If a Callable is given, it will be called
166
+ with the same arguments as used in ``self.create_channel``.
167
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
168
+ If provided, it overrides the ``host`` argument and tries to create
169
+ a mutual TLS channel with client SSL credentials from
170
+ ``client_cert_source`` or application default SSL credentials.
171
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
172
+ Deprecated. A callback to provide client SSL certificate bytes and
173
+ private key bytes, both in PEM format. It is ignored if
174
+ ``api_mtls_endpoint`` is None.
175
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
176
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
177
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
178
+ A callback to provide client certificate bytes and private key bytes,
179
+ both in PEM format. It is used to configure a mutual TLS channel. It is
180
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
181
+ quota_project_id (Optional[str]): An optional project to use for billing
182
+ and quota.
183
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
184
+ The client info used to send a user-agent string along with
185
+ API requests. If ``None``, then default info will be used.
186
+ Generally, you only need to set this if you're developing
187
+ your own client library.
188
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
189
+ be used for service account credentials.
190
+
191
+ Raises:
192
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
193
+ creation failed for any reason.
194
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
195
+ and ``credentials_file`` are passed.
196
+ """
197
+ self._grpc_channel = None
198
+ self._ssl_channel_credentials = ssl_channel_credentials
199
+ self._stubs: Dict[str, Callable] = {}
200
+
201
+ if api_mtls_endpoint:
202
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
203
+ if client_cert_source:
204
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
205
+
206
+ if isinstance(channel, grpc.Channel):
207
+ # Ignore credentials if a channel was passed.
208
+ credentials = None
209
+ self._ignore_credentials = True
210
+ # If a channel was explicitly provided, set it.
211
+ self._grpc_channel = channel
212
+ self._ssl_channel_credentials = None
213
+
214
+ else:
215
+ if api_mtls_endpoint:
216
+ host = api_mtls_endpoint
217
+
218
+ # Create SSL credentials with client_cert_source or application
219
+ # default SSL credentials.
220
+ if client_cert_source:
221
+ cert, key = client_cert_source()
222
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
223
+ certificate_chain=cert, private_key=key
224
+ )
225
+ else:
226
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
227
+
228
+ else:
229
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
230
+ cert, key = client_cert_source_for_mtls()
231
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
232
+ certificate_chain=cert, private_key=key
233
+ )
234
+
235
+ # The base transport sets the host, credentials and scopes
236
+ super().__init__(
237
+ host=host,
238
+ credentials=credentials,
239
+ credentials_file=credentials_file,
240
+ scopes=scopes,
241
+ quota_project_id=quota_project_id,
242
+ client_info=client_info,
243
+ always_use_jwt_access=always_use_jwt_access,
244
+ api_audience=api_audience,
245
+ )
246
+
247
+ if not self._grpc_channel:
248
+ # initialize with the provided callable or the default channel
249
+ channel_init = channel or type(self).create_channel
250
+ self._grpc_channel = channel_init(
251
+ self._host,
252
+ # use the credentials which are saved
253
+ credentials=self._credentials,
254
+ # Set ``credentials_file`` to ``None`` here as
255
+ # the credentials that we saved earlier should be used.
256
+ credentials_file=None,
257
+ scopes=self._scopes,
258
+ ssl_credentials=self._ssl_channel_credentials,
259
+ quota_project_id=quota_project_id,
260
+ options=[
261
+ ("grpc.max_send_message_length", -1),
262
+ ("grpc.max_receive_message_length", -1),
263
+ ],
264
+ )
265
+
266
+ self._interceptor = _LoggingClientInterceptor()
267
+ self._logged_channel = grpc.intercept_channel(
268
+ self._grpc_channel, self._interceptor
269
+ )
270
+
271
+ # Wrap messages. This must be done after self._logged_channel exists
272
+ self._prep_wrapped_messages(client_info)
273
+
274
+ @classmethod
275
+ def create_channel(
276
+ cls,
277
+ host: str = "generativelanguage.googleapis.com",
278
+ credentials: Optional[ga_credentials.Credentials] = None,
279
+ credentials_file: Optional[str] = None,
280
+ scopes: Optional[Sequence[str]] = None,
281
+ quota_project_id: Optional[str] = None,
282
+ **kwargs,
283
+ ) -> grpc.Channel:
284
+ """Create and return a gRPC channel object.
285
+ Args:
286
+ host (Optional[str]): The host for the channel to use.
287
+ credentials (Optional[~.Credentials]): The
288
+ authorization credentials to attach to requests. These
289
+ credentials identify this application to the service. If
290
+ none are specified, the client will attempt to ascertain
291
+ the credentials from the environment.
292
+ credentials_file (Optional[str]): A file with credentials that can
293
+ be loaded with :func:`google.auth.load_credentials_from_file`.
294
+ This argument is mutually exclusive with credentials.
295
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
296
+ service. These are only used when credentials are not specified and
297
+ are passed to :func:`google.auth.default`.
298
+ quota_project_id (Optional[str]): An optional project to use for billing
299
+ and quota.
300
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
301
+ channel creation.
302
+ Returns:
303
+ grpc.Channel: A gRPC channel object.
304
+
305
+ Raises:
306
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
307
+ and ``credentials_file`` are passed.
308
+ """
309
+
310
+ return grpc_helpers.create_channel(
311
+ host,
312
+ credentials=credentials,
313
+ credentials_file=credentials_file,
314
+ quota_project_id=quota_project_id,
315
+ default_scopes=cls.AUTH_SCOPES,
316
+ scopes=scopes,
317
+ default_host=cls.DEFAULT_HOST,
318
+ **kwargs,
319
+ )
320
+
321
+ @property
322
+ def grpc_channel(self) -> grpc.Channel:
323
+ """Return the channel designed to connect to this service."""
324
+ return self._grpc_channel
325
+
326
+ @property
327
+ def generate_message(
328
+ self,
329
+ ) -> Callable[
330
+ [discuss_service.GenerateMessageRequest],
331
+ discuss_service.GenerateMessageResponse,
332
+ ]:
333
+ r"""Return a callable for the generate message method over gRPC.
334
+
335
+ Generates a response from the model given an input
336
+ ``MessagePrompt``.
337
+
338
+ Returns:
339
+ Callable[[~.GenerateMessageRequest],
340
+ ~.GenerateMessageResponse]:
341
+ A function that, when called, will call the underlying RPC
342
+ on the server.
343
+ """
344
+ # Generate a "stub function" on-the-fly which will actually make
345
+ # the request.
346
+ # gRPC handles serialization and deserialization, so we just need
347
+ # to pass in the functions for each.
348
+ if "generate_message" not in self._stubs:
349
+ self._stubs["generate_message"] = self._logged_channel.unary_unary(
350
+ "/google.ai.generativelanguage.v1beta2.DiscussService/GenerateMessage",
351
+ request_serializer=discuss_service.GenerateMessageRequest.serialize,
352
+ response_deserializer=discuss_service.GenerateMessageResponse.deserialize,
353
+ )
354
+ return self._stubs["generate_message"]
355
+
356
+ @property
357
+ def count_message_tokens(
358
+ self,
359
+ ) -> Callable[
360
+ [discuss_service.CountMessageTokensRequest],
361
+ discuss_service.CountMessageTokensResponse,
362
+ ]:
363
+ r"""Return a callable for the count message tokens method over gRPC.
364
+
365
+ Runs a model's tokenizer on a string and returns the
366
+ token count.
367
+
368
+ Returns:
369
+ Callable[[~.CountMessageTokensRequest],
370
+ ~.CountMessageTokensResponse]:
371
+ A function that, when called, will call the underlying RPC
372
+ on the server.
373
+ """
374
+ # Generate a "stub function" on-the-fly which will actually make
375
+ # the request.
376
+ # gRPC handles serialization and deserialization, so we just need
377
+ # to pass in the functions for each.
378
+ if "count_message_tokens" not in self._stubs:
379
+ self._stubs["count_message_tokens"] = self._logged_channel.unary_unary(
380
+ "/google.ai.generativelanguage.v1beta2.DiscussService/CountMessageTokens",
381
+ request_serializer=discuss_service.CountMessageTokensRequest.serialize,
382
+ response_deserializer=discuss_service.CountMessageTokensResponse.deserialize,
383
+ )
384
+ return self._stubs["count_message_tokens"]
385
+
386
+ def close(self):
387
+ self._logged_channel.close()
388
+
389
+ @property
390
+ def kind(self) -> str:
391
+ return "grpc"
392
+
393
+
394
+ __all__ = ("DiscussServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.protobuf.json_format import MessageToJson
29
+ import google.protobuf.message
30
+ import grpc # type: ignore
31
+ from grpc.experimental import aio # type: ignore
32
+ import proto # type: ignore
33
+
34
+ from google.ai.generativelanguage_v1beta2.types import discuss_service
35
+
36
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
37
+ from .grpc import DiscussServiceGrpcTransport
38
+
39
+ try:
40
+ from google.api_core import client_logging # type: ignore
41
+
42
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
43
+ except ImportError: # pragma: NO COVER
44
+ CLIENT_LOGGING_SUPPORTED = False
45
+
46
+ _LOGGER = std_logging.getLogger(__name__)
47
+
48
+
49
+ class _LoggingClientAIOInterceptor(
50
+ grpc.aio.UnaryUnaryClientInterceptor
51
+ ): # pragma: NO COVER
52
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
53
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
54
+ std_logging.DEBUG
55
+ )
56
+ if logging_enabled: # pragma: NO COVER
57
+ request_metadata = client_call_details.metadata
58
+ if isinstance(request, proto.Message):
59
+ request_payload = type(request).to_json(request)
60
+ elif isinstance(request, google.protobuf.message.Message):
61
+ request_payload = MessageToJson(request)
62
+ else:
63
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
64
+
65
+ request_metadata = {
66
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
67
+ for key, value in request_metadata
68
+ }
69
+ grpc_request = {
70
+ "payload": request_payload,
71
+ "requestMethod": "grpc",
72
+ "metadata": dict(request_metadata),
73
+ }
74
+ _LOGGER.debug(
75
+ f"Sending request for {client_call_details.method}",
76
+ extra={
77
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
78
+ "rpcName": str(client_call_details.method),
79
+ "request": grpc_request,
80
+ "metadata": grpc_request["metadata"],
81
+ },
82
+ )
83
+ response = await continuation(client_call_details, request)
84
+ if logging_enabled: # pragma: NO COVER
85
+ response_metadata = await response.trailing_metadata()
86
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
87
+ metadata = (
88
+ dict([(k, str(v)) for k, v in response_metadata])
89
+ if response_metadata
90
+ else None
91
+ )
92
+ result = await response
93
+ if isinstance(result, proto.Message):
94
+ response_payload = type(result).to_json(result)
95
+ elif isinstance(result, google.protobuf.message.Message):
96
+ response_payload = MessageToJson(result)
97
+ else:
98
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
99
+ grpc_response = {
100
+ "payload": response_payload,
101
+ "metadata": metadata,
102
+ "status": "OK",
103
+ }
104
+ _LOGGER.debug(
105
+ f"Received response to rpc {client_call_details.method}.",
106
+ extra={
107
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
108
+ "rpcName": str(client_call_details.method),
109
+ "response": grpc_response,
110
+ "metadata": grpc_response["metadata"],
111
+ },
112
+ )
113
+ return response
114
+
115
+
116
+ class DiscussServiceGrpcAsyncIOTransport(DiscussServiceTransport):
117
+ """gRPC AsyncIO backend transport for DiscussService.
118
+
119
+ An API for using Generative Language Models (GLMs) in dialog
120
+ applications.
121
+ Also known as large language models (LLMs), this API provides
122
+ models that are trained for multi-turn dialog.
123
+
124
+ This class defines the same methods as the primary client, so the
125
+ primary client can load the underlying transport implementation
126
+ and call it.
127
+
128
+ It sends protocol buffers over the wire using gRPC (which is built on
129
+ top of HTTP/2); the ``grpcio`` package must be installed.
130
+ """
131
+
132
+ _grpc_channel: aio.Channel
133
+ _stubs: Dict[str, Callable] = {}
134
+
135
+ @classmethod
136
+ def create_channel(
137
+ cls,
138
+ host: str = "generativelanguage.googleapis.com",
139
+ credentials: Optional[ga_credentials.Credentials] = None,
140
+ credentials_file: Optional[str] = None,
141
+ scopes: Optional[Sequence[str]] = None,
142
+ quota_project_id: Optional[str] = None,
143
+ **kwargs,
144
+ ) -> aio.Channel:
145
+ """Create and return a gRPC AsyncIO channel object.
146
+ Args:
147
+ host (Optional[str]): The host for the channel to use.
148
+ credentials (Optional[~.Credentials]): The
149
+ authorization credentials to attach to requests. These
150
+ credentials identify this application to the service. If
151
+ none are specified, the client will attempt to ascertain
152
+ the credentials from the environment.
153
+ credentials_file (Optional[str]): A file with credentials that can
154
+ be loaded with :func:`google.auth.load_credentials_from_file`.
155
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
156
+ service. These are only used when credentials are not specified and
157
+ are passed to :func:`google.auth.default`.
158
+ quota_project_id (Optional[str]): An optional project to use for billing
159
+ and quota.
160
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
161
+ channel creation.
162
+ Returns:
163
+ aio.Channel: A gRPC AsyncIO channel object.
164
+ """
165
+
166
+ return grpc_helpers_async.create_channel(
167
+ host,
168
+ credentials=credentials,
169
+ credentials_file=credentials_file,
170
+ quota_project_id=quota_project_id,
171
+ default_scopes=cls.AUTH_SCOPES,
172
+ scopes=scopes,
173
+ default_host=cls.DEFAULT_HOST,
174
+ **kwargs,
175
+ )
176
+
177
+ def __init__(
178
+ self,
179
+ *,
180
+ host: str = "generativelanguage.googleapis.com",
181
+ credentials: Optional[ga_credentials.Credentials] = None,
182
+ credentials_file: Optional[str] = None,
183
+ scopes: Optional[Sequence[str]] = None,
184
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
185
+ api_mtls_endpoint: Optional[str] = None,
186
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
187
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
188
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
189
+ quota_project_id: Optional[str] = None,
190
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
191
+ always_use_jwt_access: Optional[bool] = False,
192
+ api_audience: Optional[str] = None,
193
+ ) -> None:
194
+ """Instantiate the transport.
195
+
196
+ Args:
197
+ host (Optional[str]):
198
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
199
+ credentials (Optional[google.auth.credentials.Credentials]): The
200
+ authorization credentials to attach to requests. These
201
+ credentials identify the application to the service; if none
202
+ are specified, the client will attempt to ascertain the
203
+ credentials from the environment.
204
+ This argument is ignored if a ``channel`` instance is provided.
205
+ credentials_file (Optional[str]): A file with credentials that can
206
+ be loaded with :func:`google.auth.load_credentials_from_file`.
207
+ This argument is ignored if a ``channel`` instance is provided.
208
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
209
+ service. These are only used when credentials are not specified and
210
+ are passed to :func:`google.auth.default`.
211
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
212
+ A ``Channel`` instance through which to make calls, or a Callable
213
+ that constructs and returns one. If set to None, ``self.create_channel``
214
+ is used to create the channel. If a Callable is given, it will be called
215
+ with the same arguments as used in ``self.create_channel``.
216
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
217
+ If provided, it overrides the ``host`` argument and tries to create
218
+ a mutual TLS channel with client SSL credentials from
219
+ ``client_cert_source`` or application default SSL credentials.
220
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
221
+ Deprecated. A callback to provide client SSL certificate bytes and
222
+ private key bytes, both in PEM format. It is ignored if
223
+ ``api_mtls_endpoint`` is None.
224
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
225
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
226
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
227
+ A callback to provide client certificate bytes and private key bytes,
228
+ both in PEM format. It is used to configure a mutual TLS channel. It is
229
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
230
+ quota_project_id (Optional[str]): An optional project to use for billing
231
+ and quota.
232
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
233
+ The client info used to send a user-agent string along with
234
+ API requests. If ``None``, then default info will be used.
235
+ Generally, you only need to set this if you're developing
236
+ your own client library.
237
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
238
+ be used for service account credentials.
239
+
240
+ Raises:
241
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
242
+ creation failed for any reason.
243
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
244
+ and ``credentials_file`` are passed.
245
+ """
246
+ self._grpc_channel = None
247
+ self._ssl_channel_credentials = ssl_channel_credentials
248
+ self._stubs: Dict[str, Callable] = {}
249
+
250
+ if api_mtls_endpoint:
251
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
252
+ if client_cert_source:
253
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
254
+
255
+ if isinstance(channel, aio.Channel):
256
+ # Ignore credentials if a channel was passed.
257
+ credentials = None
258
+ self._ignore_credentials = True
259
+ # If a channel was explicitly provided, set it.
260
+ self._grpc_channel = channel
261
+ self._ssl_channel_credentials = None
262
+ else:
263
+ if api_mtls_endpoint:
264
+ host = api_mtls_endpoint
265
+
266
+ # Create SSL credentials with client_cert_source or application
267
+ # default SSL credentials.
268
+ if client_cert_source:
269
+ cert, key = client_cert_source()
270
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
271
+ certificate_chain=cert, private_key=key
272
+ )
273
+ else:
274
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
275
+
276
+ else:
277
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
278
+ cert, key = client_cert_source_for_mtls()
279
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
280
+ certificate_chain=cert, private_key=key
281
+ )
282
+
283
+ # The base transport sets the host, credentials and scopes
284
+ super().__init__(
285
+ host=host,
286
+ credentials=credentials,
287
+ credentials_file=credentials_file,
288
+ scopes=scopes,
289
+ quota_project_id=quota_project_id,
290
+ client_info=client_info,
291
+ always_use_jwt_access=always_use_jwt_access,
292
+ api_audience=api_audience,
293
+ )
294
+
295
+ if not self._grpc_channel:
296
+ # initialize with the provided callable or the default channel
297
+ channel_init = channel or type(self).create_channel
298
+ self._grpc_channel = channel_init(
299
+ self._host,
300
+ # use the credentials which are saved
301
+ credentials=self._credentials,
302
+ # Set ``credentials_file`` to ``None`` here as
303
+ # the credentials that we saved earlier should be used.
304
+ credentials_file=None,
305
+ scopes=self._scopes,
306
+ ssl_credentials=self._ssl_channel_credentials,
307
+ quota_project_id=quota_project_id,
308
+ options=[
309
+ ("grpc.max_send_message_length", -1),
310
+ ("grpc.max_receive_message_length", -1),
311
+ ],
312
+ )
313
+
314
+ self._interceptor = _LoggingClientAIOInterceptor()
315
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
316
+ self._logged_channel = self._grpc_channel
317
+ self._wrap_with_kind = (
318
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
319
+ )
320
+ # Wrap messages. This must be done after self._logged_channel exists
321
+ self._prep_wrapped_messages(client_info)
322
+
323
+ @property
324
+ def grpc_channel(self) -> aio.Channel:
325
+ """Create the channel designed to connect to this service.
326
+
327
+ This property caches on the instance; repeated calls return
328
+ the same channel.
329
+ """
330
+ # Return the channel from cache.
331
+ return self._grpc_channel
332
+
333
+ @property
334
+ def generate_message(
335
+ self,
336
+ ) -> Callable[
337
+ [discuss_service.GenerateMessageRequest],
338
+ Awaitable[discuss_service.GenerateMessageResponse],
339
+ ]:
340
+ r"""Return a callable for the generate message method over gRPC.
341
+
342
+ Generates a response from the model given an input
343
+ ``MessagePrompt``.
344
+
345
+ Returns:
346
+ Callable[[~.GenerateMessageRequest],
347
+ Awaitable[~.GenerateMessageResponse]]:
348
+ A function that, when called, will call the underlying RPC
349
+ on the server.
350
+ """
351
+ # Generate a "stub function" on-the-fly which will actually make
352
+ # the request.
353
+ # gRPC handles serialization and deserialization, so we just need
354
+ # to pass in the functions for each.
355
+ if "generate_message" not in self._stubs:
356
+ self._stubs["generate_message"] = self._logged_channel.unary_unary(
357
+ "/google.ai.generativelanguage.v1beta2.DiscussService/GenerateMessage",
358
+ request_serializer=discuss_service.GenerateMessageRequest.serialize,
359
+ response_deserializer=discuss_service.GenerateMessageResponse.deserialize,
360
+ )
361
+ return self._stubs["generate_message"]
362
+
363
+ @property
364
+ def count_message_tokens(
365
+ self,
366
+ ) -> Callable[
367
+ [discuss_service.CountMessageTokensRequest],
368
+ Awaitable[discuss_service.CountMessageTokensResponse],
369
+ ]:
370
+ r"""Return a callable for the count message tokens method over gRPC.
371
+
372
+ Runs a model's tokenizer on a string and returns the
373
+ token count.
374
+
375
+ Returns:
376
+ Callable[[~.CountMessageTokensRequest],
377
+ Awaitable[~.CountMessageTokensResponse]]:
378
+ A function that, when called, will call the underlying RPC
379
+ on the server.
380
+ """
381
+ # Generate a "stub function" on-the-fly which will actually make
382
+ # the request.
383
+ # gRPC handles serialization and deserialization, so we just need
384
+ # to pass in the functions for each.
385
+ if "count_message_tokens" not in self._stubs:
386
+ self._stubs["count_message_tokens"] = self._logged_channel.unary_unary(
387
+ "/google.ai.generativelanguage.v1beta2.DiscussService/CountMessageTokens",
388
+ request_serializer=discuss_service.CountMessageTokensRequest.serialize,
389
+ response_deserializer=discuss_service.CountMessageTokensResponse.deserialize,
390
+ )
391
+ return self._stubs["count_message_tokens"]
392
+
393
+ def _prep_wrapped_messages(self, client_info):
394
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
395
+ self._wrapped_methods = {
396
+ self.generate_message: self._wrap_method(
397
+ self.generate_message,
398
+ default_retry=retries.AsyncRetry(
399
+ initial=1.0,
400
+ maximum=10.0,
401
+ multiplier=1.3,
402
+ predicate=retries.if_exception_type(
403
+ core_exceptions.ServiceUnavailable,
404
+ ),
405
+ deadline=60.0,
406
+ ),
407
+ default_timeout=60.0,
408
+ client_info=client_info,
409
+ ),
410
+ self.count_message_tokens: self._wrap_method(
411
+ self.count_message_tokens,
412
+ default_retry=retries.AsyncRetry(
413
+ initial=1.0,
414
+ maximum=10.0,
415
+ multiplier=1.3,
416
+ predicate=retries.if_exception_type(
417
+ core_exceptions.ServiceUnavailable,
418
+ ),
419
+ deadline=60.0,
420
+ ),
421
+ default_timeout=60.0,
422
+ client_info=client_info,
423
+ ),
424
+ }
425
+
426
+ def _wrap_method(self, func, *args, **kwargs):
427
+ if self._wrap_with_kind: # pragma: NO COVER
428
+ kwargs["kind"] = self.kind
429
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
430
+
431
+ def close(self):
432
+ return self._logged_channel.close()
433
+
434
+ @property
435
+ def kind(self) -> str:
436
+ return "grpc_asyncio"
437
+
438
+
439
+ __all__ = ("DiscussServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/rest.py ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import exceptions as core_exceptions
23
+ from google.api_core import gapic_v1, rest_helpers, rest_streaming
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.protobuf import json_format
28
+ from requests import __version__ as requests_version
29
+
30
+ from google.ai.generativelanguage_v1beta2.types import discuss_service
31
+
32
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
33
+ from .rest_base import _BaseDiscussServiceRestTransport
34
+
35
+ try:
36
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+
40
+ try:
41
+ from google.api_core import client_logging # type: ignore
42
+
43
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
44
+ except ImportError: # pragma: NO COVER
45
+ CLIENT_LOGGING_SUPPORTED = False
46
+
47
+ _LOGGER = logging.getLogger(__name__)
48
+
49
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
50
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
51
+ grpc_version=None,
52
+ rest_version=f"requests@{requests_version}",
53
+ )
54
+
55
+
56
+ class DiscussServiceRestInterceptor:
57
+ """Interceptor for DiscussService.
58
+
59
+ Interceptors are used to manipulate requests, request metadata, and responses
60
+ in arbitrary ways.
61
+ Example use cases include:
62
+ * Logging
63
+ * Verifying requests according to service or custom semantics
64
+ * Stripping extraneous information from responses
65
+
66
+ These use cases and more can be enabled by injecting an
67
+ instance of a custom subclass when constructing the DiscussServiceRestTransport.
68
+
69
+ .. code-block:: python
70
+ class MyCustomDiscussServiceInterceptor(DiscussServiceRestInterceptor):
71
+ def pre_count_message_tokens(self, request, metadata):
72
+ logging.log(f"Received request: {request}")
73
+ return request, metadata
74
+
75
+ def post_count_message_tokens(self, response):
76
+ logging.log(f"Received response: {response}")
77
+ return response
78
+
79
+ def pre_generate_message(self, request, metadata):
80
+ logging.log(f"Received request: {request}")
81
+ return request, metadata
82
+
83
+ def post_generate_message(self, response):
84
+ logging.log(f"Received response: {response}")
85
+ return response
86
+
87
+ transport = DiscussServiceRestTransport(interceptor=MyCustomDiscussServiceInterceptor())
88
+ client = DiscussServiceClient(transport=transport)
89
+
90
+
91
+ """
92
+
93
+ def pre_count_message_tokens(
94
+ self,
95
+ request: discuss_service.CountMessageTokensRequest,
96
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
97
+ ) -> Tuple[
98
+ discuss_service.CountMessageTokensRequest,
99
+ Sequence[Tuple[str, Union[str, bytes]]],
100
+ ]:
101
+ """Pre-rpc interceptor for count_message_tokens
102
+
103
+ Override in a subclass to manipulate the request or metadata
104
+ before they are sent to the DiscussService server.
105
+ """
106
+ return request, metadata
107
+
108
+ def post_count_message_tokens(
109
+ self, response: discuss_service.CountMessageTokensResponse
110
+ ) -> discuss_service.CountMessageTokensResponse:
111
+ """Post-rpc interceptor for count_message_tokens
112
+
113
+ Override in a subclass to manipulate the response
114
+ after it is returned by the DiscussService server but before
115
+ it is returned to user code.
116
+ """
117
+ return response
118
+
119
+ def pre_generate_message(
120
+ self,
121
+ request: discuss_service.GenerateMessageRequest,
122
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
123
+ ) -> Tuple[
124
+ discuss_service.GenerateMessageRequest, Sequence[Tuple[str, Union[str, bytes]]]
125
+ ]:
126
+ """Pre-rpc interceptor for generate_message
127
+
128
+ Override in a subclass to manipulate the request or metadata
129
+ before they are sent to the DiscussService server.
130
+ """
131
+ return request, metadata
132
+
133
+ def post_generate_message(
134
+ self, response: discuss_service.GenerateMessageResponse
135
+ ) -> discuss_service.GenerateMessageResponse:
136
+ """Post-rpc interceptor for generate_message
137
+
138
+ Override in a subclass to manipulate the response
139
+ after it is returned by the DiscussService server but before
140
+ it is returned to user code.
141
+ """
142
+ return response
143
+
144
+
145
+ @dataclasses.dataclass
146
+ class DiscussServiceRestStub:
147
+ _session: AuthorizedSession
148
+ _host: str
149
+ _interceptor: DiscussServiceRestInterceptor
150
+
151
+
152
+ class DiscussServiceRestTransport(_BaseDiscussServiceRestTransport):
153
+ """REST backend synchronous transport for DiscussService.
154
+
155
+ An API for using Generative Language Models (GLMs) in dialog
156
+ applications.
157
+ Also known as large language models (LLMs), this API provides
158
+ models that are trained for multi-turn dialog.
159
+
160
+ This class defines the same methods as the primary client, so the
161
+ primary client can load the underlying transport implementation
162
+ and call it.
163
+
164
+ It sends JSON representations of protocol buffers over HTTP/1.1
165
+ """
166
+
167
+ def __init__(
168
+ self,
169
+ *,
170
+ host: str = "generativelanguage.googleapis.com",
171
+ credentials: Optional[ga_credentials.Credentials] = None,
172
+ credentials_file: Optional[str] = None,
173
+ scopes: Optional[Sequence[str]] = None,
174
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
175
+ quota_project_id: Optional[str] = None,
176
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
177
+ always_use_jwt_access: Optional[bool] = False,
178
+ url_scheme: str = "https",
179
+ interceptor: Optional[DiscussServiceRestInterceptor] = None,
180
+ api_audience: Optional[str] = None,
181
+ ) -> None:
182
+ """Instantiate the transport.
183
+
184
+ Args:
185
+ host (Optional[str]):
186
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
187
+ credentials (Optional[google.auth.credentials.Credentials]): The
188
+ authorization credentials to attach to requests. These
189
+ credentials identify the application to the service; if none
190
+ are specified, the client will attempt to ascertain the
191
+ credentials from the environment.
192
+
193
+ credentials_file (Optional[str]): A file with credentials that can
194
+ be loaded with :func:`google.auth.load_credentials_from_file`.
195
+ This argument is ignored if ``channel`` is provided.
196
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
197
+ ignored if ``channel`` is provided.
198
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
199
+ certificate to configure mutual TLS HTTP channel. It is ignored
200
+ if ``channel`` is provided.
201
+ quota_project_id (Optional[str]): An optional project to use for billing
202
+ and quota.
203
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
204
+ The client info used to send a user-agent string along with
205
+ API requests. If ``None``, then default info will be used.
206
+ Generally, you only need to set this if you are developing
207
+ your own client library.
208
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
209
+ be used for service account credentials.
210
+ url_scheme: the protocol scheme for the API endpoint. Normally
211
+ "https", but for testing or local servers,
212
+ "http" can be specified.
213
+ """
214
+ # Run the base constructor
215
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
216
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
217
+ # credentials object
218
+ super().__init__(
219
+ host=host,
220
+ credentials=credentials,
221
+ client_info=client_info,
222
+ always_use_jwt_access=always_use_jwt_access,
223
+ url_scheme=url_scheme,
224
+ api_audience=api_audience,
225
+ )
226
+ self._session = AuthorizedSession(
227
+ self._credentials, default_host=self.DEFAULT_HOST
228
+ )
229
+ if client_cert_source_for_mtls:
230
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
231
+ self._interceptor = interceptor or DiscussServiceRestInterceptor()
232
+ self._prep_wrapped_messages(client_info)
233
+
234
+ class _CountMessageTokens(
235
+ _BaseDiscussServiceRestTransport._BaseCountMessageTokens, DiscussServiceRestStub
236
+ ):
237
+ def __hash__(self):
238
+ return hash("DiscussServiceRestTransport.CountMessageTokens")
239
+
240
+ @staticmethod
241
+ def _get_response(
242
+ host,
243
+ metadata,
244
+ query_params,
245
+ session,
246
+ timeout,
247
+ transcoded_request,
248
+ body=None,
249
+ ):
250
+ uri = transcoded_request["uri"]
251
+ method = transcoded_request["method"]
252
+ headers = dict(metadata)
253
+ headers["Content-Type"] = "application/json"
254
+ response = getattr(session, method)(
255
+ "{host}{uri}".format(host=host, uri=uri),
256
+ timeout=timeout,
257
+ headers=headers,
258
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
259
+ data=body,
260
+ )
261
+ return response
262
+
263
+ def __call__(
264
+ self,
265
+ request: discuss_service.CountMessageTokensRequest,
266
+ *,
267
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
268
+ timeout: Optional[float] = None,
269
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
270
+ ) -> discuss_service.CountMessageTokensResponse:
271
+ r"""Call the count message tokens method over HTTP.
272
+
273
+ Args:
274
+ request (~.discuss_service.CountMessageTokensRequest):
275
+ The request object. Counts the number of tokens in the ``prompt`` sent to a
276
+ model.
277
+
278
+ Models may tokenize text differently, so each model may
279
+ return a different ``token_count``.
280
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
281
+ should be retried.
282
+ timeout (float): The timeout for this request.
283
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
284
+ sent along with the request as metadata. Normally, each value must be of type `str`,
285
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
286
+ be of type `bytes`.
287
+
288
+ Returns:
289
+ ~.discuss_service.CountMessageTokensResponse:
290
+ A response from ``CountMessageTokens``.
291
+
292
+ It returns the model's ``token_count`` for the
293
+ ``prompt``.
294
+
295
+ """
296
+
297
+ http_options = (
298
+ _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_http_options()
299
+ )
300
+
301
+ request, metadata = self._interceptor.pre_count_message_tokens(
302
+ request, metadata
303
+ )
304
+ transcoded_request = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_transcoded_request(
305
+ http_options, request
306
+ )
307
+
308
+ body = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_request_body_json(
309
+ transcoded_request
310
+ )
311
+
312
+ # Jsonify the query params
313
+ query_params = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_query_params_json(
314
+ transcoded_request
315
+ )
316
+
317
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
318
+ logging.DEBUG
319
+ ): # pragma: NO COVER
320
+ request_url = "{host}{uri}".format(
321
+ host=self._host, uri=transcoded_request["uri"]
322
+ )
323
+ method = transcoded_request["method"]
324
+ try:
325
+ request_payload = type(request).to_json(request)
326
+ except:
327
+ request_payload = None
328
+ http_request = {
329
+ "payload": request_payload,
330
+ "requestMethod": method,
331
+ "requestUrl": request_url,
332
+ "headers": dict(metadata),
333
+ }
334
+ _LOGGER.debug(
335
+ f"Sending request for google.ai.generativelanguage_v1beta2.DiscussServiceClient.CountMessageTokens",
336
+ extra={
337
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
338
+ "rpcName": "CountMessageTokens",
339
+ "httpRequest": http_request,
340
+ "metadata": http_request["headers"],
341
+ },
342
+ )
343
+
344
+ # Send the request
345
+ response = DiscussServiceRestTransport._CountMessageTokens._get_response(
346
+ self._host,
347
+ metadata,
348
+ query_params,
349
+ self._session,
350
+ timeout,
351
+ transcoded_request,
352
+ body,
353
+ )
354
+
355
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
356
+ # subclass.
357
+ if response.status_code >= 400:
358
+ raise core_exceptions.from_http_response(response)
359
+
360
+ # Return the response
361
+ resp = discuss_service.CountMessageTokensResponse()
362
+ pb_resp = discuss_service.CountMessageTokensResponse.pb(resp)
363
+
364
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
365
+
366
+ resp = self._interceptor.post_count_message_tokens(resp)
367
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
368
+ logging.DEBUG
369
+ ): # pragma: NO COVER
370
+ try:
371
+ response_payload = (
372
+ discuss_service.CountMessageTokensResponse.to_json(response)
373
+ )
374
+ except:
375
+ response_payload = None
376
+ http_response = {
377
+ "payload": response_payload,
378
+ "headers": dict(response.headers),
379
+ "status": response.status_code,
380
+ }
381
+ _LOGGER.debug(
382
+ "Received response for google.ai.generativelanguage_v1beta2.DiscussServiceClient.count_message_tokens",
383
+ extra={
384
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
385
+ "rpcName": "CountMessageTokens",
386
+ "metadata": http_response["headers"],
387
+ "httpResponse": http_response,
388
+ },
389
+ )
390
+ return resp
391
+
392
+ class _GenerateMessage(
393
+ _BaseDiscussServiceRestTransport._BaseGenerateMessage, DiscussServiceRestStub
394
+ ):
395
+ def __hash__(self):
396
+ return hash("DiscussServiceRestTransport.GenerateMessage")
397
+
398
+ @staticmethod
399
+ def _get_response(
400
+ host,
401
+ metadata,
402
+ query_params,
403
+ session,
404
+ timeout,
405
+ transcoded_request,
406
+ body=None,
407
+ ):
408
+ uri = transcoded_request["uri"]
409
+ method = transcoded_request["method"]
410
+ headers = dict(metadata)
411
+ headers["Content-Type"] = "application/json"
412
+ response = getattr(session, method)(
413
+ "{host}{uri}".format(host=host, uri=uri),
414
+ timeout=timeout,
415
+ headers=headers,
416
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
417
+ data=body,
418
+ )
419
+ return response
420
+
421
+ def __call__(
422
+ self,
423
+ request: discuss_service.GenerateMessageRequest,
424
+ *,
425
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
426
+ timeout: Optional[float] = None,
427
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
428
+ ) -> discuss_service.GenerateMessageResponse:
429
+ r"""Call the generate message method over HTTP.
430
+
431
+ Args:
432
+ request (~.discuss_service.GenerateMessageRequest):
433
+ The request object. Request to generate a message
434
+ response from the model.
435
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
436
+ should be retried.
437
+ timeout (float): The timeout for this request.
438
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
439
+ sent along with the request as metadata. Normally, each value must be of type `str`,
440
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
441
+ be of type `bytes`.
442
+
443
+ Returns:
444
+ ~.discuss_service.GenerateMessageResponse:
445
+ The response from the model.
446
+
447
+ This includes candidate messages and
448
+ conversation history in the form of
449
+ chronologically-ordered messages.
450
+
451
+ """
452
+
453
+ http_options = (
454
+ _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_http_options()
455
+ )
456
+
457
+ request, metadata = self._interceptor.pre_generate_message(
458
+ request, metadata
459
+ )
460
+ transcoded_request = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_transcoded_request(
461
+ http_options, request
462
+ )
463
+
464
+ body = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_request_body_json(
465
+ transcoded_request
466
+ )
467
+
468
+ # Jsonify the query params
469
+ query_params = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_query_params_json(
470
+ transcoded_request
471
+ )
472
+
473
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
474
+ logging.DEBUG
475
+ ): # pragma: NO COVER
476
+ request_url = "{host}{uri}".format(
477
+ host=self._host, uri=transcoded_request["uri"]
478
+ )
479
+ method = transcoded_request["method"]
480
+ try:
481
+ request_payload = type(request).to_json(request)
482
+ except:
483
+ request_payload = None
484
+ http_request = {
485
+ "payload": request_payload,
486
+ "requestMethod": method,
487
+ "requestUrl": request_url,
488
+ "headers": dict(metadata),
489
+ }
490
+ _LOGGER.debug(
491
+ f"Sending request for google.ai.generativelanguage_v1beta2.DiscussServiceClient.GenerateMessage",
492
+ extra={
493
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
494
+ "rpcName": "GenerateMessage",
495
+ "httpRequest": http_request,
496
+ "metadata": http_request["headers"],
497
+ },
498
+ )
499
+
500
+ # Send the request
501
+ response = DiscussServiceRestTransport._GenerateMessage._get_response(
502
+ self._host,
503
+ metadata,
504
+ query_params,
505
+ self._session,
506
+ timeout,
507
+ transcoded_request,
508
+ body,
509
+ )
510
+
511
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
512
+ # subclass.
513
+ if response.status_code >= 400:
514
+ raise core_exceptions.from_http_response(response)
515
+
516
+ # Return the response
517
+ resp = discuss_service.GenerateMessageResponse()
518
+ pb_resp = discuss_service.GenerateMessageResponse.pb(resp)
519
+
520
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
521
+
522
+ resp = self._interceptor.post_generate_message(resp)
523
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
524
+ logging.DEBUG
525
+ ): # pragma: NO COVER
526
+ try:
527
+ response_payload = discuss_service.GenerateMessageResponse.to_json(
528
+ response
529
+ )
530
+ except:
531
+ response_payload = None
532
+ http_response = {
533
+ "payload": response_payload,
534
+ "headers": dict(response.headers),
535
+ "status": response.status_code,
536
+ }
537
+ _LOGGER.debug(
538
+ "Received response for google.ai.generativelanguage_v1beta2.DiscussServiceClient.generate_message",
539
+ extra={
540
+ "serviceName": "google.ai.generativelanguage.v1beta2.DiscussService",
541
+ "rpcName": "GenerateMessage",
542
+ "metadata": http_response["headers"],
543
+ "httpResponse": http_response,
544
+ },
545
+ )
546
+ return resp
547
+
548
+ @property
549
+ def count_message_tokens(
550
+ self,
551
+ ) -> Callable[
552
+ [discuss_service.CountMessageTokensRequest],
553
+ discuss_service.CountMessageTokensResponse,
554
+ ]:
555
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
556
+ # In C++ this would require a dynamic_cast
557
+ return self._CountMessageTokens(self._session, self._host, self._interceptor) # type: ignore
558
+
559
+ @property
560
+ def generate_message(
561
+ self,
562
+ ) -> Callable[
563
+ [discuss_service.GenerateMessageRequest],
564
+ discuss_service.GenerateMessageResponse,
565
+ ]:
566
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
567
+ # In C++ this would require a dynamic_cast
568
+ return self._GenerateMessage(self._session, self._host, self._interceptor) # type: ignore
569
+
570
+ @property
571
+ def kind(self) -> str:
572
+ return "rest"
573
+
574
+ def close(self):
575
+ self._session.close()
576
+
577
+
578
+ __all__ = ("DiscussServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta2/services/discuss_service/transports/rest_base.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.protobuf import json_format
22
+
23
+ from google.ai.generativelanguage_v1beta2.types import discuss_service
24
+
25
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
26
+
27
+
28
class _BaseDiscussServiceRestTransport(DiscussServiceTransport):
    """Base REST backend transport for DiscussService.

    Note: This class is not meant to be used directly. Use its sync and
    async sub-classes instead.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[Any] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[Any]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.

        Raises:
            ValueError: If ``host`` cannot be parsed into an optional
                scheme plus hostname (not expected in practice).
        """
        # Split an optional "http://" / "https://" prefix off the host so
        # a bare hostname can be normalized below.
        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if maybe_url_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER

        url_match_items = maybe_url_match.groupdict()

        # Prepend the configured scheme only when the caller supplied a
        # bare hostname without one.
        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host

        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

    class _BaseCountMessageTokens:
        """Request-shaping helpers for the CountMessageTokens RPC."""

        def __hash__(self):  # pragma: NO COVER
            # Fixed: the generated code *returned* the exception instance,
            # which made hash() fail with an unrelated TypeError instead of
            # surfacing the intended error.
            raise NotImplementedError("__hash__ must be implemented.")

        # Defaults for required request fields a caller may omit; empty
        # for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            """Return defaults for required fields missing from ``message_dict``."""
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        @staticmethod
        def _get_http_options():
            """Return the HTTP rule bindings for CountMessageTokens."""
            http_options: List[Dict[str, str]] = [
                {
                    "method": "post",
                    "uri": "/v1beta2/{model=models/*}:countMessageTokens",
                    "body": "*",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            """Transcode the proto request into method/uri/body/query parts."""
            pb_request = discuss_service.CountMessageTokensRequest.pb(request)
            transcoded_request = path_template.transcode(http_options, pb_request)
            return transcoded_request

        @staticmethod
        def _get_request_body_json(transcoded_request):
            """Serialize the transcoded request body as JSON (integer enums)."""
            body = json_format.MessageToJson(
                transcoded_request["body"], use_integers_for_enums=True
            )
            return body

        @staticmethod
        def _get_query_params_json(transcoded_request):
            """Build the query-parameter dict, back-filling unset required fields."""
            query_params = json.loads(
                json_format.MessageToJson(
                    transcoded_request["query_params"],
                    use_integers_for_enums=True,
                )
            )
            query_params.update(
                _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_unset_required_fields(
                    query_params
                )
            )

            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseGenerateMessage:
        """Request-shaping helpers for the GenerateMessage RPC."""

        def __hash__(self):  # pragma: NO COVER
            # Fixed: the generated code *returned* the exception instance,
            # which made hash() fail with an unrelated TypeError instead of
            # surfacing the intended error.
            raise NotImplementedError("__hash__ must be implemented.")

        # Defaults for required request fields a caller may omit; empty
        # for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            """Return defaults for required fields missing from ``message_dict``."""
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        @staticmethod
        def _get_http_options():
            """Return the HTTP rule bindings for GenerateMessage."""
            http_options: List[Dict[str, str]] = [
                {
                    "method": "post",
                    "uri": "/v1beta2/{model=models/*}:generateMessage",
                    "body": "*",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            """Transcode the proto request into method/uri/body/query parts."""
            pb_request = discuss_service.GenerateMessageRequest.pb(request)
            transcoded_request = path_template.transcode(http_options, pb_request)
            return transcoded_request

        @staticmethod
        def _get_request_body_json(transcoded_request):
            """Serialize the transcoded request body as JSON (integer enums)."""
            body = json_format.MessageToJson(
                transcoded_request["body"], use_integers_for_enums=True
            )
            return body

        @staticmethod
        def _get_query_params_json(transcoded_request):
            """Build the query-parameter dict, back-filling unset required fields."""
            query_params = json.loads(
                json_format.MessageToJson(
                    transcoded_request["query_params"],
                    use_integers_for_enums=True,
                )
            )
            query_params.update(
                _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_unset_required_fields(
                    query_params
                )
            )

            query_params["$alt"] = "json;enum-encoding=int"
            return query_params
203
+
204
+
205
# Only the shared base transport is exported from this module; the sync
# and async transports subclass it from their own modules.
__all__ = ("_BaseDiscussServiceRestTransport",)