koichi12 commited on
Commit
dae2552
·
verified ·
1 Parent(s): db1baab

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/__init__.py +413 -0
  2. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/gapic_metadata.json +1020 -0
  3. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/py.typed +2 -0
  4. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__init__.py +369 -0
  5. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/cached_content.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/discuss_service.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/file.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/generative_service.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/model_service.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/permission.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/retriever.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/safety.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/tuned_model.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/cache_service.py +167 -0
  15. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/content.py +819 -0
  16. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/model.py +171 -0
  17. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/model_service.py +332 -0
  18. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/prediction_service.py +79 -0
  19. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/retriever.py +411 -0
  20. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/retriever_service.py +793 -0
  21. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/text_service.py +441 -0
  22. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/tuned_model.py +542 -0
  23. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__pycache__/__init__.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__pycache__/gapic_version.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/__pycache__/__init__.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__init__.py +22 -0
  27. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/__init__.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/async_client.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/client.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/async_client.py +630 -0
  31. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/client.py +1018 -0
  32. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__init__.py +36 -0
  33. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  39. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/base.py +182 -0
  40. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/grpc.py +395 -0
  41. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/grpc_asyncio.py +422 -0
  42. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/rest.py +579 -0
  43. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/rest_base.py +206 -0
  44. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/__init__.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/async_client.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/client.cpython-311.pyc +0 -0
  47. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/pagers.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/async_client.py +1156 -0
  49. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/__pycache__/__init__.cpython-311.pyc +0 -0
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/__init__.py ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
17
+
18
+ __version__ = package_version.__version__
19
+
20
+
21
+ from .services.cache_service import CacheServiceAsyncClient, CacheServiceClient
22
+ from .services.discuss_service import DiscussServiceAsyncClient, DiscussServiceClient
23
+ from .services.file_service import FileServiceAsyncClient, FileServiceClient
24
+ from .services.generative_service import (
25
+ GenerativeServiceAsyncClient,
26
+ GenerativeServiceClient,
27
+ )
28
+ from .services.model_service import ModelServiceAsyncClient, ModelServiceClient
29
+ from .services.permission_service import (
30
+ PermissionServiceAsyncClient,
31
+ PermissionServiceClient,
32
+ )
33
+ from .services.prediction_service import (
34
+ PredictionServiceAsyncClient,
35
+ PredictionServiceClient,
36
+ )
37
+ from .services.retriever_service import (
38
+ RetrieverServiceAsyncClient,
39
+ RetrieverServiceClient,
40
+ )
41
+ from .services.text_service import TextServiceAsyncClient, TextServiceClient
42
+ from .types.cache_service import (
43
+ CreateCachedContentRequest,
44
+ DeleteCachedContentRequest,
45
+ GetCachedContentRequest,
46
+ ListCachedContentsRequest,
47
+ ListCachedContentsResponse,
48
+ UpdateCachedContentRequest,
49
+ )
50
+ from .types.cached_content import CachedContent
51
+ from .types.citation import CitationMetadata, CitationSource
52
+ from .types.content import (
53
+ Blob,
54
+ CodeExecution,
55
+ CodeExecutionResult,
56
+ Content,
57
+ DynamicRetrievalConfig,
58
+ ExecutableCode,
59
+ FileData,
60
+ FunctionCall,
61
+ FunctionCallingConfig,
62
+ FunctionDeclaration,
63
+ FunctionResponse,
64
+ GoogleSearchRetrieval,
65
+ GroundingPassage,
66
+ GroundingPassages,
67
+ Part,
68
+ Schema,
69
+ Tool,
70
+ ToolConfig,
71
+ Type,
72
+ )
73
+ from .types.discuss_service import (
74
+ CountMessageTokensRequest,
75
+ CountMessageTokensResponse,
76
+ Example,
77
+ GenerateMessageRequest,
78
+ GenerateMessageResponse,
79
+ Message,
80
+ MessagePrompt,
81
+ )
82
+ from .types.file import File, VideoMetadata
83
+ from .types.file_service import (
84
+ CreateFileRequest,
85
+ CreateFileResponse,
86
+ DeleteFileRequest,
87
+ GetFileRequest,
88
+ ListFilesRequest,
89
+ ListFilesResponse,
90
+ )
91
+ from .types.generative_service import (
92
+ AttributionSourceId,
93
+ BatchEmbedContentsRequest,
94
+ BatchEmbedContentsResponse,
95
+ BidiGenerateContentClientContent,
96
+ BidiGenerateContentClientMessage,
97
+ BidiGenerateContentRealtimeInput,
98
+ BidiGenerateContentServerContent,
99
+ BidiGenerateContentServerMessage,
100
+ BidiGenerateContentSetup,
101
+ BidiGenerateContentSetupComplete,
102
+ BidiGenerateContentToolCall,
103
+ BidiGenerateContentToolCallCancellation,
104
+ BidiGenerateContentToolResponse,
105
+ Candidate,
106
+ ContentEmbedding,
107
+ CountTokensRequest,
108
+ CountTokensResponse,
109
+ EmbedContentRequest,
110
+ EmbedContentResponse,
111
+ GenerateAnswerRequest,
112
+ GenerateAnswerResponse,
113
+ GenerateContentRequest,
114
+ GenerateContentResponse,
115
+ GenerationConfig,
116
+ GroundingAttribution,
117
+ GroundingChunk,
118
+ GroundingMetadata,
119
+ GroundingSupport,
120
+ LogprobsResult,
121
+ PrebuiltVoiceConfig,
122
+ RetrievalMetadata,
123
+ SearchEntryPoint,
124
+ Segment,
125
+ SemanticRetrieverConfig,
126
+ SpeechConfig,
127
+ TaskType,
128
+ VoiceConfig,
129
+ )
130
+ from .types.model import Model
131
+ from .types.model_service import (
132
+ CreateTunedModelMetadata,
133
+ CreateTunedModelRequest,
134
+ DeleteTunedModelRequest,
135
+ GetModelRequest,
136
+ GetTunedModelRequest,
137
+ ListModelsRequest,
138
+ ListModelsResponse,
139
+ ListTunedModelsRequest,
140
+ ListTunedModelsResponse,
141
+ UpdateTunedModelRequest,
142
+ )
143
+ from .types.permission import Permission
144
+ from .types.permission_service import (
145
+ CreatePermissionRequest,
146
+ DeletePermissionRequest,
147
+ GetPermissionRequest,
148
+ ListPermissionsRequest,
149
+ ListPermissionsResponse,
150
+ TransferOwnershipRequest,
151
+ TransferOwnershipResponse,
152
+ UpdatePermissionRequest,
153
+ )
154
+ from .types.prediction_service import PredictRequest, PredictResponse
155
+ from .types.retriever import (
156
+ Chunk,
157
+ ChunkData,
158
+ Condition,
159
+ Corpus,
160
+ CustomMetadata,
161
+ Document,
162
+ MetadataFilter,
163
+ StringList,
164
+ )
165
+ from .types.retriever_service import (
166
+ BatchCreateChunksRequest,
167
+ BatchCreateChunksResponse,
168
+ BatchDeleteChunksRequest,
169
+ BatchUpdateChunksRequest,
170
+ BatchUpdateChunksResponse,
171
+ CreateChunkRequest,
172
+ CreateCorpusRequest,
173
+ CreateDocumentRequest,
174
+ DeleteChunkRequest,
175
+ DeleteCorpusRequest,
176
+ DeleteDocumentRequest,
177
+ GetChunkRequest,
178
+ GetCorpusRequest,
179
+ GetDocumentRequest,
180
+ ListChunksRequest,
181
+ ListChunksResponse,
182
+ ListCorporaRequest,
183
+ ListCorporaResponse,
184
+ ListDocumentsRequest,
185
+ ListDocumentsResponse,
186
+ QueryCorpusRequest,
187
+ QueryCorpusResponse,
188
+ QueryDocumentRequest,
189
+ QueryDocumentResponse,
190
+ RelevantChunk,
191
+ UpdateChunkRequest,
192
+ UpdateCorpusRequest,
193
+ UpdateDocumentRequest,
194
+ )
195
+ from .types.safety import (
196
+ ContentFilter,
197
+ HarmCategory,
198
+ SafetyFeedback,
199
+ SafetyRating,
200
+ SafetySetting,
201
+ )
202
+ from .types.text_service import (
203
+ BatchEmbedTextRequest,
204
+ BatchEmbedTextResponse,
205
+ CountTextTokensRequest,
206
+ CountTextTokensResponse,
207
+ Embedding,
208
+ EmbedTextRequest,
209
+ EmbedTextResponse,
210
+ GenerateTextRequest,
211
+ GenerateTextResponse,
212
+ TextCompletion,
213
+ TextPrompt,
214
+ )
215
+ from .types.tuned_model import (
216
+ Dataset,
217
+ Hyperparameters,
218
+ TunedModel,
219
+ TunedModelSource,
220
+ TuningContent,
221
+ TuningExample,
222
+ TuningExamples,
223
+ TuningMultiturnExample,
224
+ TuningPart,
225
+ TuningSnapshot,
226
+ TuningTask,
227
+ )
228
+
229
+ __all__ = (
230
+ "CacheServiceAsyncClient",
231
+ "DiscussServiceAsyncClient",
232
+ "FileServiceAsyncClient",
233
+ "GenerativeServiceAsyncClient",
234
+ "ModelServiceAsyncClient",
235
+ "PermissionServiceAsyncClient",
236
+ "PredictionServiceAsyncClient",
237
+ "RetrieverServiceAsyncClient",
238
+ "TextServiceAsyncClient",
239
+ "AttributionSourceId",
240
+ "BatchCreateChunksRequest",
241
+ "BatchCreateChunksResponse",
242
+ "BatchDeleteChunksRequest",
243
+ "BatchEmbedContentsRequest",
244
+ "BatchEmbedContentsResponse",
245
+ "BatchEmbedTextRequest",
246
+ "BatchEmbedTextResponse",
247
+ "BatchUpdateChunksRequest",
248
+ "BatchUpdateChunksResponse",
249
+ "BidiGenerateContentClientContent",
250
+ "BidiGenerateContentClientMessage",
251
+ "BidiGenerateContentRealtimeInput",
252
+ "BidiGenerateContentServerContent",
253
+ "BidiGenerateContentServerMessage",
254
+ "BidiGenerateContentSetup",
255
+ "BidiGenerateContentSetupComplete",
256
+ "BidiGenerateContentToolCall",
257
+ "BidiGenerateContentToolCallCancellation",
258
+ "BidiGenerateContentToolResponse",
259
+ "Blob",
260
+ "CacheServiceClient",
261
+ "CachedContent",
262
+ "Candidate",
263
+ "Chunk",
264
+ "ChunkData",
265
+ "CitationMetadata",
266
+ "CitationSource",
267
+ "CodeExecution",
268
+ "CodeExecutionResult",
269
+ "Condition",
270
+ "Content",
271
+ "ContentEmbedding",
272
+ "ContentFilter",
273
+ "Corpus",
274
+ "CountMessageTokensRequest",
275
+ "CountMessageTokensResponse",
276
+ "CountTextTokensRequest",
277
+ "CountTextTokensResponse",
278
+ "CountTokensRequest",
279
+ "CountTokensResponse",
280
+ "CreateCachedContentRequest",
281
+ "CreateChunkRequest",
282
+ "CreateCorpusRequest",
283
+ "CreateDocumentRequest",
284
+ "CreateFileRequest",
285
+ "CreateFileResponse",
286
+ "CreatePermissionRequest",
287
+ "CreateTunedModelMetadata",
288
+ "CreateTunedModelRequest",
289
+ "CustomMetadata",
290
+ "Dataset",
291
+ "DeleteCachedContentRequest",
292
+ "DeleteChunkRequest",
293
+ "DeleteCorpusRequest",
294
+ "DeleteDocumentRequest",
295
+ "DeleteFileRequest",
296
+ "DeletePermissionRequest",
297
+ "DeleteTunedModelRequest",
298
+ "DiscussServiceClient",
299
+ "Document",
300
+ "DynamicRetrievalConfig",
301
+ "EmbedContentRequest",
302
+ "EmbedContentResponse",
303
+ "EmbedTextRequest",
304
+ "EmbedTextResponse",
305
+ "Embedding",
306
+ "Example",
307
+ "ExecutableCode",
308
+ "File",
309
+ "FileData",
310
+ "FileServiceClient",
311
+ "FunctionCall",
312
+ "FunctionCallingConfig",
313
+ "FunctionDeclaration",
314
+ "FunctionResponse",
315
+ "GenerateAnswerRequest",
316
+ "GenerateAnswerResponse",
317
+ "GenerateContentRequest",
318
+ "GenerateContentResponse",
319
+ "GenerateMessageRequest",
320
+ "GenerateMessageResponse",
321
+ "GenerateTextRequest",
322
+ "GenerateTextResponse",
323
+ "GenerationConfig",
324
+ "GenerativeServiceClient",
325
+ "GetCachedContentRequest",
326
+ "GetChunkRequest",
327
+ "GetCorpusRequest",
328
+ "GetDocumentRequest",
329
+ "GetFileRequest",
330
+ "GetModelRequest",
331
+ "GetPermissionRequest",
332
+ "GetTunedModelRequest",
333
+ "GoogleSearchRetrieval",
334
+ "GroundingAttribution",
335
+ "GroundingChunk",
336
+ "GroundingMetadata",
337
+ "GroundingPassage",
338
+ "GroundingPassages",
339
+ "GroundingSupport",
340
+ "HarmCategory",
341
+ "Hyperparameters",
342
+ "ListCachedContentsRequest",
343
+ "ListCachedContentsResponse",
344
+ "ListChunksRequest",
345
+ "ListChunksResponse",
346
+ "ListCorporaRequest",
347
+ "ListCorporaResponse",
348
+ "ListDocumentsRequest",
349
+ "ListDocumentsResponse",
350
+ "ListFilesRequest",
351
+ "ListFilesResponse",
352
+ "ListModelsRequest",
353
+ "ListModelsResponse",
354
+ "ListPermissionsRequest",
355
+ "ListPermissionsResponse",
356
+ "ListTunedModelsRequest",
357
+ "ListTunedModelsResponse",
358
+ "LogprobsResult",
359
+ "Message",
360
+ "MessagePrompt",
361
+ "MetadataFilter",
362
+ "Model",
363
+ "ModelServiceClient",
364
+ "Part",
365
+ "Permission",
366
+ "PermissionServiceClient",
367
+ "PrebuiltVoiceConfig",
368
+ "PredictRequest",
369
+ "PredictResponse",
370
+ "PredictionServiceClient",
371
+ "QueryCorpusRequest",
372
+ "QueryCorpusResponse",
373
+ "QueryDocumentRequest",
374
+ "QueryDocumentResponse",
375
+ "RelevantChunk",
376
+ "RetrievalMetadata",
377
+ "RetrieverServiceClient",
378
+ "SafetyFeedback",
379
+ "SafetyRating",
380
+ "SafetySetting",
381
+ "Schema",
382
+ "SearchEntryPoint",
383
+ "Segment",
384
+ "SemanticRetrieverConfig",
385
+ "SpeechConfig",
386
+ "StringList",
387
+ "TaskType",
388
+ "TextCompletion",
389
+ "TextPrompt",
390
+ "TextServiceClient",
391
+ "Tool",
392
+ "ToolConfig",
393
+ "TransferOwnershipRequest",
394
+ "TransferOwnershipResponse",
395
+ "TunedModel",
396
+ "TunedModelSource",
397
+ "TuningContent",
398
+ "TuningExample",
399
+ "TuningExamples",
400
+ "TuningMultiturnExample",
401
+ "TuningPart",
402
+ "TuningSnapshot",
403
+ "TuningTask",
404
+ "Type",
405
+ "UpdateCachedContentRequest",
406
+ "UpdateChunkRequest",
407
+ "UpdateCorpusRequest",
408
+ "UpdateDocumentRequest",
409
+ "UpdatePermissionRequest",
410
+ "UpdateTunedModelRequest",
411
+ "VideoMetadata",
412
+ "VoiceConfig",
413
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/gapic_metadata.json ADDED
@@ -0,0 +1,1020 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
3
+ "language": "python",
4
+ "libraryPackage": "google.ai.generativelanguage_v1alpha",
5
+ "protoPackage": "google.ai.generativelanguage.v1alpha",
6
+ "schema": "1.0",
7
+ "services": {
8
+ "CacheService": {
9
+ "clients": {
10
+ "grpc": {
11
+ "libraryClient": "CacheServiceClient",
12
+ "rpcs": {
13
+ "CreateCachedContent": {
14
+ "methods": [
15
+ "create_cached_content"
16
+ ]
17
+ },
18
+ "DeleteCachedContent": {
19
+ "methods": [
20
+ "delete_cached_content"
21
+ ]
22
+ },
23
+ "GetCachedContent": {
24
+ "methods": [
25
+ "get_cached_content"
26
+ ]
27
+ },
28
+ "ListCachedContents": {
29
+ "methods": [
30
+ "list_cached_contents"
31
+ ]
32
+ },
33
+ "UpdateCachedContent": {
34
+ "methods": [
35
+ "update_cached_content"
36
+ ]
37
+ }
38
+ }
39
+ },
40
+ "grpc-async": {
41
+ "libraryClient": "CacheServiceAsyncClient",
42
+ "rpcs": {
43
+ "CreateCachedContent": {
44
+ "methods": [
45
+ "create_cached_content"
46
+ ]
47
+ },
48
+ "DeleteCachedContent": {
49
+ "methods": [
50
+ "delete_cached_content"
51
+ ]
52
+ },
53
+ "GetCachedContent": {
54
+ "methods": [
55
+ "get_cached_content"
56
+ ]
57
+ },
58
+ "ListCachedContents": {
59
+ "methods": [
60
+ "list_cached_contents"
61
+ ]
62
+ },
63
+ "UpdateCachedContent": {
64
+ "methods": [
65
+ "update_cached_content"
66
+ ]
67
+ }
68
+ }
69
+ },
70
+ "rest": {
71
+ "libraryClient": "CacheServiceClient",
72
+ "rpcs": {
73
+ "CreateCachedContent": {
74
+ "methods": [
75
+ "create_cached_content"
76
+ ]
77
+ },
78
+ "DeleteCachedContent": {
79
+ "methods": [
80
+ "delete_cached_content"
81
+ ]
82
+ },
83
+ "GetCachedContent": {
84
+ "methods": [
85
+ "get_cached_content"
86
+ ]
87
+ },
88
+ "ListCachedContents": {
89
+ "methods": [
90
+ "list_cached_contents"
91
+ ]
92
+ },
93
+ "UpdateCachedContent": {
94
+ "methods": [
95
+ "update_cached_content"
96
+ ]
97
+ }
98
+ }
99
+ }
100
+ }
101
+ },
102
+ "DiscussService": {
103
+ "clients": {
104
+ "grpc": {
105
+ "libraryClient": "DiscussServiceClient",
106
+ "rpcs": {
107
+ "CountMessageTokens": {
108
+ "methods": [
109
+ "count_message_tokens"
110
+ ]
111
+ },
112
+ "GenerateMessage": {
113
+ "methods": [
114
+ "generate_message"
115
+ ]
116
+ }
117
+ }
118
+ },
119
+ "grpc-async": {
120
+ "libraryClient": "DiscussServiceAsyncClient",
121
+ "rpcs": {
122
+ "CountMessageTokens": {
123
+ "methods": [
124
+ "count_message_tokens"
125
+ ]
126
+ },
127
+ "GenerateMessage": {
128
+ "methods": [
129
+ "generate_message"
130
+ ]
131
+ }
132
+ }
133
+ },
134
+ "rest": {
135
+ "libraryClient": "DiscussServiceClient",
136
+ "rpcs": {
137
+ "CountMessageTokens": {
138
+ "methods": [
139
+ "count_message_tokens"
140
+ ]
141
+ },
142
+ "GenerateMessage": {
143
+ "methods": [
144
+ "generate_message"
145
+ ]
146
+ }
147
+ }
148
+ }
149
+ }
150
+ },
151
+ "FileService": {
152
+ "clients": {
153
+ "grpc": {
154
+ "libraryClient": "FileServiceClient",
155
+ "rpcs": {
156
+ "CreateFile": {
157
+ "methods": [
158
+ "create_file"
159
+ ]
160
+ },
161
+ "DeleteFile": {
162
+ "methods": [
163
+ "delete_file"
164
+ ]
165
+ },
166
+ "GetFile": {
167
+ "methods": [
168
+ "get_file"
169
+ ]
170
+ },
171
+ "ListFiles": {
172
+ "methods": [
173
+ "list_files"
174
+ ]
175
+ }
176
+ }
177
+ },
178
+ "grpc-async": {
179
+ "libraryClient": "FileServiceAsyncClient",
180
+ "rpcs": {
181
+ "CreateFile": {
182
+ "methods": [
183
+ "create_file"
184
+ ]
185
+ },
186
+ "DeleteFile": {
187
+ "methods": [
188
+ "delete_file"
189
+ ]
190
+ },
191
+ "GetFile": {
192
+ "methods": [
193
+ "get_file"
194
+ ]
195
+ },
196
+ "ListFiles": {
197
+ "methods": [
198
+ "list_files"
199
+ ]
200
+ }
201
+ }
202
+ },
203
+ "rest": {
204
+ "libraryClient": "FileServiceClient",
205
+ "rpcs": {
206
+ "CreateFile": {
207
+ "methods": [
208
+ "create_file"
209
+ ]
210
+ },
211
+ "DeleteFile": {
212
+ "methods": [
213
+ "delete_file"
214
+ ]
215
+ },
216
+ "GetFile": {
217
+ "methods": [
218
+ "get_file"
219
+ ]
220
+ },
221
+ "ListFiles": {
222
+ "methods": [
223
+ "list_files"
224
+ ]
225
+ }
226
+ }
227
+ }
228
+ }
229
+ },
230
+ "GenerativeService": {
231
+ "clients": {
232
+ "grpc": {
233
+ "libraryClient": "GenerativeServiceClient",
234
+ "rpcs": {
235
+ "BatchEmbedContents": {
236
+ "methods": [
237
+ "batch_embed_contents"
238
+ ]
239
+ },
240
+ "BidiGenerateContent": {
241
+ "methods": [
242
+ "bidi_generate_content"
243
+ ]
244
+ },
245
+ "CountTokens": {
246
+ "methods": [
247
+ "count_tokens"
248
+ ]
249
+ },
250
+ "EmbedContent": {
251
+ "methods": [
252
+ "embed_content"
253
+ ]
254
+ },
255
+ "GenerateAnswer": {
256
+ "methods": [
257
+ "generate_answer"
258
+ ]
259
+ },
260
+ "GenerateContent": {
261
+ "methods": [
262
+ "generate_content"
263
+ ]
264
+ },
265
+ "StreamGenerateContent": {
266
+ "methods": [
267
+ "stream_generate_content"
268
+ ]
269
+ }
270
+ }
271
+ },
272
+ "grpc-async": {
273
+ "libraryClient": "GenerativeServiceAsyncClient",
274
+ "rpcs": {
275
+ "BatchEmbedContents": {
276
+ "methods": [
277
+ "batch_embed_contents"
278
+ ]
279
+ },
280
+ "BidiGenerateContent": {
281
+ "methods": [
282
+ "bidi_generate_content"
283
+ ]
284
+ },
285
+ "CountTokens": {
286
+ "methods": [
287
+ "count_tokens"
288
+ ]
289
+ },
290
+ "EmbedContent": {
291
+ "methods": [
292
+ "embed_content"
293
+ ]
294
+ },
295
+ "GenerateAnswer": {
296
+ "methods": [
297
+ "generate_answer"
298
+ ]
299
+ },
300
+ "GenerateContent": {
301
+ "methods": [
302
+ "generate_content"
303
+ ]
304
+ },
305
+ "StreamGenerateContent": {
306
+ "methods": [
307
+ "stream_generate_content"
308
+ ]
309
+ }
310
+ }
311
+ },
312
+ "rest": {
313
+ "libraryClient": "GenerativeServiceClient",
314
+ "rpcs": {
315
+ "BatchEmbedContents": {
316
+ "methods": [
317
+ "batch_embed_contents"
318
+ ]
319
+ },
320
+ "BidiGenerateContent": {
321
+ "methods": [
322
+ "bidi_generate_content"
323
+ ]
324
+ },
325
+ "CountTokens": {
326
+ "methods": [
327
+ "count_tokens"
328
+ ]
329
+ },
330
+ "EmbedContent": {
331
+ "methods": [
332
+ "embed_content"
333
+ ]
334
+ },
335
+ "GenerateAnswer": {
336
+ "methods": [
337
+ "generate_answer"
338
+ ]
339
+ },
340
+ "GenerateContent": {
341
+ "methods": [
342
+ "generate_content"
343
+ ]
344
+ },
345
+ "StreamGenerateContent": {
346
+ "methods": [
347
+ "stream_generate_content"
348
+ ]
349
+ }
350
+ }
351
+ }
352
+ }
353
+ },
354
+ "ModelService": {
355
+ "clients": {
356
+ "grpc": {
357
+ "libraryClient": "ModelServiceClient",
358
+ "rpcs": {
359
+ "CreateTunedModel": {
360
+ "methods": [
361
+ "create_tuned_model"
362
+ ]
363
+ },
364
+ "DeleteTunedModel": {
365
+ "methods": [
366
+ "delete_tuned_model"
367
+ ]
368
+ },
369
+ "GetModel": {
370
+ "methods": [
371
+ "get_model"
372
+ ]
373
+ },
374
+ "GetTunedModel": {
375
+ "methods": [
376
+ "get_tuned_model"
377
+ ]
378
+ },
379
+ "ListModels": {
380
+ "methods": [
381
+ "list_models"
382
+ ]
383
+ },
384
+ "ListTunedModels": {
385
+ "methods": [
386
+ "list_tuned_models"
387
+ ]
388
+ },
389
+ "UpdateTunedModel": {
390
+ "methods": [
391
+ "update_tuned_model"
392
+ ]
393
+ }
394
+ }
395
+ },
396
+ "grpc-async": {
397
+ "libraryClient": "ModelServiceAsyncClient",
398
+ "rpcs": {
399
+ "CreateTunedModel": {
400
+ "methods": [
401
+ "create_tuned_model"
402
+ ]
403
+ },
404
+ "DeleteTunedModel": {
405
+ "methods": [
406
+ "delete_tuned_model"
407
+ ]
408
+ },
409
+ "GetModel": {
410
+ "methods": [
411
+ "get_model"
412
+ ]
413
+ },
414
+ "GetTunedModel": {
415
+ "methods": [
416
+ "get_tuned_model"
417
+ ]
418
+ },
419
+ "ListModels": {
420
+ "methods": [
421
+ "list_models"
422
+ ]
423
+ },
424
+ "ListTunedModels": {
425
+ "methods": [
426
+ "list_tuned_models"
427
+ ]
428
+ },
429
+ "UpdateTunedModel": {
430
+ "methods": [
431
+ "update_tuned_model"
432
+ ]
433
+ }
434
+ }
435
+ },
436
+ "rest": {
437
+ "libraryClient": "ModelServiceClient",
438
+ "rpcs": {
439
+ "CreateTunedModel": {
440
+ "methods": [
441
+ "create_tuned_model"
442
+ ]
443
+ },
444
+ "DeleteTunedModel": {
445
+ "methods": [
446
+ "delete_tuned_model"
447
+ ]
448
+ },
449
+ "GetModel": {
450
+ "methods": [
451
+ "get_model"
452
+ ]
453
+ },
454
+ "GetTunedModel": {
455
+ "methods": [
456
+ "get_tuned_model"
457
+ ]
458
+ },
459
+ "ListModels": {
460
+ "methods": [
461
+ "list_models"
462
+ ]
463
+ },
464
+ "ListTunedModels": {
465
+ "methods": [
466
+ "list_tuned_models"
467
+ ]
468
+ },
469
+ "UpdateTunedModel": {
470
+ "methods": [
471
+ "update_tuned_model"
472
+ ]
473
+ }
474
+ }
475
+ }
476
+ }
477
+ },
478
+ "PermissionService": {
479
+ "clients": {
480
+ "grpc": {
481
+ "libraryClient": "PermissionServiceClient",
482
+ "rpcs": {
483
+ "CreatePermission": {
484
+ "methods": [
485
+ "create_permission"
486
+ ]
487
+ },
488
+ "DeletePermission": {
489
+ "methods": [
490
+ "delete_permission"
491
+ ]
492
+ },
493
+ "GetPermission": {
494
+ "methods": [
495
+ "get_permission"
496
+ ]
497
+ },
498
+ "ListPermissions": {
499
+ "methods": [
500
+ "list_permissions"
501
+ ]
502
+ },
503
+ "TransferOwnership": {
504
+ "methods": [
505
+ "transfer_ownership"
506
+ ]
507
+ },
508
+ "UpdatePermission": {
509
+ "methods": [
510
+ "update_permission"
511
+ ]
512
+ }
513
+ }
514
+ },
515
+ "grpc-async": {
516
+ "libraryClient": "PermissionServiceAsyncClient",
517
+ "rpcs": {
518
+ "CreatePermission": {
519
+ "methods": [
520
+ "create_permission"
521
+ ]
522
+ },
523
+ "DeletePermission": {
524
+ "methods": [
525
+ "delete_permission"
526
+ ]
527
+ },
528
+ "GetPermission": {
529
+ "methods": [
530
+ "get_permission"
531
+ ]
532
+ },
533
+ "ListPermissions": {
534
+ "methods": [
535
+ "list_permissions"
536
+ ]
537
+ },
538
+ "TransferOwnership": {
539
+ "methods": [
540
+ "transfer_ownership"
541
+ ]
542
+ },
543
+ "UpdatePermission": {
544
+ "methods": [
545
+ "update_permission"
546
+ ]
547
+ }
548
+ }
549
+ },
550
+ "rest": {
551
+ "libraryClient": "PermissionServiceClient",
552
+ "rpcs": {
553
+ "CreatePermission": {
554
+ "methods": [
555
+ "create_permission"
556
+ ]
557
+ },
558
+ "DeletePermission": {
559
+ "methods": [
560
+ "delete_permission"
561
+ ]
562
+ },
563
+ "GetPermission": {
564
+ "methods": [
565
+ "get_permission"
566
+ ]
567
+ },
568
+ "ListPermissions": {
569
+ "methods": [
570
+ "list_permissions"
571
+ ]
572
+ },
573
+ "TransferOwnership": {
574
+ "methods": [
575
+ "transfer_ownership"
576
+ ]
577
+ },
578
+ "UpdatePermission": {
579
+ "methods": [
580
+ "update_permission"
581
+ ]
582
+ }
583
+ }
584
+ }
585
+ }
586
+ },
587
+ "PredictionService": {
588
+ "clients": {
589
+ "grpc": {
590
+ "libraryClient": "PredictionServiceClient",
591
+ "rpcs": {
592
+ "Predict": {
593
+ "methods": [
594
+ "predict"
595
+ ]
596
+ }
597
+ }
598
+ },
599
+ "grpc-async": {
600
+ "libraryClient": "PredictionServiceAsyncClient",
601
+ "rpcs": {
602
+ "Predict": {
603
+ "methods": [
604
+ "predict"
605
+ ]
606
+ }
607
+ }
608
+ },
609
+ "rest": {
610
+ "libraryClient": "PredictionServiceClient",
611
+ "rpcs": {
612
+ "Predict": {
613
+ "methods": [
614
+ "predict"
615
+ ]
616
+ }
617
+ }
618
+ }
619
+ }
620
+ },
621
+ "RetrieverService": {
622
+ "clients": {
623
+ "grpc": {
624
+ "libraryClient": "RetrieverServiceClient",
625
+ "rpcs": {
626
+ "BatchCreateChunks": {
627
+ "methods": [
628
+ "batch_create_chunks"
629
+ ]
630
+ },
631
+ "BatchDeleteChunks": {
632
+ "methods": [
633
+ "batch_delete_chunks"
634
+ ]
635
+ },
636
+ "BatchUpdateChunks": {
637
+ "methods": [
638
+ "batch_update_chunks"
639
+ ]
640
+ },
641
+ "CreateChunk": {
642
+ "methods": [
643
+ "create_chunk"
644
+ ]
645
+ },
646
+ "CreateCorpus": {
647
+ "methods": [
648
+ "create_corpus"
649
+ ]
650
+ },
651
+ "CreateDocument": {
652
+ "methods": [
653
+ "create_document"
654
+ ]
655
+ },
656
+ "DeleteChunk": {
657
+ "methods": [
658
+ "delete_chunk"
659
+ ]
660
+ },
661
+ "DeleteCorpus": {
662
+ "methods": [
663
+ "delete_corpus"
664
+ ]
665
+ },
666
+ "DeleteDocument": {
667
+ "methods": [
668
+ "delete_document"
669
+ ]
670
+ },
671
+ "GetChunk": {
672
+ "methods": [
673
+ "get_chunk"
674
+ ]
675
+ },
676
+ "GetCorpus": {
677
+ "methods": [
678
+ "get_corpus"
679
+ ]
680
+ },
681
+ "GetDocument": {
682
+ "methods": [
683
+ "get_document"
684
+ ]
685
+ },
686
+ "ListChunks": {
687
+ "methods": [
688
+ "list_chunks"
689
+ ]
690
+ },
691
+ "ListCorpora": {
692
+ "methods": [
693
+ "list_corpora"
694
+ ]
695
+ },
696
+ "ListDocuments": {
697
+ "methods": [
698
+ "list_documents"
699
+ ]
700
+ },
701
+ "QueryCorpus": {
702
+ "methods": [
703
+ "query_corpus"
704
+ ]
705
+ },
706
+ "QueryDocument": {
707
+ "methods": [
708
+ "query_document"
709
+ ]
710
+ },
711
+ "UpdateChunk": {
712
+ "methods": [
713
+ "update_chunk"
714
+ ]
715
+ },
716
+ "UpdateCorpus": {
717
+ "methods": [
718
+ "update_corpus"
719
+ ]
720
+ },
721
+ "UpdateDocument": {
722
+ "methods": [
723
+ "update_document"
724
+ ]
725
+ }
726
+ }
727
+ },
728
+ "grpc-async": {
729
+ "libraryClient": "RetrieverServiceAsyncClient",
730
+ "rpcs": {
731
+ "BatchCreateChunks": {
732
+ "methods": [
733
+ "batch_create_chunks"
734
+ ]
735
+ },
736
+ "BatchDeleteChunks": {
737
+ "methods": [
738
+ "batch_delete_chunks"
739
+ ]
740
+ },
741
+ "BatchUpdateChunks": {
742
+ "methods": [
743
+ "batch_update_chunks"
744
+ ]
745
+ },
746
+ "CreateChunk": {
747
+ "methods": [
748
+ "create_chunk"
749
+ ]
750
+ },
751
+ "CreateCorpus": {
752
+ "methods": [
753
+ "create_corpus"
754
+ ]
755
+ },
756
+ "CreateDocument": {
757
+ "methods": [
758
+ "create_document"
759
+ ]
760
+ },
761
+ "DeleteChunk": {
762
+ "methods": [
763
+ "delete_chunk"
764
+ ]
765
+ },
766
+ "DeleteCorpus": {
767
+ "methods": [
768
+ "delete_corpus"
769
+ ]
770
+ },
771
+ "DeleteDocument": {
772
+ "methods": [
773
+ "delete_document"
774
+ ]
775
+ },
776
+ "GetChunk": {
777
+ "methods": [
778
+ "get_chunk"
779
+ ]
780
+ },
781
+ "GetCorpus": {
782
+ "methods": [
783
+ "get_corpus"
784
+ ]
785
+ },
786
+ "GetDocument": {
787
+ "methods": [
788
+ "get_document"
789
+ ]
790
+ },
791
+ "ListChunks": {
792
+ "methods": [
793
+ "list_chunks"
794
+ ]
795
+ },
796
+ "ListCorpora": {
797
+ "methods": [
798
+ "list_corpora"
799
+ ]
800
+ },
801
+ "ListDocuments": {
802
+ "methods": [
803
+ "list_documents"
804
+ ]
805
+ },
806
+ "QueryCorpus": {
807
+ "methods": [
808
+ "query_corpus"
809
+ ]
810
+ },
811
+ "QueryDocument": {
812
+ "methods": [
813
+ "query_document"
814
+ ]
815
+ },
816
+ "UpdateChunk": {
817
+ "methods": [
818
+ "update_chunk"
819
+ ]
820
+ },
821
+ "UpdateCorpus": {
822
+ "methods": [
823
+ "update_corpus"
824
+ ]
825
+ },
826
+ "UpdateDocument": {
827
+ "methods": [
828
+ "update_document"
829
+ ]
830
+ }
831
+ }
832
+ },
833
+ "rest": {
834
+ "libraryClient": "RetrieverServiceClient",
835
+ "rpcs": {
836
+ "BatchCreateChunks": {
837
+ "methods": [
838
+ "batch_create_chunks"
839
+ ]
840
+ },
841
+ "BatchDeleteChunks": {
842
+ "methods": [
843
+ "batch_delete_chunks"
844
+ ]
845
+ },
846
+ "BatchUpdateChunks": {
847
+ "methods": [
848
+ "batch_update_chunks"
849
+ ]
850
+ },
851
+ "CreateChunk": {
852
+ "methods": [
853
+ "create_chunk"
854
+ ]
855
+ },
856
+ "CreateCorpus": {
857
+ "methods": [
858
+ "create_corpus"
859
+ ]
860
+ },
861
+ "CreateDocument": {
862
+ "methods": [
863
+ "create_document"
864
+ ]
865
+ },
866
+ "DeleteChunk": {
867
+ "methods": [
868
+ "delete_chunk"
869
+ ]
870
+ },
871
+ "DeleteCorpus": {
872
+ "methods": [
873
+ "delete_corpus"
874
+ ]
875
+ },
876
+ "DeleteDocument": {
877
+ "methods": [
878
+ "delete_document"
879
+ ]
880
+ },
881
+ "GetChunk": {
882
+ "methods": [
883
+ "get_chunk"
884
+ ]
885
+ },
886
+ "GetCorpus": {
887
+ "methods": [
888
+ "get_corpus"
889
+ ]
890
+ },
891
+ "GetDocument": {
892
+ "methods": [
893
+ "get_document"
894
+ ]
895
+ },
896
+ "ListChunks": {
897
+ "methods": [
898
+ "list_chunks"
899
+ ]
900
+ },
901
+ "ListCorpora": {
902
+ "methods": [
903
+ "list_corpora"
904
+ ]
905
+ },
906
+ "ListDocuments": {
907
+ "methods": [
908
+ "list_documents"
909
+ ]
910
+ },
911
+ "QueryCorpus": {
912
+ "methods": [
913
+ "query_corpus"
914
+ ]
915
+ },
916
+ "QueryDocument": {
917
+ "methods": [
918
+ "query_document"
919
+ ]
920
+ },
921
+ "UpdateChunk": {
922
+ "methods": [
923
+ "update_chunk"
924
+ ]
925
+ },
926
+ "UpdateCorpus": {
927
+ "methods": [
928
+ "update_corpus"
929
+ ]
930
+ },
931
+ "UpdateDocument": {
932
+ "methods": [
933
+ "update_document"
934
+ ]
935
+ }
936
+ }
937
+ }
938
+ }
939
+ },
940
+ "TextService": {
941
+ "clients": {
942
+ "grpc": {
943
+ "libraryClient": "TextServiceClient",
944
+ "rpcs": {
945
+ "BatchEmbedText": {
946
+ "methods": [
947
+ "batch_embed_text"
948
+ ]
949
+ },
950
+ "CountTextTokens": {
951
+ "methods": [
952
+ "count_text_tokens"
953
+ ]
954
+ },
955
+ "EmbedText": {
956
+ "methods": [
957
+ "embed_text"
958
+ ]
959
+ },
960
+ "GenerateText": {
961
+ "methods": [
962
+ "generate_text"
963
+ ]
964
+ }
965
+ }
966
+ },
967
+ "grpc-async": {
968
+ "libraryClient": "TextServiceAsyncClient",
969
+ "rpcs": {
970
+ "BatchEmbedText": {
971
+ "methods": [
972
+ "batch_embed_text"
973
+ ]
974
+ },
975
+ "CountTextTokens": {
976
+ "methods": [
977
+ "count_text_tokens"
978
+ ]
979
+ },
980
+ "EmbedText": {
981
+ "methods": [
982
+ "embed_text"
983
+ ]
984
+ },
985
+ "GenerateText": {
986
+ "methods": [
987
+ "generate_text"
988
+ ]
989
+ }
990
+ }
991
+ },
992
+ "rest": {
993
+ "libraryClient": "TextServiceClient",
994
+ "rpcs": {
995
+ "BatchEmbedText": {
996
+ "methods": [
997
+ "batch_embed_text"
998
+ ]
999
+ },
1000
+ "CountTextTokens": {
1001
+ "methods": [
1002
+ "count_text_tokens"
1003
+ ]
1004
+ },
1005
+ "EmbedText": {
1006
+ "methods": [
1007
+ "embed_text"
1008
+ ]
1009
+ },
1010
+ "GenerateText": {
1011
+ "methods": [
1012
+ "generate_text"
1013
+ ]
1014
+ }
1015
+ }
1016
+ }
1017
+ }
1018
+ }
1019
+ }
1020
+ }
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/py.typed ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Marker file for PEP 561.
2
+ # The google-ai-generativelanguage package uses inline types.
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__init__.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .cache_service import (
17
+ CreateCachedContentRequest,
18
+ DeleteCachedContentRequest,
19
+ GetCachedContentRequest,
20
+ ListCachedContentsRequest,
21
+ ListCachedContentsResponse,
22
+ UpdateCachedContentRequest,
23
+ )
24
+ from .cached_content import CachedContent
25
+ from .citation import CitationMetadata, CitationSource
26
+ from .content import (
27
+ Blob,
28
+ CodeExecution,
29
+ CodeExecutionResult,
30
+ Content,
31
+ DynamicRetrievalConfig,
32
+ ExecutableCode,
33
+ FileData,
34
+ FunctionCall,
35
+ FunctionCallingConfig,
36
+ FunctionDeclaration,
37
+ FunctionResponse,
38
+ GoogleSearchRetrieval,
39
+ GroundingPassage,
40
+ GroundingPassages,
41
+ Part,
42
+ Schema,
43
+ Tool,
44
+ ToolConfig,
45
+ Type,
46
+ )
47
+ from .discuss_service import (
48
+ CountMessageTokensRequest,
49
+ CountMessageTokensResponse,
50
+ Example,
51
+ GenerateMessageRequest,
52
+ GenerateMessageResponse,
53
+ Message,
54
+ MessagePrompt,
55
+ )
56
+ from .file import File, VideoMetadata
57
+ from .file_service import (
58
+ CreateFileRequest,
59
+ CreateFileResponse,
60
+ DeleteFileRequest,
61
+ GetFileRequest,
62
+ ListFilesRequest,
63
+ ListFilesResponse,
64
+ )
65
+ from .generative_service import (
66
+ AttributionSourceId,
67
+ BatchEmbedContentsRequest,
68
+ BatchEmbedContentsResponse,
69
+ BidiGenerateContentClientContent,
70
+ BidiGenerateContentClientMessage,
71
+ BidiGenerateContentRealtimeInput,
72
+ BidiGenerateContentServerContent,
73
+ BidiGenerateContentServerMessage,
74
+ BidiGenerateContentSetup,
75
+ BidiGenerateContentSetupComplete,
76
+ BidiGenerateContentToolCall,
77
+ BidiGenerateContentToolCallCancellation,
78
+ BidiGenerateContentToolResponse,
79
+ Candidate,
80
+ ContentEmbedding,
81
+ CountTokensRequest,
82
+ CountTokensResponse,
83
+ EmbedContentRequest,
84
+ EmbedContentResponse,
85
+ GenerateAnswerRequest,
86
+ GenerateAnswerResponse,
87
+ GenerateContentRequest,
88
+ GenerateContentResponse,
89
+ GenerationConfig,
90
+ GroundingAttribution,
91
+ GroundingChunk,
92
+ GroundingMetadata,
93
+ GroundingSupport,
94
+ LogprobsResult,
95
+ PrebuiltVoiceConfig,
96
+ RetrievalMetadata,
97
+ SearchEntryPoint,
98
+ Segment,
99
+ SemanticRetrieverConfig,
100
+ SpeechConfig,
101
+ TaskType,
102
+ VoiceConfig,
103
+ )
104
+ from .model import Model
105
+ from .model_service import (
106
+ CreateTunedModelMetadata,
107
+ CreateTunedModelRequest,
108
+ DeleteTunedModelRequest,
109
+ GetModelRequest,
110
+ GetTunedModelRequest,
111
+ ListModelsRequest,
112
+ ListModelsResponse,
113
+ ListTunedModelsRequest,
114
+ ListTunedModelsResponse,
115
+ UpdateTunedModelRequest,
116
+ )
117
+ from .permission import Permission
118
+ from .permission_service import (
119
+ CreatePermissionRequest,
120
+ DeletePermissionRequest,
121
+ GetPermissionRequest,
122
+ ListPermissionsRequest,
123
+ ListPermissionsResponse,
124
+ TransferOwnershipRequest,
125
+ TransferOwnershipResponse,
126
+ UpdatePermissionRequest,
127
+ )
128
+ from .prediction_service import PredictRequest, PredictResponse
129
+ from .retriever import (
130
+ Chunk,
131
+ ChunkData,
132
+ Condition,
133
+ Corpus,
134
+ CustomMetadata,
135
+ Document,
136
+ MetadataFilter,
137
+ StringList,
138
+ )
139
+ from .retriever_service import (
140
+ BatchCreateChunksRequest,
141
+ BatchCreateChunksResponse,
142
+ BatchDeleteChunksRequest,
143
+ BatchUpdateChunksRequest,
144
+ BatchUpdateChunksResponse,
145
+ CreateChunkRequest,
146
+ CreateCorpusRequest,
147
+ CreateDocumentRequest,
148
+ DeleteChunkRequest,
149
+ DeleteCorpusRequest,
150
+ DeleteDocumentRequest,
151
+ GetChunkRequest,
152
+ GetCorpusRequest,
153
+ GetDocumentRequest,
154
+ ListChunksRequest,
155
+ ListChunksResponse,
156
+ ListCorporaRequest,
157
+ ListCorporaResponse,
158
+ ListDocumentsRequest,
159
+ ListDocumentsResponse,
160
+ QueryCorpusRequest,
161
+ QueryCorpusResponse,
162
+ QueryDocumentRequest,
163
+ QueryDocumentResponse,
164
+ RelevantChunk,
165
+ UpdateChunkRequest,
166
+ UpdateCorpusRequest,
167
+ UpdateDocumentRequest,
168
+ )
169
+ from .safety import (
170
+ ContentFilter,
171
+ HarmCategory,
172
+ SafetyFeedback,
173
+ SafetyRating,
174
+ SafetySetting,
175
+ )
176
+ from .text_service import (
177
+ BatchEmbedTextRequest,
178
+ BatchEmbedTextResponse,
179
+ CountTextTokensRequest,
180
+ CountTextTokensResponse,
181
+ Embedding,
182
+ EmbedTextRequest,
183
+ EmbedTextResponse,
184
+ GenerateTextRequest,
185
+ GenerateTextResponse,
186
+ TextCompletion,
187
+ TextPrompt,
188
+ )
189
+ from .tuned_model import (
190
+ Dataset,
191
+ Hyperparameters,
192
+ TunedModel,
193
+ TunedModelSource,
194
+ TuningContent,
195
+ TuningExample,
196
+ TuningExamples,
197
+ TuningMultiturnExample,
198
+ TuningPart,
199
+ TuningSnapshot,
200
+ TuningTask,
201
+ )
202
+
203
+ __all__ = (
204
+ "CreateCachedContentRequest",
205
+ "DeleteCachedContentRequest",
206
+ "GetCachedContentRequest",
207
+ "ListCachedContentsRequest",
208
+ "ListCachedContentsResponse",
209
+ "UpdateCachedContentRequest",
210
+ "CachedContent",
211
+ "CitationMetadata",
212
+ "CitationSource",
213
+ "Blob",
214
+ "CodeExecution",
215
+ "CodeExecutionResult",
216
+ "Content",
217
+ "DynamicRetrievalConfig",
218
+ "ExecutableCode",
219
+ "FileData",
220
+ "FunctionCall",
221
+ "FunctionCallingConfig",
222
+ "FunctionDeclaration",
223
+ "FunctionResponse",
224
+ "GoogleSearchRetrieval",
225
+ "GroundingPassage",
226
+ "GroundingPassages",
227
+ "Part",
228
+ "Schema",
229
+ "Tool",
230
+ "ToolConfig",
231
+ "Type",
232
+ "CountMessageTokensRequest",
233
+ "CountMessageTokensResponse",
234
+ "Example",
235
+ "GenerateMessageRequest",
236
+ "GenerateMessageResponse",
237
+ "Message",
238
+ "MessagePrompt",
239
+ "File",
240
+ "VideoMetadata",
241
+ "CreateFileRequest",
242
+ "CreateFileResponse",
243
+ "DeleteFileRequest",
244
+ "GetFileRequest",
245
+ "ListFilesRequest",
246
+ "ListFilesResponse",
247
+ "AttributionSourceId",
248
+ "BatchEmbedContentsRequest",
249
+ "BatchEmbedContentsResponse",
250
+ "BidiGenerateContentClientContent",
251
+ "BidiGenerateContentClientMessage",
252
+ "BidiGenerateContentRealtimeInput",
253
+ "BidiGenerateContentServerContent",
254
+ "BidiGenerateContentServerMessage",
255
+ "BidiGenerateContentSetup",
256
+ "BidiGenerateContentSetupComplete",
257
+ "BidiGenerateContentToolCall",
258
+ "BidiGenerateContentToolCallCancellation",
259
+ "BidiGenerateContentToolResponse",
260
+ "Candidate",
261
+ "ContentEmbedding",
262
+ "CountTokensRequest",
263
+ "CountTokensResponse",
264
+ "EmbedContentRequest",
265
+ "EmbedContentResponse",
266
+ "GenerateAnswerRequest",
267
+ "GenerateAnswerResponse",
268
+ "GenerateContentRequest",
269
+ "GenerateContentResponse",
270
+ "GenerationConfig",
271
+ "GroundingAttribution",
272
+ "GroundingChunk",
273
+ "GroundingMetadata",
274
+ "GroundingSupport",
275
+ "LogprobsResult",
276
+ "PrebuiltVoiceConfig",
277
+ "RetrievalMetadata",
278
+ "SearchEntryPoint",
279
+ "Segment",
280
+ "SemanticRetrieverConfig",
281
+ "SpeechConfig",
282
+ "VoiceConfig",
283
+ "TaskType",
284
+ "Model",
285
+ "CreateTunedModelMetadata",
286
+ "CreateTunedModelRequest",
287
+ "DeleteTunedModelRequest",
288
+ "GetModelRequest",
289
+ "GetTunedModelRequest",
290
+ "ListModelsRequest",
291
+ "ListModelsResponse",
292
+ "ListTunedModelsRequest",
293
+ "ListTunedModelsResponse",
294
+ "UpdateTunedModelRequest",
295
+ "Permission",
296
+ "CreatePermissionRequest",
297
+ "DeletePermissionRequest",
298
+ "GetPermissionRequest",
299
+ "ListPermissionsRequest",
300
+ "ListPermissionsResponse",
301
+ "TransferOwnershipRequest",
302
+ "TransferOwnershipResponse",
303
+ "UpdatePermissionRequest",
304
+ "PredictRequest",
305
+ "PredictResponse",
306
+ "Chunk",
307
+ "ChunkData",
308
+ "Condition",
309
+ "Corpus",
310
+ "CustomMetadata",
311
+ "Document",
312
+ "MetadataFilter",
313
+ "StringList",
314
+ "BatchCreateChunksRequest",
315
+ "BatchCreateChunksResponse",
316
+ "BatchDeleteChunksRequest",
317
+ "BatchUpdateChunksRequest",
318
+ "BatchUpdateChunksResponse",
319
+ "CreateChunkRequest",
320
+ "CreateCorpusRequest",
321
+ "CreateDocumentRequest",
322
+ "DeleteChunkRequest",
323
+ "DeleteCorpusRequest",
324
+ "DeleteDocumentRequest",
325
+ "GetChunkRequest",
326
+ "GetCorpusRequest",
327
+ "GetDocumentRequest",
328
+ "ListChunksRequest",
329
+ "ListChunksResponse",
330
+ "ListCorporaRequest",
331
+ "ListCorporaResponse",
332
+ "ListDocumentsRequest",
333
+ "ListDocumentsResponse",
334
+ "QueryCorpusRequest",
335
+ "QueryCorpusResponse",
336
+ "QueryDocumentRequest",
337
+ "QueryDocumentResponse",
338
+ "RelevantChunk",
339
+ "UpdateChunkRequest",
340
+ "UpdateCorpusRequest",
341
+ "UpdateDocumentRequest",
342
+ "ContentFilter",
343
+ "SafetyFeedback",
344
+ "SafetyRating",
345
+ "SafetySetting",
346
+ "HarmCategory",
347
+ "BatchEmbedTextRequest",
348
+ "BatchEmbedTextResponse",
349
+ "CountTextTokensRequest",
350
+ "CountTextTokensResponse",
351
+ "Embedding",
352
+ "EmbedTextRequest",
353
+ "EmbedTextResponse",
354
+ "GenerateTextRequest",
355
+ "GenerateTextResponse",
356
+ "TextCompletion",
357
+ "TextPrompt",
358
+ "Dataset",
359
+ "Hyperparameters",
360
+ "TunedModel",
361
+ "TunedModelSource",
362
+ "TuningContent",
363
+ "TuningExample",
364
+ "TuningExamples",
365
+ "TuningMultiturnExample",
366
+ "TuningPart",
367
+ "TuningSnapshot",
368
+ "TuningTask",
369
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/cached_content.cpython-311.pyc ADDED
Binary file (6.98 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/discuss_service.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/file.cpython-311.pyc ADDED
Binary file (6.55 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/generative_service.cpython-311.pyc ADDED
Binary file (92.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/model_service.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/permission.cpython-311.pyc ADDED
Binary file (5.19 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/retriever.cpython-311.pyc ADDED
Binary file (16.7 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/safety.cpython-311.pyc ADDED
Binary file (11 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/__pycache__/tuned_model.cpython-311.pyc ADDED
Binary file (21.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/cache_service.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1alpha.types import (
24
+ cached_content as gag_cached_content,
25
+ )
26
+
27
+ __protobuf__ = proto.module(
28
+ package="google.ai.generativelanguage.v1alpha",
29
+ manifest={
30
+ "ListCachedContentsRequest",
31
+ "ListCachedContentsResponse",
32
+ "CreateCachedContentRequest",
33
+ "GetCachedContentRequest",
34
+ "UpdateCachedContentRequest",
35
+ "DeleteCachedContentRequest",
36
+ },
37
+ )
38
+
39
+
40
+ class ListCachedContentsRequest(proto.Message):
41
+ r"""Request to list CachedContents.
42
+
43
+ Attributes:
44
+ page_size (int):
45
+ Optional. The maximum number of cached
46
+ contents to return. The service may return fewer
47
+ than this value. If unspecified, some default
48
+ (under maximum) number of items will be
49
+ returned. The maximum value is 1000; values
50
+ above 1000 will be coerced to 1000.
51
+ page_token (str):
52
+ Optional. A page token, received from a previous
53
+ ``ListCachedContents`` call. Provide this to retrieve the
54
+ subsequent page.
55
+
56
+ When paginating, all other parameters provided to
57
+ ``ListCachedContents`` must match the call that provided the
58
+ page token.
59
+ """
60
+
61
+ page_size: int = proto.Field(
62
+ proto.INT32,
63
+ number=1,
64
+ )
65
+ page_token: str = proto.Field(
66
+ proto.STRING,
67
+ number=2,
68
+ )
69
+
70
+
71
+ class ListCachedContentsResponse(proto.Message):
72
+ r"""Response with CachedContents list.
73
+
74
+ Attributes:
75
+ cached_contents (MutableSequence[google.ai.generativelanguage_v1alpha.types.CachedContent]):
76
+ List of cached contents.
77
+ next_page_token (str):
78
+ A token, which can be sent as ``page_token`` to retrieve the
79
+ next page. If this field is omitted, there are no subsequent
80
+ pages.
81
+ """
82
+
83
+ @property
84
+ def raw_page(self):
85
+ return self
86
+
87
+ cached_contents: MutableSequence[
88
+ gag_cached_content.CachedContent
89
+ ] = proto.RepeatedField(
90
+ proto.MESSAGE,
91
+ number=1,
92
+ message=gag_cached_content.CachedContent,
93
+ )
94
+ next_page_token: str = proto.Field(
95
+ proto.STRING,
96
+ number=2,
97
+ )
98
+
99
+
100
+ class CreateCachedContentRequest(proto.Message):
101
+ r"""Request to create CachedContent.
102
+
103
+ Attributes:
104
+ cached_content (google.ai.generativelanguage_v1alpha.types.CachedContent):
105
+ Required. The cached content to create.
106
+ """
107
+
108
+ cached_content: gag_cached_content.CachedContent = proto.Field(
109
+ proto.MESSAGE,
110
+ number=1,
111
+ message=gag_cached_content.CachedContent,
112
+ )
113
+
114
+
115
+ class GetCachedContentRequest(proto.Message):
116
+ r"""Request to read CachedContent.
117
+
118
+ Attributes:
119
+ name (str):
120
+ Required. The resource name referring to the content cache
121
+ entry. Format: ``cachedContents/{id}``
122
+ """
123
+
124
+ name: str = proto.Field(
125
+ proto.STRING,
126
+ number=1,
127
+ )
128
+
129
+
130
+ class UpdateCachedContentRequest(proto.Message):
131
+ r"""Request to update CachedContent.
132
+
133
+ Attributes:
134
+ cached_content (google.ai.generativelanguage_v1alpha.types.CachedContent):
135
+ Required. The content cache entry to update
136
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
137
+ The list of fields to update.
138
+ """
139
+
140
+ cached_content: gag_cached_content.CachedContent = proto.Field(
141
+ proto.MESSAGE,
142
+ number=1,
143
+ message=gag_cached_content.CachedContent,
144
+ )
145
+ update_mask: field_mask_pb2.FieldMask = proto.Field(
146
+ proto.MESSAGE,
147
+ number=2,
148
+ message=field_mask_pb2.FieldMask,
149
+ )
150
+
151
+
152
+ class DeleteCachedContentRequest(proto.Message):
153
+ r"""Request to delete CachedContent.
154
+
155
+ Attributes:
156
+ name (str):
157
+ Required. The resource name referring to the content cache
158
+ entry Format: ``cachedContents/{id}``
159
+ """
160
+
161
+ name: str = proto.Field(
162
+ proto.STRING,
163
+ number=1,
164
+ )
165
+
166
+
167
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/content.py ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import struct_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ __protobuf__ = proto.module(
24
+ package="google.ai.generativelanguage.v1alpha",
25
+ manifest={
26
+ "Type",
27
+ "Content",
28
+ "Part",
29
+ "Blob",
30
+ "FileData",
31
+ "ExecutableCode",
32
+ "CodeExecutionResult",
33
+ "Tool",
34
+ "GoogleSearchRetrieval",
35
+ "DynamicRetrievalConfig",
36
+ "CodeExecution",
37
+ "ToolConfig",
38
+ "FunctionCallingConfig",
39
+ "FunctionDeclaration",
40
+ "FunctionCall",
41
+ "FunctionResponse",
42
+ "Schema",
43
+ "GroundingPassage",
44
+ "GroundingPassages",
45
+ },
46
+ )
47
+
48
+
49
+ class Type(proto.Enum):
50
+ r"""Type contains the list of OpenAPI data types as defined by
51
+ https://spec.openapis.org/oas/v3.0.3#data-types
52
+
53
+ Values:
54
+ TYPE_UNSPECIFIED (0):
55
+ Not specified, should not be used.
56
+ STRING (1):
57
+ String type.
58
+ NUMBER (2):
59
+ Number type.
60
+ INTEGER (3):
61
+ Integer type.
62
+ BOOLEAN (4):
63
+ Boolean type.
64
+ ARRAY (5):
65
+ Array type.
66
+ OBJECT (6):
67
+ Object type.
68
+ """
69
+ TYPE_UNSPECIFIED = 0
70
+ STRING = 1
71
+ NUMBER = 2
72
+ INTEGER = 3
73
+ BOOLEAN = 4
74
+ ARRAY = 5
75
+ OBJECT = 6
76
+
77
+
78
+ class Content(proto.Message):
79
+ r"""The base structured datatype containing multi-part content of a
80
+ message.
81
+
82
+ A ``Content`` includes a ``role`` field designating the producer of
83
+ the ``Content`` and a ``parts`` field containing multi-part data
84
+ that contains the content of the message turn.
85
+
86
+ Attributes:
87
+ parts (MutableSequence[google.ai.generativelanguage_v1alpha.types.Part]):
88
+ Ordered ``Parts`` that constitute a single message. Parts
89
+ may have different MIME types.
90
+ role (str):
91
+ Optional. The producer of the content. Must
92
+ be either 'user' or 'model'.
93
+ Useful to set for multi-turn conversations,
94
+ otherwise can be left blank or unset.
95
+ """
96
+
97
+ parts: MutableSequence["Part"] = proto.RepeatedField(
98
+ proto.MESSAGE,
99
+ number=1,
100
+ message="Part",
101
+ )
102
+ role: str = proto.Field(
103
+ proto.STRING,
104
+ number=2,
105
+ )
106
+
107
+
108
+ class Part(proto.Message):
109
+ r"""A datatype containing media that is part of a multi-part ``Content``
110
+ message.
111
+
112
+ A ``Part`` consists of data which has an associated datatype. A
113
+ ``Part`` can only contain one of the accepted types in
114
+ ``Part.data``.
115
+
116
+ A ``Part`` must have a fixed IANA MIME type identifying the type and
117
+ subtype of the media if the ``inline_data`` field is filled with raw
118
+ bytes.
119
+
120
+ This message has `oneof`_ fields (mutually exclusive fields).
121
+ For each oneof, at most one member field can be set at the same time.
122
+ Setting any member of the oneof automatically clears all other
123
+ members.
124
+
125
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
126
+
127
+ Attributes:
128
+ text (str):
129
+ Inline text.
130
+
131
+ This field is a member of `oneof`_ ``data``.
132
+ inline_data (google.ai.generativelanguage_v1alpha.types.Blob):
133
+ Inline media bytes.
134
+
135
+ This field is a member of `oneof`_ ``data``.
136
+ function_call (google.ai.generativelanguage_v1alpha.types.FunctionCall):
137
+ A predicted ``FunctionCall`` returned from the model that
138
+ contains a string representing the
139
+ ``FunctionDeclaration.name`` with the arguments and their
140
+ values.
141
+
142
+ This field is a member of `oneof`_ ``data``.
143
+ function_response (google.ai.generativelanguage_v1alpha.types.FunctionResponse):
144
+ The result output of a ``FunctionCall`` that contains a
145
+ string representing the ``FunctionDeclaration.name`` and a
146
+ structured JSON object containing any output from the
147
+ function is used as context to the model.
148
+
149
+ This field is a member of `oneof`_ ``data``.
150
+ file_data (google.ai.generativelanguage_v1alpha.types.FileData):
151
+ URI based data.
152
+
153
+ This field is a member of `oneof`_ ``data``.
154
+ executable_code (google.ai.generativelanguage_v1alpha.types.ExecutableCode):
155
+ Code generated by the model that is meant to
156
+ be executed.
157
+
158
+ This field is a member of `oneof`_ ``data``.
159
+ code_execution_result (google.ai.generativelanguage_v1alpha.types.CodeExecutionResult):
160
+ Result of executing the ``ExecutableCode``.
161
+
162
+ This field is a member of `oneof`_ ``data``.
163
+ """
164
+
165
+ text: str = proto.Field(
166
+ proto.STRING,
167
+ number=2,
168
+ oneof="data",
169
+ )
170
+ inline_data: "Blob" = proto.Field(
171
+ proto.MESSAGE,
172
+ number=3,
173
+ oneof="data",
174
+ message="Blob",
175
+ )
176
+ function_call: "FunctionCall" = proto.Field(
177
+ proto.MESSAGE,
178
+ number=4,
179
+ oneof="data",
180
+ message="FunctionCall",
181
+ )
182
+ function_response: "FunctionResponse" = proto.Field(
183
+ proto.MESSAGE,
184
+ number=5,
185
+ oneof="data",
186
+ message="FunctionResponse",
187
+ )
188
+ file_data: "FileData" = proto.Field(
189
+ proto.MESSAGE,
190
+ number=6,
191
+ oneof="data",
192
+ message="FileData",
193
+ )
194
+ executable_code: "ExecutableCode" = proto.Field(
195
+ proto.MESSAGE,
196
+ number=9,
197
+ oneof="data",
198
+ message="ExecutableCode",
199
+ )
200
+ code_execution_result: "CodeExecutionResult" = proto.Field(
201
+ proto.MESSAGE,
202
+ number=10,
203
+ oneof="data",
204
+ message="CodeExecutionResult",
205
+ )
206
+
207
+
208
+ class Blob(proto.Message):
209
+ r"""Raw media bytes.
210
+
211
+ Text should not be sent as raw bytes, use the 'text' field.
212
+
213
+ Attributes:
214
+ mime_type (str):
215
+ The IANA standard MIME type of the source data. Examples:
216
+
217
+ - image/png
218
+ - image/jpeg If an unsupported MIME type is provided, an
219
+ error will be returned. For a complete list of supported
220
+ types, see `Supported file
221
+ formats <https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>`__.
222
+ data (bytes):
223
+ Raw bytes for media formats.
224
+ """
225
+
226
+ mime_type: str = proto.Field(
227
+ proto.STRING,
228
+ number=1,
229
+ )
230
+ data: bytes = proto.Field(
231
+ proto.BYTES,
232
+ number=2,
233
+ )
234
+
235
+
236
+ class FileData(proto.Message):
237
+ r"""URI based data.
238
+
239
+ Attributes:
240
+ mime_type (str):
241
+ Optional. The IANA standard MIME type of the
242
+ source data.
243
+ file_uri (str):
244
+ Required. URI.
245
+ """
246
+
247
+ mime_type: str = proto.Field(
248
+ proto.STRING,
249
+ number=1,
250
+ )
251
+ file_uri: str = proto.Field(
252
+ proto.STRING,
253
+ number=2,
254
+ )
255
+
256
+
257
+ class ExecutableCode(proto.Message):
258
+ r"""Code generated by the model that is meant to be executed, and the
259
+ result returned to the model.
260
+
261
+ Only generated when using the ``CodeExecution`` tool, in which the
262
+ code will be automatically executed, and a corresponding
263
+ ``CodeExecutionResult`` will also be generated.
264
+
265
+ Attributes:
266
+ language (google.ai.generativelanguage_v1alpha.types.ExecutableCode.Language):
267
+ Required. Programming language of the ``code``.
268
+ code (str):
269
+ Required. The code to be executed.
270
+ """
271
+
272
+ class Language(proto.Enum):
273
+ r"""Supported programming languages for the generated code.
274
+
275
+ Values:
276
+ LANGUAGE_UNSPECIFIED (0):
277
+ Unspecified language. This value should not
278
+ be used.
279
+ PYTHON (1):
280
+ Python >= 3.10, with numpy and simpy
281
+ available.
282
+ """
283
+ LANGUAGE_UNSPECIFIED = 0
284
+ PYTHON = 1
285
+
286
+ language: Language = proto.Field(
287
+ proto.ENUM,
288
+ number=1,
289
+ enum=Language,
290
+ )
291
+ code: str = proto.Field(
292
+ proto.STRING,
293
+ number=2,
294
+ )
295
+
296
+
297
+ class CodeExecutionResult(proto.Message):
298
+ r"""Result of executing the ``ExecutableCode``.
299
+
300
+ Only generated when using the ``CodeExecution``, and always follows
301
+ a ``part`` containing the ``ExecutableCode``.
302
+
303
+ Attributes:
304
+ outcome (google.ai.generativelanguage_v1alpha.types.CodeExecutionResult.Outcome):
305
+ Required. Outcome of the code execution.
306
+ output (str):
307
+ Optional. Contains stdout when code execution
308
+ is successful, stderr or other description
309
+ otherwise.
310
+ """
311
+
312
+ class Outcome(proto.Enum):
313
+ r"""Enumeration of possible outcomes of the code execution.
314
+
315
+ Values:
316
+ OUTCOME_UNSPECIFIED (0):
317
+ Unspecified status. This value should not be
318
+ used.
319
+ OUTCOME_OK (1):
320
+ Code execution completed successfully.
321
+ OUTCOME_FAILED (2):
322
+ Code execution finished but with a failure. ``stderr``
323
+ should contain the reason.
324
+ OUTCOME_DEADLINE_EXCEEDED (3):
325
+ Code execution ran for too long, and was
326
+ cancelled. There may or may not be a partial
327
+ output present.
328
+ """
329
+ OUTCOME_UNSPECIFIED = 0
330
+ OUTCOME_OK = 1
331
+ OUTCOME_FAILED = 2
332
+ OUTCOME_DEADLINE_EXCEEDED = 3
333
+
334
+ outcome: Outcome = proto.Field(
335
+ proto.ENUM,
336
+ number=1,
337
+ enum=Outcome,
338
+ )
339
+ output: str = proto.Field(
340
+ proto.STRING,
341
+ number=2,
342
+ )
343
+
344
+
345
+ class Tool(proto.Message):
346
+ r"""Tool details that the model may use to generate response.
347
+
348
+ A ``Tool`` is a piece of code that enables the system to interact
349
+ with external systems to perform an action, or set of actions,
350
+ outside of knowledge and scope of the model.
351
+
352
+ Attributes:
353
+ function_declarations (MutableSequence[google.ai.generativelanguage_v1alpha.types.FunctionDeclaration]):
354
+ Optional. A list of ``FunctionDeclarations`` available to
355
+ the model that can be used for function calling.
356
+
357
+ The model or system does not execute the function. Instead
358
+ the defined function may be returned as a
359
+ [FunctionCall][google.ai.generativelanguage.v1alpha.Part.function_call]
360
+ with arguments to the client side for execution. The model
361
+ may decide to call a subset of these functions by populating
362
+ [FunctionCall][google.ai.generativelanguage.v1alpha.Part.function_call]
363
+ in the response. The next conversation turn may contain a
364
+ [FunctionResponse][google.ai.generativelanguage.v1alpha.Part.function_response]
365
+ with the
366
+ [Content.role][google.ai.generativelanguage.v1alpha.Content.role]
367
+ "function" generation context for the next model turn.
368
+ google_search_retrieval (google.ai.generativelanguage_v1alpha.types.GoogleSearchRetrieval):
369
+ Optional. Retrieval tool that is powered by
370
+ Google search.
371
+ code_execution (google.ai.generativelanguage_v1alpha.types.CodeExecution):
372
+ Optional. Enables the model to execute code
373
+ as part of generation.
374
+ google_search (google.ai.generativelanguage_v1alpha.types.Tool.GoogleSearch):
375
+ Optional. GoogleSearch tool type.
376
+ Tool to support Google Search in Model. Powered
377
+ by Google.
378
+ """
379
+
380
+ class GoogleSearch(proto.Message):
381
+ r"""GoogleSearch tool type.
382
+ Tool to support Google Search in Model. Powered by Google.
383
+
384
+ """
385
+
386
+ function_declarations: MutableSequence["FunctionDeclaration"] = proto.RepeatedField(
387
+ proto.MESSAGE,
388
+ number=1,
389
+ message="FunctionDeclaration",
390
+ )
391
+ google_search_retrieval: "GoogleSearchRetrieval" = proto.Field(
392
+ proto.MESSAGE,
393
+ number=2,
394
+ message="GoogleSearchRetrieval",
395
+ )
396
+ code_execution: "CodeExecution" = proto.Field(
397
+ proto.MESSAGE,
398
+ number=3,
399
+ message="CodeExecution",
400
+ )
401
+ google_search: GoogleSearch = proto.Field(
402
+ proto.MESSAGE,
403
+ number=4,
404
+ message=GoogleSearch,
405
+ )
406
+
407
+
408
+ class GoogleSearchRetrieval(proto.Message):
409
+ r"""Tool to retrieve public web data for grounding, powered by
410
+ Google.
411
+
412
+ Attributes:
413
+ dynamic_retrieval_config (google.ai.generativelanguage_v1alpha.types.DynamicRetrievalConfig):
414
+ Specifies the dynamic retrieval configuration
415
+ for the given source.
416
+ """
417
+
418
+ dynamic_retrieval_config: "DynamicRetrievalConfig" = proto.Field(
419
+ proto.MESSAGE,
420
+ number=1,
421
+ message="DynamicRetrievalConfig",
422
+ )
423
+
424
+
425
+ class DynamicRetrievalConfig(proto.Message):
426
+ r"""Describes the options to customize dynamic retrieval.
427
+
428
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
429
+
430
+ Attributes:
431
+ mode (google.ai.generativelanguage_v1alpha.types.DynamicRetrievalConfig.Mode):
432
+ The mode of the predictor to be used in
433
+ dynamic retrieval.
434
+ dynamic_threshold (float):
435
+ The threshold to be used in dynamic
436
+ retrieval. If not set, a system default value is
437
+ used.
438
+
439
+ This field is a member of `oneof`_ ``_dynamic_threshold``.
440
+ """
441
+
442
+ class Mode(proto.Enum):
443
+ r"""The mode of the predictor to be used in dynamic retrieval.
444
+
445
+ Values:
446
+ MODE_UNSPECIFIED (0):
447
+ Always trigger retrieval.
448
+ MODE_DYNAMIC (1):
449
+ Run retrieval only when system decides it is
450
+ necessary.
451
+ """
452
+ MODE_UNSPECIFIED = 0
453
+ MODE_DYNAMIC = 1
454
+
455
+ mode: Mode = proto.Field(
456
+ proto.ENUM,
457
+ number=1,
458
+ enum=Mode,
459
+ )
460
+ dynamic_threshold: float = proto.Field(
461
+ proto.FLOAT,
462
+ number=2,
463
+ optional=True,
464
+ )
465
+
466
+
467
+ class CodeExecution(proto.Message):
468
+ r"""Tool that executes code generated by the model, and automatically
469
+ returns the result to the model.
470
+
471
+ See also ``ExecutableCode`` and ``CodeExecutionResult`` which are
472
+ only generated when using this tool.
473
+
474
+ """
475
+
476
+
477
+ class ToolConfig(proto.Message):
478
+ r"""The Tool configuration containing parameters for specifying ``Tool``
479
+ use in the request.
480
+
481
+ Attributes:
482
+ function_calling_config (google.ai.generativelanguage_v1alpha.types.FunctionCallingConfig):
483
+ Optional. Function calling config.
484
+ """
485
+
486
+ function_calling_config: "FunctionCallingConfig" = proto.Field(
487
+ proto.MESSAGE,
488
+ number=1,
489
+ message="FunctionCallingConfig",
490
+ )
491
+
492
+
493
+ class FunctionCallingConfig(proto.Message):
494
+ r"""Configuration for specifying function calling behavior.
495
+
496
+ Attributes:
497
+ mode (google.ai.generativelanguage_v1alpha.types.FunctionCallingConfig.Mode):
498
+ Optional. Specifies the mode in which
499
+ function calling should execute. If unspecified,
500
+ the default value will be set to AUTO.
501
+ allowed_function_names (MutableSequence[str]):
502
+ Optional. A set of function names that, when provided,
503
+ limits the functions the model will call.
504
+
505
+ This should only be set when the Mode is ANY. Function names
506
+ should match [FunctionDeclaration.name]. With mode set to
507
+ ANY, model will predict a function call from the set of
508
+ function names provided.
509
+ """
510
+
511
+ class Mode(proto.Enum):
512
+ r"""Defines the execution behavior for function calling by
513
+ defining the execution mode.
514
+
515
+ Values:
516
+ MODE_UNSPECIFIED (0):
517
+ Unspecified function calling mode. This value
518
+ should not be used.
519
+ AUTO (1):
520
+ Default model behavior, model decides to
521
+ predict either a function call or a natural
522
+ language response.
523
+ ANY (2):
524
+ Model is constrained to always predicting a function call
525
+ only. If "allowed_function_names" are set, the predicted
526
+ function call will be limited to any one of
527
+ "allowed_function_names", else the predicted function call
528
+ will be any one of the provided "function_declarations".
529
+ NONE (3):
530
+ Model will not predict any function call.
531
+ Model behavior is same as when not passing any
532
+ function declarations.
533
+ """
534
+ MODE_UNSPECIFIED = 0
535
+ AUTO = 1
536
+ ANY = 2
537
+ NONE = 3
538
+
539
+ mode: Mode = proto.Field(
540
+ proto.ENUM,
541
+ number=1,
542
+ enum=Mode,
543
+ )
544
+ allowed_function_names: MutableSequence[str] = proto.RepeatedField(
545
+ proto.STRING,
546
+ number=2,
547
+ )
548
+
549
+
550
+ class FunctionDeclaration(proto.Message):
551
+ r"""Structured representation of a function declaration as defined by
552
+ the `OpenAPI 3.03
553
+ specification <https://spec.openapis.org/oas/v3.0.3>`__. Included in
554
+ this declaration are the function name and parameters. This
555
+ FunctionDeclaration is a representation of a block of code that can
556
+ be used as a ``Tool`` by the model and executed by the client.
557
+
558
+
559
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
560
+
561
+ Attributes:
562
+ name (str):
563
+ Required. The name of the function.
564
+ Must be a-z, A-Z, 0-9, or contain underscores
565
+ and dashes, with a maximum length of 63.
566
+ description (str):
567
+ Required. A brief description of the
568
+ function.
569
+ parameters (google.ai.generativelanguage_v1alpha.types.Schema):
570
+ Optional. Describes the parameters to this
571
+ function. Reflects the Open API 3.03 Parameter
572
+ Object string Key: the name of the parameter.
573
+ Parameter names are case sensitive. Schema
574
+ Value: the Schema defining the type used for the
575
+ parameter.
576
+
577
+ This field is a member of `oneof`_ ``_parameters``.
578
+ response (google.ai.generativelanguage_v1alpha.types.Schema):
579
+ Optional. Describes the output from this
580
+ function in JSON Schema format. Reflects the
581
+ Open API 3.03 Response Object. The Schema
582
+ defines the type used for the response value of
583
+ the function.
584
+
585
+ This field is a member of `oneof`_ ``_response``.
586
+ """
587
+
588
+ name: str = proto.Field(
589
+ proto.STRING,
590
+ number=1,
591
+ )
592
+ description: str = proto.Field(
593
+ proto.STRING,
594
+ number=2,
595
+ )
596
+ parameters: "Schema" = proto.Field(
597
+ proto.MESSAGE,
598
+ number=3,
599
+ optional=True,
600
+ message="Schema",
601
+ )
602
+ response: "Schema" = proto.Field(
603
+ proto.MESSAGE,
604
+ number=4,
605
+ optional=True,
606
+ message="Schema",
607
+ )
608
+
609
+
610
+ class FunctionCall(proto.Message):
611
+ r"""A predicted ``FunctionCall`` returned from the model that contains a
612
+ string representing the ``FunctionDeclaration.name`` with the
613
+ arguments and their values.
614
+
615
+
616
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
617
+
618
+ Attributes:
619
+ id (str):
620
+ Optional. The unique id of the function call. If populated,
621
+ the client to execute the ``function_call`` and return the
622
+ response with the matching ``id``.
623
+ name (str):
624
+ Required. The name of the function to call.
625
+ Must be a-z, A-Z, 0-9, or contain underscores
626
+ and dashes, with a maximum length of 63.
627
+ args (google.protobuf.struct_pb2.Struct):
628
+ Optional. The function parameters and values
629
+ in JSON object format.
630
+
631
+ This field is a member of `oneof`_ ``_args``.
632
+ """
633
+
634
+ id: str = proto.Field(
635
+ proto.STRING,
636
+ number=3,
637
+ )
638
+ name: str = proto.Field(
639
+ proto.STRING,
640
+ number=1,
641
+ )
642
+ args: struct_pb2.Struct = proto.Field(
643
+ proto.MESSAGE,
644
+ number=2,
645
+ optional=True,
646
+ message=struct_pb2.Struct,
647
+ )
648
+
649
+
650
+ class FunctionResponse(proto.Message):
651
+ r"""The result output from a ``FunctionCall`` that contains a string
652
+ representing the ``FunctionDeclaration.name`` and a structured JSON
653
+ object containing any output from the function is used as context to
654
+ the model. This should contain the result of a\ ``FunctionCall``
655
+ made based on model prediction.
656
+
657
+ Attributes:
658
+ id (str):
659
+ Optional. The id of the function call this response is for.
660
+ Populated by the client to match the corresponding function
661
+ call ``id``.
662
+ name (str):
663
+ Required. The name of the function to call.
664
+ Must be a-z, A-Z, 0-9, or contain underscores
665
+ and dashes, with a maximum length of 63.
666
+ response (google.protobuf.struct_pb2.Struct):
667
+ Required. The function response in JSON
668
+ object format.
669
+ """
670
+
671
+ id: str = proto.Field(
672
+ proto.STRING,
673
+ number=3,
674
+ )
675
+ name: str = proto.Field(
676
+ proto.STRING,
677
+ number=1,
678
+ )
679
+ response: struct_pb2.Struct = proto.Field(
680
+ proto.MESSAGE,
681
+ number=2,
682
+ message=struct_pb2.Struct,
683
+ )
684
+
685
+
686
+ class Schema(proto.Message):
687
+ r"""The ``Schema`` object allows the definition of input and output data
688
+ types. These types can be objects, but also primitives and arrays.
689
+ Represents a select subset of an `OpenAPI 3.0 schema
690
+ object <https://spec.openapis.org/oas/v3.0.3#schema>`__.
691
+
692
+
693
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
694
+
695
+ Attributes:
696
+ type_ (google.ai.generativelanguage_v1alpha.types.Type):
697
+ Required. Data type.
698
+ format_ (str):
699
+ Optional. The format of the data. This is
700
+ used only for primitive datatypes. Supported
701
+ formats:
702
+
703
+ for NUMBER type: float, double
704
+ for INTEGER type: int32, int64
705
+ for STRING type: enum
706
+ description (str):
707
+ Optional. A brief description of the
708
+ parameter. This could contain examples of use.
709
+ Parameter description may be formatted as
710
+ Markdown.
711
+ nullable (bool):
712
+ Optional. Indicates if the value may be null.
713
+ enum (MutableSequence[str]):
714
+ Optional. Possible values of the element of Type.STRING with
715
+ enum format. For example we can define an Enum Direction as
716
+ : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH",
717
+ "WEST"]}
718
+ items (google.ai.generativelanguage_v1alpha.types.Schema):
719
+ Optional. Schema of the elements of
720
+ Type.ARRAY.
721
+
722
+ This field is a member of `oneof`_ ``_items``.
723
+ max_items (int):
724
+ Optional. Maximum number of the elements for
725
+ Type.ARRAY.
726
+ min_items (int):
727
+ Optional. Minimum number of the elements for
728
+ Type.ARRAY.
729
+ properties (MutableMapping[str, google.ai.generativelanguage_v1alpha.types.Schema]):
730
+ Optional. Properties of Type.OBJECT.
731
+ required (MutableSequence[str]):
732
+ Optional. Required properties of Type.OBJECT.
733
+ """
734
+
735
+ type_: "Type" = proto.Field(
736
+ proto.ENUM,
737
+ number=1,
738
+ enum="Type",
739
+ )
740
+ format_: str = proto.Field(
741
+ proto.STRING,
742
+ number=2,
743
+ )
744
+ description: str = proto.Field(
745
+ proto.STRING,
746
+ number=3,
747
+ )
748
+ nullable: bool = proto.Field(
749
+ proto.BOOL,
750
+ number=4,
751
+ )
752
+ enum: MutableSequence[str] = proto.RepeatedField(
753
+ proto.STRING,
754
+ number=5,
755
+ )
756
+ items: "Schema" = proto.Field(
757
+ proto.MESSAGE,
758
+ number=6,
759
+ optional=True,
760
+ message="Schema",
761
+ )
762
+ max_items: int = proto.Field(
763
+ proto.INT64,
764
+ number=21,
765
+ )
766
+ min_items: int = proto.Field(
767
+ proto.INT64,
768
+ number=22,
769
+ )
770
+ properties: MutableMapping[str, "Schema"] = proto.MapField(
771
+ proto.STRING,
772
+ proto.MESSAGE,
773
+ number=7,
774
+ message="Schema",
775
+ )
776
+ required: MutableSequence[str] = proto.RepeatedField(
777
+ proto.STRING,
778
+ number=8,
779
+ )
780
+
781
+
782
+ class GroundingPassage(proto.Message):
783
+ r"""Passage included inline with a grounding configuration.
784
+
785
+ Attributes:
786
+ id (str):
787
+ Identifier for the passage for attributing
788
+ this passage in grounded answers.
789
+ content (google.ai.generativelanguage_v1alpha.types.Content):
790
+ Content of the passage.
791
+ """
792
+
793
+ id: str = proto.Field(
794
+ proto.STRING,
795
+ number=1,
796
+ )
797
+ content: "Content" = proto.Field(
798
+ proto.MESSAGE,
799
+ number=2,
800
+ message="Content",
801
+ )
802
+
803
+
804
+ class GroundingPassages(proto.Message):
805
+ r"""A repeated list of passages.
806
+
807
+ Attributes:
808
+ passages (MutableSequence[google.ai.generativelanguage_v1alpha.types.GroundingPassage]):
809
+ List of passages.
810
+ """
811
+
812
+ passages: MutableSequence["GroundingPassage"] = proto.RepeatedField(
813
+ proto.MESSAGE,
814
+ number=1,
815
+ message="GroundingPassage",
816
+ )
817
+
818
+
819
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/model.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1alpha",
24
+ manifest={
25
+ "Model",
26
+ },
27
+ )
28
+
29
+
30
+ class Model(proto.Message):
31
+ r"""Information about a Generative Language Model.
32
+
33
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
34
+
35
+ Attributes:
36
+ name (str):
37
+ Required. The resource name of the ``Model``. Refer to
38
+ `Model
39
+ variants <https://ai.google.dev/gemini-api/docs/models/gemini#model-variations>`__
40
+ for all allowed values.
41
+
42
+ Format: ``models/{model}`` with a ``{model}`` naming
43
+ convention of:
44
+
45
+ - "{base_model_id}-{version}"
46
+
47
+ Examples:
48
+
49
+ - ``models/gemini-1.5-flash-001``
50
+ base_model_id (str):
51
+ Required. The name of the base model, pass this to the
52
+ generation request.
53
+
54
+ Examples:
55
+
56
+ - ``gemini-1.5-flash``
57
+ version (str):
58
+ Required. The version number of the model.
59
+
60
+ This represents the major version (``1.0`` or ``1.5``)
61
+ display_name (str):
62
+ The human-readable name of the model. E.g.
63
+ "Gemini 1.5 Flash".
64
+ The name can be up to 128 characters long and
65
+ can consist of any UTF-8 characters.
66
+ description (str):
67
+ A short description of the model.
68
+ input_token_limit (int):
69
+ Maximum number of input tokens allowed for
70
+ this model.
71
+ output_token_limit (int):
72
+ Maximum number of output tokens available for
73
+ this model.
74
+ supported_generation_methods (MutableSequence[str]):
75
+ The model's supported generation methods.
76
+
77
+ The corresponding API method names are defined as Pascal
78
+ case strings, such as ``generateMessage`` and
79
+ ``generateContent``.
80
+ temperature (float):
81
+ Controls the randomness of the output.
82
+
83
+ Values can range over ``[0.0,max_temperature]``, inclusive.
84
+ A higher value will produce responses that are more varied,
85
+ while a value closer to ``0.0`` will typically result in
86
+ less surprising responses from the model. This value
87
+ specifies default to be used by the backend while making the
88
+ call to the model.
89
+
90
+ This field is a member of `oneof`_ ``_temperature``.
91
+ max_temperature (float):
92
+ The maximum temperature this model can use.
93
+
94
+ This field is a member of `oneof`_ ``_max_temperature``.
95
+ top_p (float):
96
+ For `Nucleus
97
+ sampling <https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p>`__.
98
+
99
+ Nucleus sampling considers the smallest set of tokens whose
100
+ probability sum is at least ``top_p``. This value specifies
101
+ default to be used by the backend while making the call to
102
+ the model.
103
+
104
+ This field is a member of `oneof`_ ``_top_p``.
105
+ top_k (int):
106
+ For Top-k sampling.
107
+
108
+ Top-k sampling considers the set of ``top_k`` most probable
109
+ tokens. This value specifies default to be used by the
110
+ backend while making the call to the model. If empty,
111
+ indicates the model doesn't use top-k sampling, and
112
+ ``top_k`` isn't allowed as a generation parameter.
113
+
114
+ This field is a member of `oneof`_ ``_top_k``.
115
+ """
116
+
117
+ name: str = proto.Field(
118
+ proto.STRING,
119
+ number=1,
120
+ )
121
+ base_model_id: str = proto.Field(
122
+ proto.STRING,
123
+ number=2,
124
+ )
125
+ version: str = proto.Field(
126
+ proto.STRING,
127
+ number=3,
128
+ )
129
+ display_name: str = proto.Field(
130
+ proto.STRING,
131
+ number=4,
132
+ )
133
+ description: str = proto.Field(
134
+ proto.STRING,
135
+ number=5,
136
+ )
137
+ input_token_limit: int = proto.Field(
138
+ proto.INT32,
139
+ number=6,
140
+ )
141
+ output_token_limit: int = proto.Field(
142
+ proto.INT32,
143
+ number=7,
144
+ )
145
+ supported_generation_methods: MutableSequence[str] = proto.RepeatedField(
146
+ proto.STRING,
147
+ number=8,
148
+ )
149
+ temperature: float = proto.Field(
150
+ proto.FLOAT,
151
+ number=9,
152
+ optional=True,
153
+ )
154
+ max_temperature: float = proto.Field(
155
+ proto.FLOAT,
156
+ number=13,
157
+ optional=True,
158
+ )
159
+ top_p: float = proto.Field(
160
+ proto.FLOAT,
161
+ number=10,
162
+ optional=True,
163
+ )
164
+ top_k: int = proto.Field(
165
+ proto.INT32,
166
+ number=11,
167
+ optional=True,
168
+ )
169
+
170
+
171
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/model_service.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import field_mask_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
24
+ from google.ai.generativelanguage_v1alpha.types import model
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1alpha",
28
+ manifest={
29
+ "GetModelRequest",
30
+ "ListModelsRequest",
31
+ "ListModelsResponse",
32
+ "GetTunedModelRequest",
33
+ "ListTunedModelsRequest",
34
+ "ListTunedModelsResponse",
35
+ "CreateTunedModelRequest",
36
+ "CreateTunedModelMetadata",
37
+ "UpdateTunedModelRequest",
38
+ "DeleteTunedModelRequest",
39
+ },
40
+ )
41
+
42
+
43
+ class GetModelRequest(proto.Message):
44
+ r"""Request for getting information about a specific Model.
45
+
46
+ Attributes:
47
+ name (str):
48
+ Required. The resource name of the model.
49
+
50
+ This name should match a model name returned by the
51
+ ``ListModels`` method.
52
+
53
+ Format: ``models/{model}``
54
+ """
55
+
56
+ name: str = proto.Field(
57
+ proto.STRING,
58
+ number=1,
59
+ )
60
+
61
+
62
+ class ListModelsRequest(proto.Message):
63
+ r"""Request for listing all Models.
64
+
65
+ Attributes:
66
+ page_size (int):
67
+ The maximum number of ``Models`` to return (per page).
68
+
69
+ If unspecified, 50 models will be returned per page. This
70
+ method returns at most 1000 models per page, even if you
71
+ pass a larger page_size.
72
+ page_token (str):
73
+ A page token, received from a previous ``ListModels`` call.
74
+
75
+ Provide the ``page_token`` returned by one request as an
76
+ argument to the next request to retrieve the next page.
77
+
78
+ When paginating, all other parameters provided to
79
+ ``ListModels`` must match the call that provided the page
80
+ token.
81
+ """
82
+
83
+ page_size: int = proto.Field(
84
+ proto.INT32,
85
+ number=2,
86
+ )
87
+ page_token: str = proto.Field(
88
+ proto.STRING,
89
+ number=3,
90
+ )
91
+
92
+
93
+ class ListModelsResponse(proto.Message):
94
+ r"""Response from ``ListModel`` containing a paginated list of Models.
95
+
96
+ Attributes:
97
+ models (MutableSequence[google.ai.generativelanguage_v1alpha.types.Model]):
98
+ The returned Models.
99
+ next_page_token (str):
100
+ A token, which can be sent as ``page_token`` to retrieve the
101
+ next page.
102
+
103
+ If this field is omitted, there are no more pages.
104
+ """
105
+
106
+ @property
107
+ def raw_page(self):
108
+ return self
109
+
110
+ models: MutableSequence[model.Model] = proto.RepeatedField(
111
+ proto.MESSAGE,
112
+ number=1,
113
+ message=model.Model,
114
+ )
115
+ next_page_token: str = proto.Field(
116
+ proto.STRING,
117
+ number=2,
118
+ )
119
+
120
+
121
+ class GetTunedModelRequest(proto.Message):
122
+ r"""Request for getting information about a specific Model.
123
+
124
+ Attributes:
125
+ name (str):
126
+ Required. The resource name of the model.
127
+
128
+ Format: ``tunedModels/my-model-id``
129
+ """
130
+
131
+ name: str = proto.Field(
132
+ proto.STRING,
133
+ number=1,
134
+ )
135
+
136
+
137
class ListTunedModelsRequest(proto.Message):
    r"""Request for listing TunedModels.

    Attributes:
        page_size (int):
            Optional. The maximum number of ``TunedModels`` to return
            (per page). The service may return fewer tuned models.

            If unspecified, at most 10 tuned models will be returned.
            This method returns at most 1000 models per page, even if
            you pass a larger page_size.
        page_token (str):
            Optional. A page token, received from a previous
            ``ListTunedModels`` call.

            Provide the ``page_token`` returned by one request as an
            argument to the next request to retrieve the next page.

            When paginating, all other parameters provided to
            ``ListTunedModels`` must match the call that provided the
            page token.
        filter (str):
            Optional. A filter is a full text search over
            the tuned model's description and display name.
            By default, results will not include tuned
            models shared with everyone.

            Additional operators:

            - owner:me
            - writers:me
            - readers:me
            - readers:everyone

            Examples:

            "owner:me" returns all tuned models to which
            caller has owner role "readers:me" returns all
            tuned models to which caller has reader role
            "readers:everyone" returns all tuned models that
            are shared with everyone
    """

    page_size: int = proto.Field(
        proto.INT32,
        number=1,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
    # NOTE: ``filter`` deliberately shadows the builtin name — it must match
    # the proto field name and is part of the generated public API.
    filter: str = proto.Field(
        proto.STRING,
        number=3,
    )
192
+
193
+
194
class ListTunedModelsResponse(proto.Message):
    r"""Response from ``ListTunedModels`` containing a paginated list of
    Models.

    Attributes:
        tuned_models (MutableSequence[google.ai.generativelanguage_v1alpha.types.TunedModel]):
            The returned Models.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page.

            If this field is omitted, there are no more pages.
    """

    @property
    def raw_page(self):
        """Return this message itself as the raw page for the client-side pager."""
        return self

    tuned_models: MutableSequence[gag_tuned_model.TunedModel] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=gag_tuned_model.TunedModel,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
221
+
222
+
223
class CreateTunedModelRequest(proto.Message):
    r"""Request to create a TunedModel.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        tuned_model_id (str):
            Optional. The unique id for the tuned model if specified.
            This value should be up to 40 characters, the first
            character must be a letter, the last could be a letter or a
            number. The id must match the regular expression:
            ``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.

            This field is a member of `oneof`_ ``_tuned_model_id``.
        tuned_model (google.ai.generativelanguage_v1alpha.types.TunedModel):
            Required. The tuned model to create.
    """

    # ``optional=True`` makes this a proto3 optional field (presence is
    # tracked via the synthetic ``_tuned_model_id`` oneof noted above).
    tuned_model_id: str = proto.Field(
        proto.STRING,
        number=1,
        optional=True,
    )
    tuned_model: gag_tuned_model.TunedModel = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gag_tuned_model.TunedModel,
    )
251
+
252
+
253
class CreateTunedModelMetadata(proto.Message):
    r"""Metadata about the state and progress of creating a tuned
    model returned from the long-running operation

    Attributes:
        tuned_model (str):
            Name of the tuned model associated with the
            tuning operation.
        total_steps (int):
            The total number of tuning steps.
        completed_steps (int):
            The number of steps completed.
        completed_percent (float):
            The completed percentage for the tuning
            operation.
        snapshots (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningSnapshot]):
            Metrics collected during tuning.
    """

    # NOTE(review): field numbers are intentionally non-sequential here
    # (tuned_model is tag 5); they must match the service proto — do not
    # renumber to "tidy up".
    tuned_model: str = proto.Field(
        proto.STRING,
        number=5,
    )
    total_steps: int = proto.Field(
        proto.INT32,
        number=1,
    )
    completed_steps: int = proto.Field(
        proto.INT32,
        number=2,
    )
    completed_percent: float = proto.Field(
        proto.FLOAT,
        number=3,
    )
    snapshots: MutableSequence[gag_tuned_model.TuningSnapshot] = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message=gag_tuned_model.TuningSnapshot,
    )
293
+
294
+
295
class UpdateTunedModelRequest(proto.Message):
    r"""Request to update a TunedModel.

    Attributes:
        tuned_model (google.ai.generativelanguage_v1alpha.types.TunedModel):
            Required. The tuned model to update.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Optional. The list of fields to update.
    """

    tuned_model: gag_tuned_model.TunedModel = proto.Field(
        proto.MESSAGE,
        number=1,
        message=gag_tuned_model.TunedModel,
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
315
+
316
+
317
class DeleteTunedModelRequest(proto.Message):
    r"""Request to delete a TunedModel.

    Attributes:
        name (str):
            Required. The resource name of the model. Format:
            ``tunedModels/my-model-id``
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
330
+
331
+
332
# Export exactly the message names registered in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/prediction_service.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import struct_pb2  # type: ignore
import proto  # type: ignore

# Registers this module's messages under the v1alpha proto package; the
# manifest lists every message name this file defines and exports.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1alpha",
    manifest={
        "PredictRequest",
        "PredictResponse",
    },
)
30
+
31
+
32
class PredictRequest(proto.Message):
    r"""Request message for
    [PredictionService.Predict][google.ai.generativelanguage.v1alpha.PredictionService.Predict].

    Attributes:
        model (str):
            Required. The name of the model for prediction. Format:
            ``name=models/{model}``.
        instances (MutableSequence[google.protobuf.struct_pb2.Value]):
            Required. The instances that are the input to
            the prediction call.
        parameters (google.protobuf.struct_pb2.Value):
            Optional. The parameters that govern the
            prediction call.
    """

    model: str = proto.Field(
        proto.STRING,
        number=1,
    )
    # ``struct_pb2.Value`` carries arbitrary JSON-like payloads, so instance
    # shape is model-specific rather than fixed by this message.
    instances: MutableSequence[struct_pb2.Value] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=struct_pb2.Value,
    )
    parameters: struct_pb2.Value = proto.Field(
        proto.MESSAGE,
        number=3,
        message=struct_pb2.Value,
    )
62
+
63
+
64
class PredictResponse(proto.Message):
    r"""Response message for [PredictionService.Predict].

    Attributes:
        predictions (MutableSequence[google.protobuf.struct_pb2.Value]):
            The outputs of the prediction call.
    """

    predictions: MutableSequence[struct_pb2.Value] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=struct_pb2.Value,
    )
77
+
78
+
79
# Export exactly the message names registered in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/retriever.py ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import timestamp_pb2  # type: ignore
import proto  # type: ignore

# Registers the Semantic Retriever resource messages (Corpus / Document /
# Chunk and their metadata helpers) under the v1alpha proto package.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1alpha",
    manifest={
        "Corpus",
        "Document",
        "StringList",
        "CustomMetadata",
        "MetadataFilter",
        "Condition",
        "Chunk",
        "ChunkData",
    },
)
36
+
37
+
38
class Corpus(proto.Message):
    r"""A ``Corpus`` is a collection of ``Document``\ s. A project can
    create up to 5 corpora.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Corpus`` resource name. The ID
            (name excluding the "corpora/" prefix) can contain up to 40
            characters that are lowercase alphanumeric or dashes (-).
            The ID cannot start or end with a dash. If the name is empty
            on create, a unique name will be derived from
            ``display_name`` along with a 12 character random suffix.
            Example: ``corpora/my-awesome-corpora-123a456b789c``
        display_name (str):
            Optional. The human-readable display name for the
            ``Corpus``. The display name must be no more than 512
            characters in length, including spaces. Example: "Docs on
            Semantic Retriever".
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Corpus`` was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Corpus`` was last
            updated.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=2,
    )
    # Server-populated timestamps (see "Output only" above); clients should
    # not set these on create/update requests.
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
82
+
83
+
84
class Document(proto.Message):
    r"""A ``Document`` is a collection of ``Chunk``\ s. A ``Corpus`` can
    have a maximum of 10,000 ``Document``\ s.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Document`` resource name. The
            ID (name excluding the `corpora/*/documents/` prefix) can
            contain up to 40 characters that are lowercase alphanumeric
            or dashes (-). The ID cannot start or end with a dash. If
            the name is empty on create, a unique name will be derived
            from ``display_name`` along with a 12 character random
            suffix. Example:
            ``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c``
        display_name (str):
            Optional. The human-readable display name for the
            ``Document``. The display name must be no more than 512
            characters in length, including spaces. Example: "Semantic
            Retriever Documentation".
        custom_metadata (MutableSequence[google.ai.generativelanguage_v1alpha.types.CustomMetadata]):
            Optional. User provided custom metadata stored as key-value
            pairs used for querying. A ``Document`` can have a maximum
            of 20 ``CustomMetadata``.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Document`` was last
            updated.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Document`` was
            created.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    display_name: str = proto.Field(
        proto.STRING,
        number=2,
    )
    # Forward reference as a string: CustomMetadata is declared later in
    # this module and resolved by proto-plus at class-creation time.
    custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="CustomMetadata",
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
138
+
139
+
140
class StringList(proto.Message):
    r"""User provided string values assigned to a single metadata
    key.

    Attributes:
        values (MutableSequence[str]):
            The string values of the metadata to store.
    """

    values: MutableSequence[str] = proto.RepeatedField(
        proto.STRING,
        number=1,
    )
153
+
154
+
155
class CustomMetadata(proto.Message):
    r"""User provided metadata stored as key-value pairs.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The string value of the metadata to store.

            This field is a member of `oneof`_ ``value``.
        string_list_value (google.ai.generativelanguage_v1alpha.types.StringList):
            The StringList value of the metadata to
            store.

            This field is a member of `oneof`_ ``value``.
        numeric_value (float):
            The numeric value of the metadata to store.

            This field is a member of `oneof`_ ``value``.
        key (str):
            Required. The key of the metadata to store.
    """

    # The three value fields below form the ``value`` oneof; only one may be
    # set at a time. Tags 2/6/7 are non-contiguous by design — keep them.
    string_value: str = proto.Field(
        proto.STRING,
        number=2,
        oneof="value",
    )
    string_list_value: "StringList" = proto.Field(
        proto.MESSAGE,
        number=6,
        oneof="value",
        message="StringList",
    )
    numeric_value: float = proto.Field(
        proto.FLOAT,
        number=7,
        oneof="value",
    )
    key: str = proto.Field(
        proto.STRING,
        number=1,
    )
203
+
204
+
205
class MetadataFilter(proto.Message):
    r"""User provided filter to limit retrieval based on ``Chunk`` or
    ``Document`` level metadata values. Example (genre = drama OR genre
    = action): key = "document.custom_metadata.genre" conditions =
    [{string_value = "drama", operation = EQUAL}, {string_value =
    "action", operation = EQUAL}]

    Attributes:
        key (str):
            Required. The key of the metadata to filter
            on.
        conditions (MutableSequence[google.ai.generativelanguage_v1alpha.types.Condition]):
            Required. The ``Condition``\ s for the given key that will
            trigger this filter. Multiple ``Condition``\ s are joined by
            logical ORs.
    """

    key: str = proto.Field(
        proto.STRING,
        number=1,
    )
    conditions: MutableSequence["Condition"] = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="Condition",
    )
231
+
232
+
233
class Condition(proto.Message):
    r"""Filter condition applicable to a single key.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The string value to filter the metadata on.

            This field is a member of `oneof`_ ``value``.
        numeric_value (float):
            The numeric value to filter the metadata on.

            This field is a member of `oneof`_ ``value``.
        operation (google.ai.generativelanguage_v1alpha.types.Condition.Operator):
            Required. Operator applied to the given
            key-value pair to trigger the condition.
    """

    class Operator(proto.Enum):
        r"""Defines the valid operators that can be applied to a
        key-value pair.

        Values:
            OPERATOR_UNSPECIFIED (0):
                The default value. This value is unused.
            LESS (1):
                Supported by numeric.
            LESS_EQUAL (2):
                Supported by numeric.
            EQUAL (3):
                Supported by numeric & string.
            GREATER_EQUAL (4):
                Supported by numeric.
            GREATER (5):
                Supported by numeric.
            NOT_EQUAL (6):
                Supported by numeric & string.
            INCLUDES (7):
                Supported by string only when ``CustomMetadata`` value type
                for the given key has a ``string_list_value``.
            EXCLUDES (8):
                Supported by string only when ``CustomMetadata`` value type
                for the given key has a ``string_list_value``.
        """
        OPERATOR_UNSPECIFIED = 0
        LESS = 1
        LESS_EQUAL = 2
        EQUAL = 3
        GREATER_EQUAL = 4
        GREATER = 5
        NOT_EQUAL = 6
        INCLUDES = 7
        EXCLUDES = 8

    # ``string_value`` / ``numeric_value`` form the ``value`` oneof; tags
    # 1 and 6 (and tag 5 for operation) match the service proto — do not
    # renumber.
    string_value: str = proto.Field(
        proto.STRING,
        number=1,
        oneof="value",
    )
    numeric_value: float = proto.Field(
        proto.FLOAT,
        number=6,
        oneof="value",
    )
    operation: Operator = proto.Field(
        proto.ENUM,
        number=5,
        enum=Operator,
    )
308
+
309
+
310
class Chunk(proto.Message):
    r"""A ``Chunk`` is a subpart of a ``Document`` that is treated as an
    independent unit for the purposes of vector representation and
    storage. A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s.

    Attributes:
        name (str):
            Immutable. Identifier. The ``Chunk`` resource name. The ID
            (name excluding the "corpora/*/documents/*/chunks/" prefix)
            can contain up to 40 characters that are lowercase
            alphanumeric or dashes (-). The ID cannot start or end with
            a dash. If the name is empty on create, a random
            12-character unique ID will be generated. Example:
            ``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c``
        data (google.ai.generativelanguage_v1alpha.types.ChunkData):
            Required. The content for the ``Chunk``, such as the text
            string. The maximum number of tokens per chunk is 2043.
        custom_metadata (MutableSequence[google.ai.generativelanguage_v1alpha.types.CustomMetadata]):
            Optional. User provided custom metadata stored as key-value
            pairs. The maximum number of ``CustomMetadata`` per chunk is
            20.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Chunk`` was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The Timestamp of when the ``Chunk`` was last
            updated.
        state (google.ai.generativelanguage_v1alpha.types.Chunk.State):
            Output only. Current state of the ``Chunk``.
    """

    class State(proto.Enum):
        r"""States for the lifecycle of a ``Chunk``.

        Values:
            STATE_UNSPECIFIED (0):
                The default value. This value is used if the
                state is omitted.
            STATE_PENDING_PROCESSING (1):
                ``Chunk`` is being processed (embedding and vector storage).
            STATE_ACTIVE (2):
                ``Chunk`` is processed and available for querying.
            STATE_FAILED (10):
                ``Chunk`` failed processing.
        """
        STATE_UNSPECIFIED = 0
        STATE_PENDING_PROCESSING = 1
        STATE_ACTIVE = 2
        # Enum value 10 (not 3) matches the service proto; the gap is
        # intentional.
        STATE_FAILED = 10

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    data: "ChunkData" = proto.Field(
        proto.MESSAGE,
        number=2,
        message="ChunkData",
    )
    custom_metadata: MutableSequence["CustomMetadata"] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message="CustomMetadata",
    )
    create_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=4,
        message=timestamp_pb2.Timestamp,
    )
    update_time: timestamp_pb2.Timestamp = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    state: State = proto.Field(
        proto.ENUM,
        number=6,
        enum=State,
    )
389
+
390
+
391
class ChunkData(proto.Message):
    r"""Extracted data that represents the ``Chunk`` content.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        string_value (str):
            The ``Chunk`` content as a string. The maximum number of
            tokens per chunk is 2043.

            This field is a member of `oneof`_ ``data``.
    """

    # Single-member ``data`` oneof: presence can still be distinguished
    # from an empty string via the oneof.
    string_value: str = proto.Field(
        proto.STRING,
        number=1,
        oneof="data",
    )
409
+
410
+
411
# Export exactly the message names registered in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/retriever_service.py ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations

from typing import MutableMapping, MutableSequence

from google.protobuf import field_mask_pb2  # type: ignore
import proto  # type: ignore

# Resource messages (Corpus, Document, Chunk, ...) are defined in the
# sibling ``retriever`` module; this module defines the request/response
# wrappers for the RetrieverService RPCs.
from google.ai.generativelanguage_v1alpha.types import retriever

__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1alpha",
    manifest={
        "CreateCorpusRequest",
        "GetCorpusRequest",
        "UpdateCorpusRequest",
        "DeleteCorpusRequest",
        "ListCorporaRequest",
        "ListCorporaResponse",
        "QueryCorpusRequest",
        "QueryCorpusResponse",
        "RelevantChunk",
        "CreateDocumentRequest",
        "GetDocumentRequest",
        "UpdateDocumentRequest",
        "DeleteDocumentRequest",
        "ListDocumentsRequest",
        "ListDocumentsResponse",
        "QueryDocumentRequest",
        "QueryDocumentResponse",
        "CreateChunkRequest",
        "BatchCreateChunksRequest",
        "BatchCreateChunksResponse",
        "GetChunkRequest",
        "UpdateChunkRequest",
        "BatchUpdateChunksRequest",
        "BatchUpdateChunksResponse",
        "DeleteChunkRequest",
        "BatchDeleteChunksRequest",
        "ListChunksRequest",
        "ListChunksResponse",
    },
)
58
+
59
+
60
class CreateCorpusRequest(proto.Message):
    r"""Request to create a ``Corpus``.

    Attributes:
        corpus (google.ai.generativelanguage_v1alpha.types.Corpus):
            Required. The ``Corpus`` to create.
    """

    corpus: retriever.Corpus = proto.Field(
        proto.MESSAGE,
        number=1,
        message=retriever.Corpus,
    )
73
+
74
+
75
class GetCorpusRequest(proto.Message):
    r"""Request for getting information about a specific ``Corpus``.

    Attributes:
        name (str):
            Required. The name of the ``Corpus``. Example:
            ``corpora/my-corpus-123``
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
88
+
89
+
90
class UpdateCorpusRequest(proto.Message):
    r"""Request to update a ``Corpus``.

    Attributes:
        corpus (google.ai.generativelanguage_v1alpha.types.Corpus):
            Required. The ``Corpus`` to update.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The list of fields to update. Currently, this only
            supports updating ``display_name``.
    """

    corpus: retriever.Corpus = proto.Field(
        proto.MESSAGE,
        number=1,
        message=retriever.Corpus,
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE,
        number=2,
        message=field_mask_pb2.FieldMask,
    )
111
+
112
+
113
class DeleteCorpusRequest(proto.Message):
    r"""Request to delete a ``Corpus``.

    Attributes:
        name (str):
            Required. The resource name of the ``Corpus``. Example:
            ``corpora/my-corpus-123``
        force (bool):
            Optional. If set to true, any ``Document``\ s and objects
            related to this ``Corpus`` will also be deleted.

            If false (the default), a ``FAILED_PRECONDITION`` error will
            be returned if ``Corpus`` contains any ``Document``\ s.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    force: bool = proto.Field(
        proto.BOOL,
        number=2,
    )
136
+
137
+
138
class ListCorporaRequest(proto.Message):
    r"""Request for listing ``Corpora``.

    Attributes:
        page_size (int):
            Optional. The maximum number of ``Corpora`` to return (per
            page). The service may return fewer ``Corpora``.

            If unspecified, at most 10 ``Corpora`` will be returned. The
            maximum size limit is 20 ``Corpora`` per page.
        page_token (str):
            Optional. A page token, received from a previous
            ``ListCorpora`` call.

            Provide the ``next_page_token`` returned in the response as
            an argument to the next request to retrieve the next page.

            When paginating, all other parameters provided to
            ``ListCorpora`` must match the call that provided the page
            token.
    """

    page_size: int = proto.Field(
        proto.INT32,
        number=1,
    )
    page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
168
+
169
+
170
class ListCorporaResponse(proto.Message):
    r"""Response from ``ListCorpora`` containing a paginated list of
    ``Corpora``. The results are sorted by ascending
    ``corpus.create_time``.

    Attributes:
        corpora (MutableSequence[google.ai.generativelanguage_v1alpha.types.Corpus]):
            The returned corpora.
        next_page_token (str):
            A token, which can be sent as ``page_token`` to retrieve the
            next page. If this field is omitted, there are no more
            pages.
    """

    @property
    def raw_page(self):
        """Return this message itself as the raw page for the client-side pager."""
        return self

    corpora: MutableSequence[retriever.Corpus] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=retriever.Corpus,
    )
    next_page_token: str = proto.Field(
        proto.STRING,
        number=2,
    )
197
+
198
+
199
class QueryCorpusRequest(proto.Message):
    r"""Request for querying a ``Corpus``.

    Attributes:
        name (str):
            Required. The name of the ``Corpus`` to query. Example:
            ``corpora/my-corpus-123``
        query (str):
            Required. Query string to perform semantic
            search.
        metadata_filters (MutableSequence[google.ai.generativelanguage_v1alpha.types.MetadataFilter]):
            Optional. Filter for ``Chunk`` and ``Document`` metadata.
            Each ``MetadataFilter`` object should correspond to a unique
            key. Multiple ``MetadataFilter`` objects are joined by
            logical "AND"s.

            Example query at document level: (year >= 2020 OR year <
            2010) AND (genre = drama OR genre = action)

            ``MetadataFilter`` object list: metadata_filters = [ {key =
            "document.custom_metadata.year" conditions = [{int_value =
            2020, operation = GREATER_EQUAL}, {int_value = 2010,
            operation = LESS}]}, {key = "document.custom_metadata.year"
            conditions = [{int_value = 2020, operation = GREATER_EQUAL},
            {int_value = 2010, operation = LESS}]}, {key =
            "document.custom_metadata.genre" conditions = [{string_value
            = "drama", operation = EQUAL}, {string_value = "action",
            operation = EQUAL}]}]

            Example query at chunk level for a numeric range of values:
            (year > 2015 AND year <= 2020)

            ``MetadataFilter`` object list: metadata_filters = [ {key =
            "chunk.custom_metadata.year" conditions = [{int_value =
            2015, operation = GREATER}]}, {key =
            "chunk.custom_metadata.year" conditions = [{int_value =
            2020, operation = LESS_EQUAL}]}]

            Note: "AND"s for the same key are only supported for numeric
            values. String values only support "OR"s for the same key.
        results_count (int):
            Optional. The maximum number of ``Chunk``\ s to return. The
            service may return fewer ``Chunk``\ s.

            If unspecified, at most 10 ``Chunk``\ s will be returned.
            The maximum specified result count is 100.
    """

    name: str = proto.Field(
        proto.STRING,
        number=1,
    )
    query: str = proto.Field(
        proto.STRING,
        number=2,
    )
    metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=retriever.MetadataFilter,
    )
    results_count: int = proto.Field(
        proto.INT32,
        number=4,
    )
264
+
265
+
266
class QueryCorpusResponse(proto.Message):
    r"""Response from ``QueryCorpus`` containing a list of relevant chunks.

    Attributes:
        relevant_chunks (MutableSequence[google.ai.generativelanguage_v1alpha.types.RelevantChunk]):
            The relevant chunks.
    """

    relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="RelevantChunk",
    )
279
+
280
+
281
class RelevantChunk(proto.Message):
    r"""The information for a chunk relevant to a query.

    Attributes:
        chunk_relevance_score (float):
            ``Chunk`` relevance to the query.
        chunk (google.ai.generativelanguage_v1alpha.types.Chunk):
            ``Chunk`` associated with the query.
    """

    chunk_relevance_score: float = proto.Field(
        proto.FLOAT,
        number=1,
    )
    chunk: retriever.Chunk = proto.Field(
        proto.MESSAGE,
        number=2,
        message=retriever.Chunk,
    )
300
+
301
+
302
class CreateDocumentRequest(proto.Message):
    r"""Request message for creating a ``Document``.

    Attributes:
        parent (str):
            Required. Name of the ``Corpus`` that will own the new
            ``Document``, e.g. ``corpora/my-corpus-123``.
        document (google.ai.generativelanguage_v1alpha.types.Document):
            Required. The ``Document`` to create.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    document: retriever.Document = proto.Field(
        proto.MESSAGE, number=2, message=retriever.Document
    )
322
+
323
+
324
class GetDocumentRequest(proto.Message):
    r"""Request message for fetching a single ``Document``.

    Attributes:
        name (str):
            Required. Resource name of the ``Document``, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``.
    """

    name: str = proto.Field(proto.STRING, number=1)
337
+
338
+
339
class UpdateDocumentRequest(proto.Message):
    r"""Request message for updating a ``Document``.

    Attributes:
        document (google.ai.generativelanguage_v1alpha.types.Document):
            Required. The ``Document`` carrying the new values.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Fields to update; currently only ``display_name``
            and ``custom_metadata`` are supported.
    """

    document: retriever.Document = proto.Field(
        proto.MESSAGE, number=1, message=retriever.Document
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask
    )
360
+
361
+
362
class DeleteDocumentRequest(proto.Message):
    r"""Request message for deleting a ``Document``.

    Attributes:
        name (str):
            Required. Resource name of the ``Document`` to delete, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``.
        force (bool):
            Optional. When true, related ``Chunk``\ s and objects are
            deleted as well. When false (the default), the call fails
            with ``FAILED_PRECONDITION`` if the ``Document`` still
            contains any ``Chunk``\ s.
    """

    name: str = proto.Field(proto.STRING, number=1)
    force: bool = proto.Field(proto.BOOL, number=2)
385
+
386
+
387
class ListDocumentsRequest(proto.Message):
    r"""Request message for listing the ``Document``\ s of a ``Corpus``.

    Attributes:
        parent (str):
            Required. Name of the owning ``Corpus``, e.g.
            ``corpora/my-corpus-123``.
        page_size (int):
            Optional. Maximum ``Document``\ s per page (default 10,
            upper limit 20); the service may return fewer.
        page_token (str):
            Optional. ``next_page_token`` from a previous
            ``ListDocuments`` response. All other parameters must match
            the call that produced the token.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    page_size: int = proto.Field(proto.INT32, number=2)
    page_token: str = proto.Field(proto.STRING, number=3)
424
+
425
+
426
class ListDocumentsResponse(proto.Message):
    r"""One page of ``ListDocuments`` results, sorted by ascending
    ``document.create_time``.

    Attributes:
        documents (MutableSequence[google.ai.generativelanguage_v1alpha.types.Document]):
            The ``Document``\ s on this page.
        next_page_token (str):
            Token to pass as ``page_token`` for the next page; omitted
            when there are no more pages.
    """

    @property
    def raw_page(self):
        # Hook used by the client-library pagers: this message is itself
        # the raw page.
        return self

    documents: MutableSequence[retriever.Document] = proto.RepeatedField(
        proto.MESSAGE, number=1, message=retriever.Document
    )
    next_page_token: str = proto.Field(proto.STRING, number=2)
453
+
454
+
455
class QueryDocumentRequest(proto.Message):
    r"""Request message for semantic search over a single ``Document``.

    Attributes:
        name (str):
            Required. Name of the ``Document`` to query, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``.
        query (str):
            Required. Query string for the semantic search.
        results_count (int):
            Optional. Maximum number of ``Chunk``\ s to return (default
            10, upper limit 100); the service may return fewer.
        metadata_filters (MutableSequence[google.ai.generativelanguage_v1alpha.types.MetadataFilter]):
            Optional. Filters over ``Chunk`` metadata. Each
            ``MetadataFilter`` targets one unique key, and multiple
            filters are combined with logical "AND". ``Document``-level
            filtering is not available here because the ``Document`` is
            already fixed by ``name``.

            Conditions under a single key combine with "OR" for string
            values; only numeric values support "AND" for the same key.
            For example the range ``year > 2015 AND year <= 2020`` is
            expressed as two filters on
            ``chunk.custom_metadata.year``: one with
            ``{int_value = 2015, operation = GREATER}`` and one with
            ``{int_value = 2020, operation = LESS_EQUAL}``.
    """

    name: str = proto.Field(proto.STRING, number=1)
    query: str = proto.Field(proto.STRING, number=2)
    results_count: int = proto.Field(proto.INT32, number=3)
    metadata_filters: MutableSequence[retriever.MetadataFilter] = proto.RepeatedField(
        proto.MESSAGE, number=4, message=retriever.MetadataFilter
    )
520
+
521
+
522
class QueryDocumentResponse(proto.Message):
    r"""Result of a ``QueryDocument`` call.

    Attributes:
        relevant_chunks (MutableSequence[google.ai.generativelanguage_v1alpha.types.RelevantChunk]):
            The chunks judged relevant to the query.
    """

    relevant_chunks: MutableSequence["RelevantChunk"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="RelevantChunk"
    )
536
+
537
+
538
class CreateChunkRequest(proto.Message):
    r"""Request message for creating a ``Chunk``.

    Attributes:
        parent (str):
            Required. Name of the ``Document`` that will own the new
            ``Chunk``, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``.
        chunk (google.ai.generativelanguage_v1alpha.types.Chunk):
            Required. The ``Chunk`` to create.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    chunk: retriever.Chunk = proto.Field(
        proto.MESSAGE, number=2, message=retriever.Chunk
    )
559
+
560
+
561
class BatchCreateChunksRequest(proto.Message):
    r"""Request message for creating ``Chunk``\ s in bulk.

    Attributes:
        parent (str):
            Optional. Name of the ``Document`` that will own the batch,
            e.g. ``corpora/my-corpus-123/documents/the-doc-abc``. The
            ``parent`` of every ``CreateChunkRequest`` must match it.
        requests (MutableSequence[google.ai.generativelanguage_v1alpha.types.CreateChunkRequest]):
            Required. Per-``Chunk`` create requests; at most 100 per
            batch.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    requests: MutableSequence["CreateChunkRequest"] = proto.RepeatedField(
        proto.MESSAGE, number=2, message="CreateChunkRequest"
    )
585
+
586
+
587
class BatchCreateChunksResponse(proto.Message):
    r"""Result of a ``BatchCreateChunks`` call.

    Attributes:
        chunks (MutableSequence[google.ai.generativelanguage_v1alpha.types.Chunk]):
            The ``Chunk``\ s that were created.
    """

    chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
        proto.MESSAGE, number=1, message=retriever.Chunk
    )
601
+
602
+
603
class GetChunkRequest(proto.Message):
    r"""Request message for fetching a single ``Chunk``.

    Attributes:
        name (str):
            Required. Resource name of the ``Chunk``, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``.
    """

    name: str = proto.Field(proto.STRING, number=1)
616
+
617
+
618
class UpdateChunkRequest(proto.Message):
    r"""Request message for updating a ``Chunk``.

    Attributes:
        chunk (google.ai.generativelanguage_v1alpha.types.Chunk):
            Required. The ``Chunk`` carrying the new values.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Fields to update; currently only
            ``custom_metadata`` and ``data`` are supported.
    """

    chunk: retriever.Chunk = proto.Field(
        proto.MESSAGE, number=1, message=retriever.Chunk
    )
    update_mask: field_mask_pb2.FieldMask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask
    )
639
+
640
+
641
class BatchUpdateChunksRequest(proto.Message):
    r"""Request message for updating ``Chunk``\ s in bulk.

    Attributes:
        parent (str):
            Optional. Name of the ``Document`` that owns the
            ``Chunk``\ s, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``. The
            ``parent`` of every ``UpdateChunkRequest`` must match it.
        requests (MutableSequence[google.ai.generativelanguage_v1alpha.types.UpdateChunkRequest]):
            Required. Per-``Chunk`` update requests; at most 100 per
            batch.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    requests: MutableSequence["UpdateChunkRequest"] = proto.RepeatedField(
        proto.MESSAGE, number=2, message="UpdateChunkRequest"
    )
665
+
666
+
667
class BatchUpdateChunksResponse(proto.Message):
    r"""Result of a ``BatchUpdateChunks`` call.

    Attributes:
        chunks (MutableSequence[google.ai.generativelanguage_v1alpha.types.Chunk]):
            The ``Chunk``\ s that were updated.
    """

    chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
        proto.MESSAGE, number=1, message=retriever.Chunk
    )
681
+
682
+
683
class DeleteChunkRequest(proto.Message):
    r"""Request message for deleting a ``Chunk``.

    Attributes:
        name (str):
            Required. Resource name of the ``Chunk`` to delete, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``.
    """

    name: str = proto.Field(proto.STRING, number=1)
697
+
698
+
699
class BatchDeleteChunksRequest(proto.Message):
    r"""Request message for deleting ``Chunk``\ s in bulk.

    Attributes:
        parent (str):
            Optional. Name of the ``Document`` that owns the
            ``Chunk``\ s, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``. The
            ``parent`` of every ``DeleteChunkRequest`` must match it.
        requests (MutableSequence[google.ai.generativelanguage_v1alpha.types.DeleteChunkRequest]):
            Required. Per-``Chunk`` delete requests.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    requests: MutableSequence["DeleteChunkRequest"] = proto.RepeatedField(
        proto.MESSAGE, number=2, message="DeleteChunkRequest"
    )
722
+
723
+
724
class ListChunksRequest(proto.Message):
    r"""Request message for listing the ``Chunk``\ s of a ``Document``.

    Attributes:
        parent (str):
            Required. Name of the owning ``Document``, e.g.
            ``corpora/my-corpus-123/documents/the-doc-abc``.
        page_size (int):
            Optional. Maximum ``Chunk``\ s per page (default 10, upper
            limit 100); the service may return fewer.
        page_token (str):
            Optional. ``next_page_token`` from a previous ``ListChunks``
            response. All other parameters must match the call that
            produced the token.
    """

    parent: str = proto.Field(proto.STRING, number=1)
    page_size: int = proto.Field(proto.INT32, number=2)
    page_token: str = proto.Field(proto.STRING, number=3)
762
+
763
+
764
class ListChunksResponse(proto.Message):
    r"""One page of ``ListChunks`` results, sorted by ascending
    ``chunk.create_time``.

    Attributes:
        chunks (MutableSequence[google.ai.generativelanguage_v1alpha.types.Chunk]):
            The ``Chunk``\ s on this page.
        next_page_token (str):
            Token to pass as ``page_token`` for the next page; omitted
            when there are no more pages.
    """

    @property
    def raw_page(self):
        # Hook used by the client-library pagers: this message is itself
        # the raw page.
        return self

    chunks: MutableSequence[retriever.Chunk] = proto.RepeatedField(
        proto.MESSAGE, number=1, message=retriever.Chunk
    )
    next_page_token: str = proto.Field(proto.STRING, number=2)
791
+
792
+
793
# Public API of this module: every message name registered in the proto
# manifest, in sorted order.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/text_service.py ADDED
@@ -0,0 +1,441 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
from __future__ import annotations

from typing import MutableMapping, MutableSequence

import proto  # type: ignore

from google.ai.generativelanguage_v1alpha.types import citation, safety

# Registers this module's messages under the v1alpha proto package and
# declares the set of message names this module defines.
__protobuf__ = proto.module(
    package="google.ai.generativelanguage.v1alpha",
    manifest={
        "GenerateTextRequest",
        "GenerateTextResponse",
        "TextPrompt",
        "TextCompletion",
        "EmbedTextRequest",
        "EmbedTextResponse",
        "BatchEmbedTextRequest",
        "BatchEmbedTextResponse",
        "Embedding",
        "CountTextTokensRequest",
        "CountTextTokensResponse",
    },
)
40
+
41
+
42
class GenerateTextRequest(proto.Message):
    r"""Request message for a text completion from the model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        model (str):
            Required. Name of the ``Model`` or ``TunedModel`` that
            generates the completion, e.g. ``models/text-bison-001`` or
            ``tunedModels/sentence-translator-u3b7m``.
        prompt (google.ai.generativelanguage_v1alpha.types.TextPrompt):
            Required. Free-form input text; the model predicts a
            ``TextCompletion`` that continues it.
        temperature (float):
            Optional. Output randomness, in [0.0, 1.0] inclusive. Values
            near 1.0 yield more varied and creative responses; values
            near 0.0 yield more straightforward ones. The default is
            model-specific (see ``Model.temperature`` from
            ``getModel``).

            This field is a member of `oneof`_ ``_temperature``.
        candidate_count (int):
            Optional. Number of responses to generate, in [1, 8]
            inclusive; defaults to 1 when unset.

            This field is a member of `oneof`_ ``_candidate_count``.
        max_output_tokens (int):
            Optional. Maximum number of tokens per candidate; defaults
            to the ``output_token_limit`` of the ``Model``.

            This field is a member of `oneof`_ ``_max_output_tokens``.
        top_p (float):
            Optional. Cumulative-probability cutoff for nucleus
            sampling (combined with top-k sampling). The default is
            model-specific (see ``Model.top_p`` from ``getModel``).

            This field is a member of `oneof`_ ``_top_p``.
        top_k (int):
            Optional. Number of most-probable tokens considered by
            top-k sampling; defaults to 40, though the exact default is
            model-specific (see ``Model.top_k`` from ``getModel``).

            This field is a member of `oneof`_ ``_top_k``.
        safety_settings (MutableSequence[google.ai.generativelanguage_v1alpha.types.SafetySetting]):
            Optional. Unique per-category safety settings enforced on
            the prompt and the candidates; at most one setting per
            ``SafetyCategory``. Prompts or responses that fail these
            thresholds are blocked, and categories without an explicit
            setting fall back to the API default. The text service
            supports HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
            HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
            HARM_CATEGORY_MEDICAL and HARM_CATEGORY_DANGEROUS.
        stop_sequences (MutableSequence[str]):
            Up to 5 character sequences that stop generation at their
            first appearance; the stop sequence itself is excluded from
            the response.
    """

    model: str = proto.Field(proto.STRING, number=1)
    prompt: "TextPrompt" = proto.Field(proto.MESSAGE, number=2, message="TextPrompt")
    temperature: float = proto.Field(proto.FLOAT, number=3, optional=True)
    candidate_count: int = proto.Field(proto.INT32, number=4, optional=True)
    max_output_tokens: int = proto.Field(proto.INT32, number=5, optional=True)
    top_p: float = proto.Field(proto.FLOAT, number=6, optional=True)
    top_k: int = proto.Field(proto.INT32, number=7, optional=True)
    safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
        proto.MESSAGE, number=8, message=safety.SafetySetting
    )
    stop_sequences: MutableSequence[str] = proto.RepeatedField(proto.STRING, number=9)
186
+
187
+
188
class GenerateTextResponse(proto.Message):
    r"""Model response carrying the candidate completions.

    Attributes:
        candidates (MutableSequence[google.ai.generativelanguage_v1alpha.types.TextCompletion]):
            The candidate responses produced by the model.
        filters (MutableSequence[google.ai.generativelanguage_v1alpha.types.ContentFilter]):
            Content-filtering metadata for the prompt and response
            text: which ``SafetyCategory``\ (s) blocked a candidate, the
            lowest ``HarmProbability`` that triggered a block, and the
            HarmThreshold for that category — i.e. the smallest
            ``SafetySettings`` change that would unblock at least one
            response. Blocking is governed by the request's
            ``SafetySettings`` (or the API defaults).
        safety_feedback (MutableSequence[google.ai.generativelanguage_v1alpha.types.SafetyFeedback]):
            Safety feedback related to content filtering, if any.
    """

    candidates: MutableSequence["TextCompletion"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="TextCompletion"
    )
    filters: MutableSequence[safety.ContentFilter] = proto.RepeatedField(
        proto.MESSAGE, number=3, message=safety.ContentFilter
    )
    safety_feedback: MutableSequence[safety.SafetyFeedback] = proto.RepeatedField(
        proto.MESSAGE, number=4, message=safety.SafetyFeedback
    )
227
+
228
+
229
class TextPrompt(proto.Message):
    r"""Input text the model completes into a ``TextCompletion``.

    Attributes:
        text (str):
            Required. The prompt text.
    """

    text: str = proto.Field(proto.STRING, number=1)
244
+
245
+
246
class TextCompletion(proto.Message):
    r"""A single generated output text from the model.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        output (str):
            Output only. The generated text.
        safety_ratings (MutableSequence[google.ai.generativelanguage_v1alpha.types.SafetyRating]):
            Safety ratings for the response; at most one per category.
        citation_metadata (google.ai.generativelanguage_v1alpha.types.CitationMetadata):
            Output only. Attribution information for text included in
            the model-generated ``output``.

            This field is a member of `oneof`_ ``_citation_metadata``.
    """

    output: str = proto.Field(proto.STRING, number=1)
    safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
        proto.MESSAGE, number=2, message=safety.SafetyRating
    )
    citation_metadata: citation.CitationMetadata = proto.Field(
        proto.MESSAGE, number=3, optional=True, message=citation.CitationMetadata
    )
284
+
285
+
286
class EmbedTextRequest(proto.Message):
    r"""Request message for embedding a single text.

    Attributes:
        model (str):
            Required. Model name, in the form ``models/{model}``.
        text (str):
            Optional. Free-form input text to embed.
    """

    model: str = proto.Field(proto.STRING, number=1)
    text: str = proto.Field(proto.STRING, number=2)
306
+
307
+
308
class EmbedTextResponse(proto.Message):
    r"""Result of an ``EmbedTextRequest``.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        embedding (google.ai.generativelanguage_v1alpha.types.Embedding):
            Output only. The embedding computed from the input text.

            This field is a member of `oneof`_ ``_embedding``.
    """

    embedding: "Embedding" = proto.Field(
        proto.MESSAGE, number=1, optional=True, message="Embedding"
    )
327
+
328
+
329
class BatchEmbedTextRequest(proto.Message):
    r"""Request message for embedding several texts in one call.

    Attributes:
        model (str):
            Required. Name of the embedding ``Model``, e.g.
            ``models/embedding-gecko-001``.
        texts (MutableSequence[str]):
            Optional. Free-form input texts to embed; more than 100
            texts raises an error.
        requests (MutableSequence[google.ai.generativelanguage_v1alpha.types.EmbedTextRequest]):
            Optional. Per-text embed requests. Only one of ``texts`` or
            ``requests`` may be set.
    """

    model: str = proto.Field(proto.STRING, number=1)
    texts: MutableSequence[str] = proto.RepeatedField(proto.STRING, number=2)
    requests: MutableSequence["EmbedTextRequest"] = proto.RepeatedField(
        proto.MESSAGE, number=3, message="EmbedTextRequest"
    )
359
+
360
+
361
class BatchEmbedTextResponse(proto.Message):
    r"""Result of a batch text-embedding request.

    Attributes:
        embeddings (MutableSequence[google.ai.generativelanguage_v1alpha.types.Embedding]):
            Output only. Embeddings computed from the input texts.
    """

    embeddings: MutableSequence["Embedding"] = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Embedding"
    )
375
+
376
+
377
class Embedding(proto.Message):
    r"""An embedding, represented as a list of floats.

    Attributes:
        value (MutableSequence[float]):
            The embedding values.
    """

    value: MutableSequence[float] = proto.RepeatedField(proto.FLOAT, number=1)
389
+
390
+
391
class CountTextTokensRequest(proto.Message):
    r"""Request message for counting the tokens in a ``prompt``.

    Models may tokenize text differently, so each model can report a
    different ``token_count``.

    Attributes:
        model (str):
            Required. Resource name identifying the Model, in the form
            ``models/{model}``; it should match a name returned by
            ``ListModels``.
        prompt (google.ai.generativelanguage_v1alpha.types.TextPrompt):
            Required. Free-form input text given to the model as a
            prompt.
    """

    model: str = proto.Field(proto.STRING, number=1)
    prompt: "TextPrompt" = proto.Field(proto.MESSAGE, number=2, message="TextPrompt")
420
+
421
+
422
class CountTextTokensResponse(proto.Message):
    r"""Result of ``CountTextTokens``: the model's token count for the
    ``prompt``.

    Attributes:
        token_count (int):
            Number of tokens the ``model`` tokenizes the ``prompt``
            into; always non-negative.
    """

    token_count: int = proto.Field(proto.INT32, number=1)
439
+
440
+
441
# Public API of this module: every message name registered in the proto
# manifest, in sorted order.
__all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/types/tuned_model.py ADDED
@@ -0,0 +1,542 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ from google.protobuf import timestamp_pb2 # type: ignore
21
+ import proto # type: ignore
22
+
23
+ __protobuf__ = proto.module(
24
+ package="google.ai.generativelanguage.v1alpha",
25
+ manifest={
26
+ "TunedModel",
27
+ "TunedModelSource",
28
+ "TuningTask",
29
+ "Hyperparameters",
30
+ "Dataset",
31
+ "TuningExamples",
32
+ "TuningPart",
33
+ "TuningContent",
34
+ "TuningMultiturnExample",
35
+ "TuningExample",
36
+ "TuningSnapshot",
37
+ },
38
+ )
39
+
40
+
41
+ class TunedModel(proto.Message):
42
+ r"""A fine-tuned model created using
43
+ ModelService.CreateTunedModel.
44
+
45
+ This message has `oneof`_ fields (mutually exclusive fields).
46
+ For each oneof, at most one member field can be set at the same time.
47
+ Setting any member of the oneof automatically clears all other
48
+ members.
49
+
50
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
51
+
52
+ Attributes:
53
+ tuned_model_source (google.ai.generativelanguage_v1alpha.types.TunedModelSource):
54
+ Optional. TunedModel to use as the starting
55
+ point for training the new model.
56
+
57
+ This field is a member of `oneof`_ ``source_model``.
58
+ base_model (str):
59
+ Immutable. The name of the ``Model`` to tune. Example:
60
+ ``models/gemini-1.5-flash-001``
61
+
62
+ This field is a member of `oneof`_ ``source_model``.
63
+ name (str):
64
+ Output only. The tuned model name. A unique name will be
65
+ generated on create. Example: ``tunedModels/az2mb0bpw6i`` If
66
+ display_name is set on create, the id portion of the name
67
+ will be set by concatenating the words of the display_name
68
+ with hyphens and adding a random portion for uniqueness.
69
+
70
+ Example:
71
+
72
+ - display_name = ``Sentence Translator``
73
+ - name = ``tunedModels/sentence-translator-u3b7m``
74
+ display_name (str):
75
+ Optional. The name to display for this model
76
+ in user interfaces. The display name must be up
77
+ to 40 characters including spaces.
78
+ description (str):
79
+ Optional. A short description of this model.
80
+ temperature (float):
81
+ Optional. Controls the randomness of the output.
82
+
83
+ Values can range over ``[0.0,1.0]``, inclusive. A value
84
+ closer to ``1.0`` will produce responses that are more
85
+ varied, while a value closer to ``0.0`` will typically
86
+ result in less surprising responses from the model.
87
+
88
+ This value specifies default to be the one used by the base
89
+ model while creating the model.
90
+
91
+ This field is a member of `oneof`_ ``_temperature``.
92
+ top_p (float):
93
+ Optional. For Nucleus sampling.
94
+
95
+ Nucleus sampling considers the smallest set of tokens whose
96
+ probability sum is at least ``top_p``.
97
+
98
+ This value specifies default to be the one used by the base
99
+ model while creating the model.
100
+
101
+ This field is a member of `oneof`_ ``_top_p``.
102
+ top_k (int):
103
+ Optional. For Top-k sampling.
104
+
105
+ Top-k sampling considers the set of ``top_k`` most probable
106
+ tokens. This value specifies default to be used by the
107
+ backend while making the call to the model.
108
+
109
+ This value specifies default to be the one used by the base
110
+ model while creating the model.
111
+
112
+ This field is a member of `oneof`_ ``_top_k``.
113
+ state (google.ai.generativelanguage_v1alpha.types.TunedModel.State):
114
+ Output only. The state of the tuned model.
115
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
116
+ Output only. The timestamp when this model
117
+ was created.
118
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
119
+ Output only. The timestamp when this model
120
+ was updated.
121
+ tuning_task (google.ai.generativelanguage_v1alpha.types.TuningTask):
122
+ Required. The tuning task that creates the
123
+ tuned model.
124
+ reader_project_numbers (MutableSequence[int]):
125
+ Optional. List of project numbers that have
126
+ read access to the tuned model.
127
+ """
128
+
129
+ class State(proto.Enum):
130
+ r"""The state of the tuned model.
131
+
132
+ Values:
133
+ STATE_UNSPECIFIED (0):
134
+ The default value. This value is unused.
135
+ CREATING (1):
136
+ The model is being created.
137
+ ACTIVE (2):
138
+ The model is ready to be used.
139
+ FAILED (3):
140
+ The model failed to be created.
141
+ """
142
+ STATE_UNSPECIFIED = 0
143
+ CREATING = 1
144
+ ACTIVE = 2
145
+ FAILED = 3
146
+
147
+ tuned_model_source: "TunedModelSource" = proto.Field(
148
+ proto.MESSAGE,
149
+ number=3,
150
+ oneof="source_model",
151
+ message="TunedModelSource",
152
+ )
153
+ base_model: str = proto.Field(
154
+ proto.STRING,
155
+ number=4,
156
+ oneof="source_model",
157
+ )
158
+ name: str = proto.Field(
159
+ proto.STRING,
160
+ number=1,
161
+ )
162
+ display_name: str = proto.Field(
163
+ proto.STRING,
164
+ number=5,
165
+ )
166
+ description: str = proto.Field(
167
+ proto.STRING,
168
+ number=6,
169
+ )
170
+ temperature: float = proto.Field(
171
+ proto.FLOAT,
172
+ number=11,
173
+ optional=True,
174
+ )
175
+ top_p: float = proto.Field(
176
+ proto.FLOAT,
177
+ number=12,
178
+ optional=True,
179
+ )
180
+ top_k: int = proto.Field(
181
+ proto.INT32,
182
+ number=13,
183
+ optional=True,
184
+ )
185
+ state: State = proto.Field(
186
+ proto.ENUM,
187
+ number=7,
188
+ enum=State,
189
+ )
190
+ create_time: timestamp_pb2.Timestamp = proto.Field(
191
+ proto.MESSAGE,
192
+ number=8,
193
+ message=timestamp_pb2.Timestamp,
194
+ )
195
+ update_time: timestamp_pb2.Timestamp = proto.Field(
196
+ proto.MESSAGE,
197
+ number=9,
198
+ message=timestamp_pb2.Timestamp,
199
+ )
200
+ tuning_task: "TuningTask" = proto.Field(
201
+ proto.MESSAGE,
202
+ number=10,
203
+ message="TuningTask",
204
+ )
205
+ reader_project_numbers: MutableSequence[int] = proto.RepeatedField(
206
+ proto.INT64,
207
+ number=14,
208
+ )
209
+
210
+
211
+ class TunedModelSource(proto.Message):
212
+ r"""Tuned model as a source for training a new model.
213
+
214
+ Attributes:
215
+ tuned_model (str):
216
+ Immutable. The name of the ``TunedModel`` to use as the
217
+ starting point for training the new model. Example:
218
+ ``tunedModels/my-tuned-model``
219
+ base_model (str):
220
+ Output only. The name of the base ``Model`` this
221
+ ``TunedModel`` was tuned from. Example:
222
+ ``models/gemini-1.5-flash-001``
223
+ """
224
+
225
+ tuned_model: str = proto.Field(
226
+ proto.STRING,
227
+ number=1,
228
+ )
229
+ base_model: str = proto.Field(
230
+ proto.STRING,
231
+ number=2,
232
+ )
233
+
234
+
235
+ class TuningTask(proto.Message):
236
+ r"""Tuning tasks that create tuned models.
237
+
238
+ Attributes:
239
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
240
+ Output only. The timestamp when tuning this
241
+ model started.
242
+ complete_time (google.protobuf.timestamp_pb2.Timestamp):
243
+ Output only. The timestamp when tuning this
244
+ model completed.
245
+ snapshots (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningSnapshot]):
246
+ Output only. Metrics collected during tuning.
247
+ training_data (google.ai.generativelanguage_v1alpha.types.Dataset):
248
+ Required. Input only. Immutable. The model
249
+ training data.
250
+ hyperparameters (google.ai.generativelanguage_v1alpha.types.Hyperparameters):
251
+ Immutable. Hyperparameters controlling the
252
+ tuning process. If not provided, default values
253
+ will be used.
254
+ """
255
+
256
+ start_time: timestamp_pb2.Timestamp = proto.Field(
257
+ proto.MESSAGE,
258
+ number=1,
259
+ message=timestamp_pb2.Timestamp,
260
+ )
261
+ complete_time: timestamp_pb2.Timestamp = proto.Field(
262
+ proto.MESSAGE,
263
+ number=2,
264
+ message=timestamp_pb2.Timestamp,
265
+ )
266
+ snapshots: MutableSequence["TuningSnapshot"] = proto.RepeatedField(
267
+ proto.MESSAGE,
268
+ number=3,
269
+ message="TuningSnapshot",
270
+ )
271
+ training_data: "Dataset" = proto.Field(
272
+ proto.MESSAGE,
273
+ number=4,
274
+ message="Dataset",
275
+ )
276
+ hyperparameters: "Hyperparameters" = proto.Field(
277
+ proto.MESSAGE,
278
+ number=5,
279
+ message="Hyperparameters",
280
+ )
281
+
282
+
283
+ class Hyperparameters(proto.Message):
284
+ r"""Hyperparameters controlling the tuning process. Read more at
285
+ https://ai.google.dev/docs/model_tuning_guidance
286
+
287
+ This message has `oneof`_ fields (mutually exclusive fields).
288
+ For each oneof, at most one member field can be set at the same time.
289
+ Setting any member of the oneof automatically clears all other
290
+ members.
291
+
292
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
293
+
294
+ Attributes:
295
+ learning_rate (float):
296
+ Optional. Immutable. The learning rate
297
+ hyperparameter for tuning. If not set, a default
298
+ of 0.001 or 0.0002 will be calculated based on
299
+ the number of training examples.
300
+
301
+ This field is a member of `oneof`_ ``learning_rate_option``.
302
+ learning_rate_multiplier (float):
303
+ Optional. Immutable. The learning rate multiplier is used to
304
+ calculate a final learning_rate based on the default
305
+ (recommended) value. Actual learning rate :=
306
+ learning_rate_multiplier \* default learning rate Default
307
+ learning rate is dependent on base model and dataset size.
308
+ If not set, a default of 1.0 will be used.
309
+
310
+ This field is a member of `oneof`_ ``learning_rate_option``.
311
+ epoch_count (int):
312
+ Immutable. The number of training epochs. An
313
+ epoch is one pass through the training data. If
314
+ not set, a default of 5 will be used.
315
+
316
+ This field is a member of `oneof`_ ``_epoch_count``.
317
+ batch_size (int):
318
+ Immutable. The batch size hyperparameter for
319
+ tuning. If not set, a default of 4 or 16 will be
320
+ used based on the number of training examples.
321
+
322
+ This field is a member of `oneof`_ ``_batch_size``.
323
+ """
324
+
325
+ learning_rate: float = proto.Field(
326
+ proto.FLOAT,
327
+ number=16,
328
+ oneof="learning_rate_option",
329
+ )
330
+ learning_rate_multiplier: float = proto.Field(
331
+ proto.FLOAT,
332
+ number=17,
333
+ oneof="learning_rate_option",
334
+ )
335
+ epoch_count: int = proto.Field(
336
+ proto.INT32,
337
+ number=14,
338
+ optional=True,
339
+ )
340
+ batch_size: int = proto.Field(
341
+ proto.INT32,
342
+ number=15,
343
+ optional=True,
344
+ )
345
+
346
+
347
+ class Dataset(proto.Message):
348
+ r"""Dataset for training or validation.
349
+
350
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
351
+
352
+ Attributes:
353
+ examples (google.ai.generativelanguage_v1alpha.types.TuningExamples):
354
+ Optional. Inline examples with simple
355
+ input/output text.
356
+
357
+ This field is a member of `oneof`_ ``dataset``.
358
+ """
359
+
360
+ examples: "TuningExamples" = proto.Field(
361
+ proto.MESSAGE,
362
+ number=1,
363
+ oneof="dataset",
364
+ message="TuningExamples",
365
+ )
366
+
367
+
368
+ class TuningExamples(proto.Message):
369
+ r"""A set of tuning examples. Can be training or validation data.
370
+
371
+ Attributes:
372
+ examples (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningExample]):
373
+ The examples. Example input can be for text
374
+ or discuss, but all examples in a set must be of
375
+ the same type.
376
+ multiturn_examples (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningMultiturnExample]):
377
+ Content examples. For multiturn
378
+ conversations.
379
+ """
380
+
381
+ examples: MutableSequence["TuningExample"] = proto.RepeatedField(
382
+ proto.MESSAGE,
383
+ number=1,
384
+ message="TuningExample",
385
+ )
386
+ multiturn_examples: MutableSequence["TuningMultiturnExample"] = proto.RepeatedField(
387
+ proto.MESSAGE,
388
+ number=2,
389
+ message="TuningMultiturnExample",
390
+ )
391
+
392
+
393
+ class TuningPart(proto.Message):
394
+ r"""A datatype containing data that is part of a multi-part
395
+ ``TuningContent`` message.
396
+
397
+ This is a subset of the Part used for model inference, with limited
398
+ type support.
399
+
400
+ A ``Part`` consists of data which has an associated datatype. A
401
+ ``Part`` can only contain one of the accepted types in
402
+ ``Part.data``.
403
+
404
+
405
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
406
+
407
+ Attributes:
408
+ text (str):
409
+ Inline text.
410
+
411
+ This field is a member of `oneof`_ ``data``.
412
+ """
413
+
414
+ text: str = proto.Field(
415
+ proto.STRING,
416
+ number=2,
417
+ oneof="data",
418
+ )
419
+
420
+
421
+ class TuningContent(proto.Message):
422
+ r"""The structured datatype containing multi-part content of an example
423
+ message.
424
+
425
+ This is a subset of the Content proto used during model inference
426
+ with limited type support. A ``Content`` includes a ``role`` field
427
+ designating the producer of the ``Content`` and a ``parts`` field
428
+ containing multi-part data that contains the content of the message
429
+ turn.
430
+
431
+ Attributes:
432
+ parts (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningPart]):
433
+ Ordered ``Parts`` that constitute a single message. Parts
434
+ may have different MIME types.
435
+ role (str):
436
+ Optional. The producer of the content. Must
437
+ be either 'user' or 'model'.
438
+ Useful to set for multi-turn conversations,
439
+ otherwise can be left blank or unset.
440
+ """
441
+
442
+ parts: MutableSequence["TuningPart"] = proto.RepeatedField(
443
+ proto.MESSAGE,
444
+ number=1,
445
+ message="TuningPart",
446
+ )
447
+ role: str = proto.Field(
448
+ proto.STRING,
449
+ number=2,
450
+ )
451
+
452
+
453
+ class TuningMultiturnExample(proto.Message):
454
+ r"""A tuning example with multiturn input.
455
+
456
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
457
+
458
+ Attributes:
459
+ system_instruction (google.ai.generativelanguage_v1alpha.types.TuningContent):
460
+ Optional. Developer set system instructions.
461
+ Currently, text only.
462
+
463
+ This field is a member of `oneof`_ ``_system_instruction``.
464
+ contents (MutableSequence[google.ai.generativelanguage_v1alpha.types.TuningContent]):
465
+ Each Content represents a turn in the
466
+ conversation.
467
+ """
468
+
469
+ system_instruction: "TuningContent" = proto.Field(
470
+ proto.MESSAGE,
471
+ number=8,
472
+ optional=True,
473
+ message="TuningContent",
474
+ )
475
+ contents: MutableSequence["TuningContent"] = proto.RepeatedField(
476
+ proto.MESSAGE,
477
+ number=1,
478
+ message="TuningContent",
479
+ )
480
+
481
+
482
+ class TuningExample(proto.Message):
483
+ r"""A single example for tuning.
484
+
485
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
486
+
487
+ Attributes:
488
+ text_input (str):
489
+ Optional. Text model input.
490
+
491
+ This field is a member of `oneof`_ ``model_input``.
492
+ output (str):
493
+ Required. The expected model output.
494
+ """
495
+
496
+ text_input: str = proto.Field(
497
+ proto.STRING,
498
+ number=1,
499
+ oneof="model_input",
500
+ )
501
+ output: str = proto.Field(
502
+ proto.STRING,
503
+ number=3,
504
+ )
505
+
506
+
507
+ class TuningSnapshot(proto.Message):
508
+ r"""Record for a single tuning step.
509
+
510
+ Attributes:
511
+ step (int):
512
+ Output only. The tuning step.
513
+ epoch (int):
514
+ Output only. The epoch this step was part of.
515
+ mean_loss (float):
516
+ Output only. The mean loss of the training
517
+ examples for this step.
518
+ compute_time (google.protobuf.timestamp_pb2.Timestamp):
519
+ Output only. The timestamp when this metric
520
+ was computed.
521
+ """
522
+
523
+ step: int = proto.Field(
524
+ proto.INT32,
525
+ number=1,
526
+ )
527
+ epoch: int = proto.Field(
528
+ proto.INT32,
529
+ number=2,
530
+ )
531
+ mean_loss: float = proto.Field(
532
+ proto.FLOAT,
533
+ number=3,
534
+ )
535
+ compute_time: timestamp_pb2.Timestamp = proto.Field(
536
+ proto.MESSAGE,
537
+ number=4,
538
+ message=timestamp_pb2.Timestamp,
539
+ )
540
+
541
+
542
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (3.64 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/__pycache__/gapic_version.cpython-311.pyc ADDED
Binary file (237 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (218 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .async_client import DiscussServiceAsyncClient
17
+ from .client import DiscussServiceClient
18
+
19
+ __all__ = (
20
+ "DiscussServiceClient",
21
+ "DiscussServiceAsyncClient",
22
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (409 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (26.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (43.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/async_client.py ADDED
@@ -0,0 +1,630 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.longrunning import operations_pb2 # type: ignore
47
+
48
+ from google.ai.generativelanguage_v1beta3.types import discuss_service, safety
49
+
50
+ from .client import DiscussServiceClient
51
+ from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
52
+ from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
53
+
54
+ try:
55
+ from google.api_core import client_logging # type: ignore
56
+
57
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
58
+ except ImportError: # pragma: NO COVER
59
+ CLIENT_LOGGING_SUPPORTED = False
60
+
61
+ _LOGGER = std_logging.getLogger(__name__)
62
+
63
+
64
+ class DiscussServiceAsyncClient:
65
+ """An API for using Generative Language Models (GLMs) in dialog
66
+ applications.
67
+ Also known as large language models (LLMs), this API provides
68
+ models that are trained for multi-turn dialog.
69
+ """
70
+
71
+ _client: DiscussServiceClient
72
+
73
+ # Copy defaults from the synchronous client for use here.
74
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
75
+ DEFAULT_ENDPOINT = DiscussServiceClient.DEFAULT_ENDPOINT
76
+ DEFAULT_MTLS_ENDPOINT = DiscussServiceClient.DEFAULT_MTLS_ENDPOINT
77
+ _DEFAULT_ENDPOINT_TEMPLATE = DiscussServiceClient._DEFAULT_ENDPOINT_TEMPLATE
78
+ _DEFAULT_UNIVERSE = DiscussServiceClient._DEFAULT_UNIVERSE
79
+
80
+ model_path = staticmethod(DiscussServiceClient.model_path)
81
+ parse_model_path = staticmethod(DiscussServiceClient.parse_model_path)
82
+ common_billing_account_path = staticmethod(
83
+ DiscussServiceClient.common_billing_account_path
84
+ )
85
+ parse_common_billing_account_path = staticmethod(
86
+ DiscussServiceClient.parse_common_billing_account_path
87
+ )
88
+ common_folder_path = staticmethod(DiscussServiceClient.common_folder_path)
89
+ parse_common_folder_path = staticmethod(
90
+ DiscussServiceClient.parse_common_folder_path
91
+ )
92
+ common_organization_path = staticmethod(
93
+ DiscussServiceClient.common_organization_path
94
+ )
95
+ parse_common_organization_path = staticmethod(
96
+ DiscussServiceClient.parse_common_organization_path
97
+ )
98
+ common_project_path = staticmethod(DiscussServiceClient.common_project_path)
99
+ parse_common_project_path = staticmethod(
100
+ DiscussServiceClient.parse_common_project_path
101
+ )
102
+ common_location_path = staticmethod(DiscussServiceClient.common_location_path)
103
+ parse_common_location_path = staticmethod(
104
+ DiscussServiceClient.parse_common_location_path
105
+ )
106
+
107
+ @classmethod
108
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
109
+ """Creates an instance of this client using the provided credentials
110
+ info.
111
+
112
+ Args:
113
+ info (dict): The service account private key info.
114
+ args: Additional arguments to pass to the constructor.
115
+ kwargs: Additional arguments to pass to the constructor.
116
+
117
+ Returns:
118
+ DiscussServiceAsyncClient: The constructed client.
119
+ """
120
+ return DiscussServiceClient.from_service_account_info.__func__(DiscussServiceAsyncClient, info, *args, **kwargs) # type: ignore
121
+
122
+ @classmethod
123
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
124
+ """Creates an instance of this client using the provided credentials
125
+ file.
126
+
127
+ Args:
128
+ filename (str): The path to the service account private key json
129
+ file.
130
+ args: Additional arguments to pass to the constructor.
131
+ kwargs: Additional arguments to pass to the constructor.
132
+
133
+ Returns:
134
+ DiscussServiceAsyncClient: The constructed client.
135
+ """
136
+ return DiscussServiceClient.from_service_account_file.__func__(DiscussServiceAsyncClient, filename, *args, **kwargs) # type: ignore
137
+
138
+ from_service_account_json = from_service_account_file
139
+
140
+ @classmethod
141
+ def get_mtls_endpoint_and_cert_source(
142
+ cls, client_options: Optional[ClientOptions] = None
143
+ ):
144
+ """Return the API endpoint and client cert source for mutual TLS.
145
+
146
+ The client cert source is determined in the following order:
147
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
148
+ client cert source is None.
149
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
150
+ default client cert source exists, use the default one; otherwise the client cert
151
+ source is None.
152
+
153
+ The API endpoint is determined in the following order:
154
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
155
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
156
+ default mTLS endpoint; if the environment variable is "never", use the default API
157
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
158
+ use the default API endpoint.
159
+
160
+ More details can be found at https://google.aip.dev/auth/4114.
161
+
162
+ Args:
163
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
164
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
165
+ in this method.
166
+
167
+ Returns:
168
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
169
+ client cert source to use.
170
+
171
+ Raises:
172
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
173
+ """
174
+ return DiscussServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
175
+
176
+ @property
177
+ def transport(self) -> DiscussServiceTransport:
178
+ """Returns the transport used by the client instance.
179
+
180
+ Returns:
181
+ DiscussServiceTransport: The transport used by the client instance.
182
+ """
183
+ return self._client.transport
184
+
185
+ @property
186
+ def api_endpoint(self):
187
+ """Return the API endpoint used by the client instance.
188
+
189
+ Returns:
190
+ str: The API endpoint used by the client instance.
191
+ """
192
+ return self._client._api_endpoint
193
+
194
+ @property
195
+ def universe_domain(self) -> str:
196
+ """Return the universe domain used by the client instance.
197
+
198
+ Returns:
199
+ str: The universe domain used
200
+ by the client instance.
201
+ """
202
+ return self._client._universe_domain
203
+
204
+ get_transport_class = DiscussServiceClient.get_transport_class
205
+
206
+ def __init__(
207
+ self,
208
+ *,
209
+ credentials: Optional[ga_credentials.Credentials] = None,
210
+ transport: Optional[
211
+ Union[str, DiscussServiceTransport, Callable[..., DiscussServiceTransport]]
212
+ ] = "grpc_asyncio",
213
+ client_options: Optional[ClientOptions] = None,
214
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
215
+ ) -> None:
216
+ """Instantiates the discuss service async client.
217
+
218
+ Args:
219
+ credentials (Optional[google.auth.credentials.Credentials]): The
220
+ authorization credentials to attach to requests. These
221
+ credentials identify the application to the service; if none
222
+ are specified, the client will attempt to ascertain the
223
+ credentials from the environment.
224
+ transport (Optional[Union[str,DiscussServiceTransport,Callable[..., DiscussServiceTransport]]]):
225
+ The transport to use, or a Callable that constructs and returns a new transport to use.
226
+ If a Callable is given, it will be called with the same set of initialization
227
+ arguments as used in the DiscussServiceTransport constructor.
228
+ If set to None, a transport is chosen automatically.
229
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
230
+ Custom options for the client.
231
+
232
+ 1. The ``api_endpoint`` property can be used to override the
233
+ default endpoint provided by the client when ``transport`` is
234
+ not explicitly provided. Only if this property is not set and
235
+ ``transport`` was not explicitly provided, the endpoint is
236
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
237
+ variable, which have one of the following values:
238
+ "always" (always use the default mTLS endpoint), "never" (always
239
+ use the default regular endpoint) and "auto" (auto-switch to the
240
+ default mTLS endpoint if client certificate is present; this is
241
+ the default value).
242
+
243
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
244
+ is "true", then the ``client_cert_source`` property can be used
245
+ to provide a client certificate for mTLS transport. If
246
+ not provided, the default SSL client certificate will be used if
247
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
248
+ set, no client certificate will be used.
249
+
250
+ 3. The ``universe_domain`` property can be used to override the
251
+ default "googleapis.com" universe. Note that ``api_endpoint``
252
+ property still takes precedence; and ``universe_domain`` is
253
+ currently not supported for mTLS.
254
+
255
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
256
+ The client info used to send a user-agent string along with
257
+ API requests. If ``None``, then default info will be used.
258
+ Generally, you only need to set this if you're developing
259
+ your own client library.
260
+
261
+ Raises:
262
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
263
+ creation failed for any reason.
264
+ """
265
+ self._client = DiscussServiceClient(
266
+ credentials=credentials,
267
+ transport=transport,
268
+ client_options=client_options,
269
+ client_info=client_info,
270
+ )
271
+
272
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
273
+ std_logging.DEBUG
274
+ ): # pragma: NO COVER
275
+ _LOGGER.debug(
276
+ "Created client `google.ai.generativelanguage_v1beta3.DiscussServiceAsyncClient`.",
277
+ extra={
278
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
279
+ "universeDomain": getattr(
280
+ self._client._transport._credentials, "universe_domain", ""
281
+ ),
282
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
283
+ "credentialsInfo": getattr(
284
+ self.transport._credentials, "get_cred_info", lambda: None
285
+ )(),
286
+ }
287
+ if hasattr(self._client._transport, "_credentials")
288
+ else {
289
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
290
+ "credentialsType": None,
291
+ },
292
+ )
293
+
294
+ async def generate_message(
295
+ self,
296
+ request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None,
297
+ *,
298
+ model: Optional[str] = None,
299
+ prompt: Optional[discuss_service.MessagePrompt] = None,
300
+ temperature: Optional[float] = None,
301
+ candidate_count: Optional[int] = None,
302
+ top_p: Optional[float] = None,
303
+ top_k: Optional[int] = None,
304
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
305
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
306
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
307
+ ) -> discuss_service.GenerateMessageResponse:
308
+ r"""Generates a response from the model given an input
309
+ ``MessagePrompt``.
310
+
311
+ .. code-block:: python
312
+
313
+ # This snippet has been automatically generated and should be regarded as a
314
+ # code template only.
315
+ # It will require modifications to work:
316
+ # - It may require correct/in-range values for request initialization.
317
+ # - It may require specifying regional endpoints when creating the service
318
+ # client as shown in:
319
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
320
+ from google.ai import generativelanguage_v1beta3
321
+
322
+ async def sample_generate_message():
323
+ # Create a client
324
+ client = generativelanguage_v1beta3.DiscussServiceAsyncClient()
325
+
326
+ # Initialize request argument(s)
327
+ prompt = generativelanguage_v1beta3.MessagePrompt()
328
+ prompt.messages.content = "content_value"
329
+
330
+ request = generativelanguage_v1beta3.GenerateMessageRequest(
331
+ model="model_value",
332
+ prompt=prompt,
333
+ )
334
+
335
+ # Make the request
336
+ response = await client.generate_message(request=request)
337
+
338
+ # Handle the response
339
+ print(response)
340
+
341
+ Args:
342
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.GenerateMessageRequest, dict]]):
343
+ The request object. Request to generate a message
344
+ response from the model.
345
+ model (:class:`str`):
346
+ Required. The name of the model to use.
347
+
348
+ Format: ``name=models/{model}``.
349
+
350
+ This corresponds to the ``model`` field
351
+ on the ``request`` instance; if ``request`` is provided, this
352
+ should not be set.
353
+ prompt (:class:`google.ai.generativelanguage_v1beta3.types.MessagePrompt`):
354
+ Required. The structured textual
355
+ input given to the model as a prompt.
356
+ Given a
357
+ prompt, the model will return what it
358
+ predicts is the next message in the
359
+ discussion.
360
+
361
+ This corresponds to the ``prompt`` field
362
+ on the ``request`` instance; if ``request`` is provided, this
363
+ should not be set.
364
+ temperature (:class:`float`):
365
+ Optional. Controls the randomness of the output.
366
+
367
+ Values can range over ``[0.0,1.0]``, inclusive. A value
368
+ closer to ``1.0`` will produce responses that are more
369
+ varied, while a value closer to ``0.0`` will typically
370
+ result in less surprising responses from the model.
371
+
372
+ This corresponds to the ``temperature`` field
373
+ on the ``request`` instance; if ``request`` is provided, this
374
+ should not be set.
375
+ candidate_count (:class:`int`):
376
+ Optional. The number of generated response messages to
377
+ return.
378
+
379
+ This value must be between ``[1, 8]``, inclusive. If
380
+ unset, this will default to ``1``.
381
+
382
+ This corresponds to the ``candidate_count`` field
383
+ on the ``request`` instance; if ``request`` is provided, this
384
+ should not be set.
385
+ top_p (:class:`float`):
386
+ Optional. The maximum cumulative probability of tokens
387
+ to consider when sampling.
388
+
389
+ The model uses combined Top-k and nucleus sampling.
390
+
391
+ Nucleus sampling considers the smallest set of tokens
392
+ whose probability sum is at least ``top_p``.
393
+
394
+ This corresponds to the ``top_p`` field
395
+ on the ``request`` instance; if ``request`` is provided, this
396
+ should not be set.
397
+ top_k (:class:`int`):
398
+ Optional. The maximum number of tokens to consider when
399
+ sampling.
400
+
401
+ The model uses combined Top-k and nucleus sampling.
402
+
403
+ Top-k sampling considers the set of ``top_k`` most
404
+ probable tokens.
405
+
406
+ This corresponds to the ``top_k`` field
407
+ on the ``request`` instance; if ``request`` is provided, this
408
+ should not be set.
409
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
410
+ should be retried.
411
+ timeout (float): The timeout for this request.
412
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
413
+ sent along with the request as metadata. Normally, each value must be of type `str`,
414
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
415
+ be of type `bytes`.
416
+
417
+ Returns:
418
+ google.ai.generativelanguage_v1beta3.types.GenerateMessageResponse:
419
+ The response from the model.
420
+
421
+ This includes candidate messages and
422
+ conversation history in the form of
423
+ chronologically-ordered messages.
424
+
425
+ """
426
+ # Create or coerce a protobuf request object.
427
+ # - Quick check: If we got a request object, we should *not* have
428
+ # gotten any keyword arguments that map to the request.
429
+ has_flattened_params = any(
430
+ [model, prompt, temperature, candidate_count, top_p, top_k]
431
+ )
432
+ if request is not None and has_flattened_params:
433
+ raise ValueError(
434
+ "If the `request` argument is set, then none of "
435
+ "the individual field arguments should be set."
436
+ )
437
+
438
+ # - Use the request object if provided (there's no risk of modifying the input as
439
+ # there are no flattened fields), or create one.
440
+ if not isinstance(request, discuss_service.GenerateMessageRequest):
441
+ request = discuss_service.GenerateMessageRequest(request)
442
+
443
+ # If we have keyword arguments corresponding to fields on the
444
+ # request, apply these.
445
+ if model is not None:
446
+ request.model = model
447
+ if prompt is not None:
448
+ request.prompt = prompt
449
+ if temperature is not None:
450
+ request.temperature = temperature
451
+ if candidate_count is not None:
452
+ request.candidate_count = candidate_count
453
+ if top_p is not None:
454
+ request.top_p = top_p
455
+ if top_k is not None:
456
+ request.top_k = top_k
457
+
458
+ # Wrap the RPC method; this adds retry and timeout information,
459
+ # and friendly error handling.
460
+ rpc = self._client._transport._wrapped_methods[
461
+ self._client._transport.generate_message
462
+ ]
463
+
464
+ # Certain fields should be provided within the metadata header;
465
+ # add these here.
466
+ metadata = tuple(metadata) + (
467
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
468
+ )
469
+
470
+ # Validate the universe domain.
471
+ self._client._validate_universe_domain()
472
+
473
+ # Send the request.
474
+ response = await rpc(
475
+ request,
476
+ retry=retry,
477
+ timeout=timeout,
478
+ metadata=metadata,
479
+ )
480
+
481
+ # Done; return the response.
482
+ return response
483
+
484
+ async def count_message_tokens(
485
+ self,
486
+ request: Optional[
487
+ Union[discuss_service.CountMessageTokensRequest, dict]
488
+ ] = None,
489
+ *,
490
+ model: Optional[str] = None,
491
+ prompt: Optional[discuss_service.MessagePrompt] = None,
492
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
493
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
494
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
495
+ ) -> discuss_service.CountMessageTokensResponse:
496
+ r"""Runs a model's tokenizer on a string and returns the
497
+ token count.
498
+
499
+ .. code-block:: python
500
+
501
+ # This snippet has been automatically generated and should be regarded as a
502
+ # code template only.
503
+ # It will require modifications to work:
504
+ # - It may require correct/in-range values for request initialization.
505
+ # - It may require specifying regional endpoints when creating the service
506
+ # client as shown in:
507
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
508
+ from google.ai import generativelanguage_v1beta3
509
+
510
+ async def sample_count_message_tokens():
511
+ # Create a client
512
+ client = generativelanguage_v1beta3.DiscussServiceAsyncClient()
513
+
514
+ # Initialize request argument(s)
515
+ prompt = generativelanguage_v1beta3.MessagePrompt()
516
+ prompt.messages.content = "content_value"
517
+
518
+ request = generativelanguage_v1beta3.CountMessageTokensRequest(
519
+ model="model_value",
520
+ prompt=prompt,
521
+ )
522
+
523
+ # Make the request
524
+ response = await client.count_message_tokens(request=request)
525
+
526
+ # Handle the response
527
+ print(response)
528
+
529
+ Args:
530
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.CountMessageTokensRequest, dict]]):
531
+ The request object. Counts the number of tokens in the ``prompt`` sent to a
532
+ model.
533
+
534
+ Models may tokenize text differently, so each model may
535
+ return a different ``token_count``.
536
+ model (:class:`str`):
537
+ Required. The model's resource name. This serves as an
538
+ ID for the Model to use.
539
+
540
+ This name should match a model name returned by the
541
+ ``ListModels`` method.
542
+
543
+ Format: ``models/{model}``
544
+
545
+ This corresponds to the ``model`` field
546
+ on the ``request`` instance; if ``request`` is provided, this
547
+ should not be set.
548
+ prompt (:class:`google.ai.generativelanguage_v1beta3.types.MessagePrompt`):
549
+ Required. The prompt, whose token
550
+ count is to be returned.
551
+
552
+ This corresponds to the ``prompt`` field
553
+ on the ``request`` instance; if ``request`` is provided, this
554
+ should not be set.
555
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
556
+ should be retried.
557
+ timeout (float): The timeout for this request.
558
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
559
+ sent along with the request as metadata. Normally, each value must be of type `str`,
560
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
561
+ be of type `bytes`.
562
+
563
+ Returns:
564
+ google.ai.generativelanguage_v1beta3.types.CountMessageTokensResponse:
565
+ A response from CountMessageTokens.
566
+
567
+ It returns the model's token_count for the prompt.
568
+
569
+ """
570
+ # Create or coerce a protobuf request object.
571
+ # - Quick check: If we got a request object, we should *not* have
572
+ # gotten any keyword arguments that map to the request.
573
+ has_flattened_params = any([model, prompt])
574
+ if request is not None and has_flattened_params:
575
+ raise ValueError(
576
+ "If the `request` argument is set, then none of "
577
+ "the individual field arguments should be set."
578
+ )
579
+
580
+ # - Use the request object if provided (there's no risk of modifying the input as
581
+ # there are no flattened fields), or create one.
582
+ if not isinstance(request, discuss_service.CountMessageTokensRequest):
583
+ request = discuss_service.CountMessageTokensRequest(request)
584
+
585
+ # If we have keyword arguments corresponding to fields on the
586
+ # request, apply these.
587
+ if model is not None:
588
+ request.model = model
589
+ if prompt is not None:
590
+ request.prompt = prompt
591
+
592
+ # Wrap the RPC method; this adds retry and timeout information,
593
+ # and friendly error handling.
594
+ rpc = self._client._transport._wrapped_methods[
595
+ self._client._transport.count_message_tokens
596
+ ]
597
+
598
+ # Certain fields should be provided within the metadata header;
599
+ # add these here.
600
+ metadata = tuple(metadata) + (
601
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
602
+ )
603
+
604
+ # Validate the universe domain.
605
+ self._client._validate_universe_domain()
606
+
607
+ # Send the request.
608
+ response = await rpc(
609
+ request,
610
+ retry=retry,
611
+ timeout=timeout,
612
+ metadata=metadata,
613
+ )
614
+
615
+ # Done; return the response.
616
+ return response
617
+
618
+ async def __aenter__(self) -> "DiscussServiceAsyncClient":
619
+ return self
620
+
621
+ async def __aexit__(self, exc_type, exc, tb):
622
+ await self.transport.close()
623
+
624
+
625
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
626
+ gapic_version=package_version.__version__
627
+ )
628
+
629
+
630
+ __all__ = ("DiscussServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/client.py ADDED
@@ -0,0 +1,1018 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.longrunning import operations_pb2 # type: ignore
62
+
63
+ from google.ai.generativelanguage_v1beta3.types import discuss_service, safety
64
+
65
+ from .transports.base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
66
+ from .transports.grpc import DiscussServiceGrpcTransport
67
+ from .transports.grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
68
+ from .transports.rest import DiscussServiceRestTransport
69
+
70
+
71
+ class DiscussServiceClientMeta(type):
72
+ """Metaclass for the DiscussService client.
73
+
74
+ This provides class-level methods for building and retrieving
75
+ support objects (e.g. transport) without polluting the client instance
76
+ objects.
77
+ """
78
+
79
+ _transport_registry = (
80
+ OrderedDict()
81
+ ) # type: Dict[str, Type[DiscussServiceTransport]]
82
+ _transport_registry["grpc"] = DiscussServiceGrpcTransport
83
+ _transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport
84
+ _transport_registry["rest"] = DiscussServiceRestTransport
85
+
86
+ def get_transport_class(
87
+ cls,
88
+ label: Optional[str] = None,
89
+ ) -> Type[DiscussServiceTransport]:
90
+ """Returns an appropriate transport class.
91
+
92
+ Args:
93
+ label: The name of the desired transport. If none is
94
+ provided, then the first transport in the registry is used.
95
+
96
+ Returns:
97
+ The transport class to use.
98
+ """
99
+ # If a specific transport is requested, return that one.
100
+ if label:
101
+ return cls._transport_registry[label]
102
+
103
+ # No transport is requested; return the default (that is, the first one
104
+ # in the dictionary).
105
+ return next(iter(cls._transport_registry.values()))
106
+
107
+
108
+ class DiscussServiceClient(metaclass=DiscussServiceClientMeta):
109
+ """An API for using Generative Language Models (GLMs) in dialog
110
+ applications.
111
+ Also known as large language models (LLMs), this API provides
112
+ models that are trained for multi-turn dialog.
113
+ """
114
+
115
+ @staticmethod
116
+ def _get_default_mtls_endpoint(api_endpoint):
117
+ """Converts api endpoint to mTLS endpoint.
118
+
119
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
120
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
121
+ Args:
122
+ api_endpoint (Optional[str]): the api endpoint to convert.
123
+ Returns:
124
+ str: converted mTLS api endpoint.
125
+ """
126
+ if not api_endpoint:
127
+ return api_endpoint
128
+
129
+ mtls_endpoint_re = re.compile(
130
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
131
+ )
132
+
133
+ m = mtls_endpoint_re.match(api_endpoint)
134
+ name, mtls, sandbox, googledomain = m.groups()
135
+ if mtls or not googledomain:
136
+ return api_endpoint
137
+
138
+ if sandbox:
139
+ return api_endpoint.replace(
140
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
141
+ )
142
+
143
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
144
+
145
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
146
+ DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
147
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
148
+ DEFAULT_ENDPOINT
149
+ )
150
+
151
+ _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
152
+ _DEFAULT_UNIVERSE = "googleapis.com"
153
+
154
+ @classmethod
155
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
156
+ """Creates an instance of this client using the provided credentials
157
+ info.
158
+
159
+ Args:
160
+ info (dict): The service account private key info.
161
+ args: Additional arguments to pass to the constructor.
162
+ kwargs: Additional arguments to pass to the constructor.
163
+
164
+ Returns:
165
+ DiscussServiceClient: The constructed client.
166
+ """
167
+ credentials = service_account.Credentials.from_service_account_info(info)
168
+ kwargs["credentials"] = credentials
169
+ return cls(*args, **kwargs)
170
+
171
+ @classmethod
172
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
173
+ """Creates an instance of this client using the provided credentials
174
+ file.
175
+
176
+ Args:
177
+ filename (str): The path to the service account private key json
178
+ file.
179
+ args: Additional arguments to pass to the constructor.
180
+ kwargs: Additional arguments to pass to the constructor.
181
+
182
+ Returns:
183
+ DiscussServiceClient: The constructed client.
184
+ """
185
+ credentials = service_account.Credentials.from_service_account_file(filename)
186
+ kwargs["credentials"] = credentials
187
+ return cls(*args, **kwargs)
188
+
189
+ from_service_account_json = from_service_account_file
190
+
191
+ @property
192
+ def transport(self) -> DiscussServiceTransport:
193
+ """Returns the transport used by the client instance.
194
+
195
+ Returns:
196
+ DiscussServiceTransport: The transport used by the client
197
+ instance.
198
+ """
199
+ return self._transport
200
+
201
+ @staticmethod
202
+ def model_path(
203
+ model: str,
204
+ ) -> str:
205
+ """Returns a fully-qualified model string."""
206
+ return "models/{model}".format(
207
+ model=model,
208
+ )
209
+
210
+ @staticmethod
211
+ def parse_model_path(path: str) -> Dict[str, str]:
212
+ """Parses a model path into its component segments."""
213
+ m = re.match(r"^models/(?P<model>.+?)$", path)
214
+ return m.groupdict() if m else {}
215
+
216
+ @staticmethod
217
+ def common_billing_account_path(
218
+ billing_account: str,
219
+ ) -> str:
220
+ """Returns a fully-qualified billing_account string."""
221
+ return "billingAccounts/{billing_account}".format(
222
+ billing_account=billing_account,
223
+ )
224
+
225
+ @staticmethod
226
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
227
+ """Parse a billing_account path into its component segments."""
228
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
229
+ return m.groupdict() if m else {}
230
+
231
+ @staticmethod
232
+ def common_folder_path(
233
+ folder: str,
234
+ ) -> str:
235
+ """Returns a fully-qualified folder string."""
236
+ return "folders/{folder}".format(
237
+ folder=folder,
238
+ )
239
+
240
+ @staticmethod
241
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
242
+ """Parse a folder path into its component segments."""
243
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
244
+ return m.groupdict() if m else {}
245
+
246
+ @staticmethod
247
+ def common_organization_path(
248
+ organization: str,
249
+ ) -> str:
250
+ """Returns a fully-qualified organization string."""
251
+ return "organizations/{organization}".format(
252
+ organization=organization,
253
+ )
254
+
255
+ @staticmethod
256
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
257
+ """Parse a organization path into its component segments."""
258
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
259
+ return m.groupdict() if m else {}
260
+
261
+ @staticmethod
262
+ def common_project_path(
263
+ project: str,
264
+ ) -> str:
265
+ """Returns a fully-qualified project string."""
266
+ return "projects/{project}".format(
267
+ project=project,
268
+ )
269
+
270
+ @staticmethod
271
+ def parse_common_project_path(path: str) -> Dict[str, str]:
272
+ """Parse a project path into its component segments."""
273
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
274
+ return m.groupdict() if m else {}
275
+
276
+ @staticmethod
277
+ def common_location_path(
278
+ project: str,
279
+ location: str,
280
+ ) -> str:
281
+ """Returns a fully-qualified location string."""
282
+ return "projects/{project}/locations/{location}".format(
283
+ project=project,
284
+ location=location,
285
+ )
286
+
287
+ @staticmethod
288
+ def parse_common_location_path(path: str) -> Dict[str, str]:
289
+ """Parse a location path into its component segments."""
290
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
291
+ return m.groupdict() if m else {}
292
+
293
+ @classmethod
294
+ def get_mtls_endpoint_and_cert_source(
295
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
296
+ ):
297
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
298
+
299
+ The client cert source is determined in the following order:
300
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
301
+ client cert source is None.
302
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
303
+ default client cert source exists, use the default one; otherwise the client cert
304
+ source is None.
305
+
306
+ The API endpoint is determined in the following order:
307
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
308
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
309
+ default mTLS endpoint; if the environment variable is "never", use the default API
310
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
311
+ use the default API endpoint.
312
+
313
+ More details can be found at https://google.aip.dev/auth/4114.
314
+
315
+ Args:
316
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
317
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
318
+ in this method.
319
+
320
+ Returns:
321
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
322
+ client cert source to use.
323
+
324
+ Raises:
325
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
326
+ """
327
+
328
+ warnings.warn(
329
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
330
+ DeprecationWarning,
331
+ )
332
+ if client_options is None:
333
+ client_options = client_options_lib.ClientOptions()
334
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
335
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
336
+ if use_client_cert not in ("true", "false"):
337
+ raise ValueError(
338
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
339
+ )
340
+ if use_mtls_endpoint not in ("auto", "never", "always"):
341
+ raise MutualTLSChannelError(
342
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
343
+ )
344
+
345
+ # Figure out the client cert source to use.
346
+ client_cert_source = None
347
+ if use_client_cert == "true":
348
+ if client_options.client_cert_source:
349
+ client_cert_source = client_options.client_cert_source
350
+ elif mtls.has_default_client_cert_source():
351
+ client_cert_source = mtls.default_client_cert_source()
352
+
353
+ # Figure out which api endpoint to use.
354
+ if client_options.api_endpoint is not None:
355
+ api_endpoint = client_options.api_endpoint
356
+ elif use_mtls_endpoint == "always" or (
357
+ use_mtls_endpoint == "auto" and client_cert_source
358
+ ):
359
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
360
+ else:
361
+ api_endpoint = cls.DEFAULT_ENDPOINT
362
+
363
+ return api_endpoint, client_cert_source
364
+
365
+ @staticmethod
366
+ def _read_environment_variables():
367
+ """Returns the environment variables used by the client.
368
+
369
+ Returns:
370
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
371
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
372
+
373
+ Raises:
374
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
375
+ any of ["true", "false"].
376
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
377
+ is not any of ["auto", "never", "always"].
378
+ """
379
+ use_client_cert = os.getenv(
380
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
381
+ ).lower()
382
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
383
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
384
+ if use_client_cert not in ("true", "false"):
385
+ raise ValueError(
386
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
387
+ )
388
+ if use_mtls_endpoint not in ("auto", "never", "always"):
389
+ raise MutualTLSChannelError(
390
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
391
+ )
392
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
393
+
394
+ @staticmethod
395
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
396
+ """Return the client cert source to be used by the client.
397
+
398
+ Args:
399
+ provided_cert_source (bytes): The client certificate source provided.
400
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
401
+
402
+ Returns:
403
+ bytes or None: The client cert source to be used by the client.
404
+ """
405
+ client_cert_source = None
406
+ if use_cert_flag:
407
+ if provided_cert_source:
408
+ client_cert_source = provided_cert_source
409
+ elif mtls.has_default_client_cert_source():
410
+ client_cert_source = mtls.default_client_cert_source()
411
+ return client_cert_source
412
+
413
+ @staticmethod
414
+ def _get_api_endpoint(
415
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
416
+ ):
417
+ """Return the API endpoint used by the client.
418
+
419
+ Args:
420
+ api_override (str): The API endpoint override. If specified, this is always
421
+ the return value of this function and the other arguments are not used.
422
+ client_cert_source (bytes): The client certificate source used by the client.
423
+ universe_domain (str): The universe domain used by the client.
424
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
425
+ Possible values are "always", "auto", or "never".
426
+
427
+ Returns:
428
+ str: The API endpoint to be used by the client.
429
+ """
430
+ if api_override is not None:
431
+ api_endpoint = api_override
432
+ elif use_mtls_endpoint == "always" or (
433
+ use_mtls_endpoint == "auto" and client_cert_source
434
+ ):
435
+ _default_universe = DiscussServiceClient._DEFAULT_UNIVERSE
436
+ if universe_domain != _default_universe:
437
+ raise MutualTLSChannelError(
438
+ f"mTLS is not supported in any universe other than {_default_universe}."
439
+ )
440
+ api_endpoint = DiscussServiceClient.DEFAULT_MTLS_ENDPOINT
441
+ else:
442
+ api_endpoint = DiscussServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
443
+ UNIVERSE_DOMAIN=universe_domain
444
+ )
445
+ return api_endpoint
446
+
447
@staticmethod
def _get_universe_domain(
    client_universe_domain: Optional[str], universe_domain_env: Optional[str]
) -> str:
    """Resolve the universe domain the client should use.

    The value configured in client options takes precedence over the
    ``GOOGLE_CLOUD_UNIVERSE_DOMAIN`` environment variable; when neither is
    set, the default universe is used.

    Args:
        client_universe_domain (Optional[str]): Universe domain from client options.
        universe_domain_env (Optional[str]): Universe domain from the environment.

    Returns:
        str: The universe domain to be used by the client.

    Raises:
        ValueError: If the resolved universe domain is an empty string.
    """
    if client_universe_domain is not None:
        universe_domain = client_universe_domain
    elif universe_domain_env is not None:
        universe_domain = universe_domain_env
    else:
        universe_domain = DiscussServiceClient._DEFAULT_UNIVERSE
    if not universe_domain.strip():
        raise ValueError("Universe Domain cannot be an empty string.")
    return universe_domain
471
+
472
def _validate_universe_domain(self):
    """Validate that the client's and credentials' universe domains agree.

    Returns:
        bool: True iff the configured universe domain is valid.

    Raises:
        ValueError: If the configured universe domain is not valid.
    """
    # NOTE (b/349488459): universe validation is disabled until further
    # notice, so this is intentionally an unconditional success.
    return True
484
+
485
@property
def api_endpoint(self):
    """str: The API endpoint this client instance talks to.

    Resolved once at construction time from client options, mTLS settings,
    and the universe domain.
    """
    return self._api_endpoint
493
+
494
@property
def universe_domain(self) -> str:
    """str: The universe domain this client instance is configured for.

    Resolved once at construction time from client options or the
    environment.
    """
    return self._universe_domain
502
+
503
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Optional[
        Union[str, DiscussServiceTransport, Callable[..., DiscussServiceTransport]]
    ] = None,
    client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the discuss service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. If none are
            specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Optional[Union[str,DiscussServiceTransport,Callable[..., DiscussServiceTransport]]]):
            The transport to use, or a Callable that constructs and returns
            a new transport with the same initialization arguments as the
            DiscussServiceTransport constructor. If None, a transport is
            chosen automatically.
        client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
            Custom options for the client:

            1. ``api_endpoint`` overrides the default endpoint when
               ``transport`` is not explicitly provided; otherwise the
               endpoint is determined by the GOOGLE_API_USE_MTLS_ENDPOINT
               environment variable ("always", "never", or "auto" — the
               default, which switches to mTLS when a client certificate
               is present).
            2. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "true",
               ``client_cert_source`` may supply a client certificate for
               mTLS (falling back to the default SSL client certificate
               if present).
            3. ``universe_domain`` overrides the default "googleapis.com"
               universe; ``api_endpoint`` still takes precedence, and
               ``universe_domain`` is currently not supported for mTLS.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, default info is used. Generally
            only needed when developing your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS
            transport creation failed for any reason.
    """
    # Normalize client_options: accept a dict, None, or a ClientOptions
    # instance, ending with a concrete ClientOptions object.
    self._client_options = client_options
    if isinstance(self._client_options, dict):
        self._client_options = client_options_lib.from_dict(self._client_options)
    if self._client_options is None:
        self._client_options = client_options_lib.ClientOptions()
    self._client_options = cast(
        client_options_lib.ClientOptions, self._client_options
    )

    universe_domain_opt = getattr(self._client_options, "universe_domain", None)

    # Resolve mTLS/universe configuration from the environment, then the
    # effective cert source and universe domain for this instance.
    (
        self._use_client_cert,
        self._use_mtls_endpoint,
        self._universe_domain_env,
    ) = DiscussServiceClient._read_environment_variables()
    self._client_cert_source = DiscussServiceClient._get_client_cert_source(
        self._client_options.client_cert_source, self._use_client_cert
    )
    self._universe_domain = DiscussServiceClient._get_universe_domain(
        universe_domain_opt, self._universe_domain_env
    )
    self._api_endpoint = None  # updated below, depending on `transport`

    # Initialize the universe domain validation.
    self._is_universe_domain_valid = False

    if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
        # Setup logging.
        client_logging.initialize_logging()

    # An API key (from client options) and explicit credentials cannot be
    # combined; fail fast before any transport is built.
    api_key_value = getattr(self._client_options, "api_key", None)
    if api_key_value and credentials:
        raise ValueError(
            "client_options.api_key and credentials are mutually exclusive"
        )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    transport_provided = isinstance(transport, DiscussServiceTransport)
    if transport_provided:
        # transport is a DiscussServiceTransport instance: it already
        # carries credentials and scopes, so none may be passed here.
        if credentials or self._client_options.credentials_file or api_key_value:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        if self._client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = cast(DiscussServiceTransport, transport)
        self._api_endpoint = self._transport.host

    # A pre-built transport's host wins; otherwise resolve the endpoint
    # from options, cert source, universe, and mTLS policy.
    self._api_endpoint = (
        self._api_endpoint
        or DiscussServiceClient._get_api_endpoint(
            self._client_options.api_endpoint,
            self._client_cert_source,
            self._universe_domain,
            self._use_mtls_endpoint,
        )
    )

    if not transport_provided:
        import google.auth._default  # type: ignore

        # Convert an API key into credentials when the installed
        # google-auth version supports it.
        if api_key_value and hasattr(
            google.auth._default, "get_api_key_credentials"
        ):
            credentials = google.auth._default.get_api_key_credentials(
                api_key_value
            )

        # `transport` is either a registered name (or None, meaning the
        # default) or a callable/class producing a transport instance.
        transport_init: Union[
            Type[DiscussServiceTransport], Callable[..., DiscussServiceTransport]
        ] = (
            DiscussServiceClient.get_transport_class(transport)
            if isinstance(transport, str) or transport is None
            else cast(Callable[..., DiscussServiceTransport], transport)
        )
        # initialize with the provided callable or the passed in class
        self._transport = transport_init(
            credentials=credentials,
            credentials_file=self._client_options.credentials_file,
            host=self._api_endpoint,
            scopes=self._client_options.scopes,
            client_cert_source_for_mtls=self._client_cert_source,
            quota_project_id=self._client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
            api_audience=self._client_options.api_audience,
        )

    # Emit a one-shot debug record describing the created client (sync
    # transports only; credential details included when available).
    if "async" not in str(self._transport):
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        ):  # pragma: NO COVER
            _LOGGER.debug(
                "Created client `google.ai.generativelanguage_v1beta3.DiscussServiceClient`.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
                    "universeDomain": getattr(
                        self._transport._credentials, "universe_domain", ""
                    ),
                    "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
                    "credentialsInfo": getattr(
                        self.transport._credentials, "get_cred_info", lambda: None
                    )(),
                }
                if hasattr(self._transport, "_credentials")
                else {
                    "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
                    "credentialsType": None,
                },
            )
680
+
681
def generate_message(
    self,
    request: Optional[Union[discuss_service.GenerateMessageRequest, dict]] = None,
    *,
    model: Optional[str] = None,
    prompt: Optional[discuss_service.MessagePrompt] = None,
    temperature: Optional[float] = None,
    candidate_count: Optional[int] = None,
    top_p: Optional[float] = None,
    top_k: Optional[int] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Union[float, object] = gapic_v1.method.DEFAULT,
    metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> discuss_service.GenerateMessageResponse:
    r"""Generates a response from the model given an input
    ``MessagePrompt``.

    Args:
        request (Union[google.ai.generativelanguage_v1beta3.types.GenerateMessageRequest, dict]):
            The full request object. Mutually exclusive with the
            flattened field arguments below.
        model (str):
            Required. The name of the model to use.
            Format: ``name=models/{model}``.
        prompt (google.ai.generativelanguage_v1beta3.types.MessagePrompt):
            Required. The structured textual input given to the model as
            a prompt; the model returns its prediction of the next
            message in the discussion.
        temperature (float):
            Optional. Controls output randomness, in ``[0.0, 1.0]``;
            higher values produce more varied responses.
        candidate_count (int):
            Optional. Number of generated response messages to return,
            in ``[1, 8]``; defaults to ``1`` when unset.
        top_p (float):
            Optional. Nucleus-sampling cap: the smallest set of tokens
            whose probability sum is at least ``top_p`` is considered.
        top_k (int):
            Optional. Top-k sampling cap: the ``top_k`` most probable
            tokens are considered.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
            sent with the request as gRPC metadata (values for ``-bin``
            keys must be ``bytes``).

    Returns:
        google.ai.generativelanguage_v1beta3.types.GenerateMessageResponse:
            The model's candidate messages plus the conversation
            history, in chronological order.
    """
    # A full `request` object and flattened field arguments are mutually
    # exclusive ways of specifying the request.
    flattened = (model, prompt, temperature, candidate_count, top_p, top_k)
    if request is not None and any(flattened):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dict/None into a proper request message, then fold any
    # flattened arguments into it (safe: `request` was None/dict here).
    if not isinstance(request, discuss_service.GenerateMessageRequest):
        request = discuss_service.GenerateMessageRequest(request)
        for field_name, value in (
            ("model", model),
            ("prompt", prompt),
            ("temperature", temperature),
            ("candidate_count", candidate_count),
            ("top_p", top_p),
            ("top_k", top_k),
        ):
            if value is not None:
                setattr(request, field_name, value)

    # Retry/timeout-wrapped transport method with friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.generate_message]

    # Route the request to the right backend via the `model` header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
    )

    # Validate the universe domain before sending anything.
    self._validate_universe_domain()

    # Send the request and return the response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
867
+
868
def count_message_tokens(
    self,
    request: Optional[
        Union[discuss_service.CountMessageTokensRequest, dict]
    ] = None,
    *,
    model: Optional[str] = None,
    prompt: Optional[discuss_service.MessagePrompt] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Union[float, object] = gapic_v1.method.DEFAULT,
    metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> discuss_service.CountMessageTokensResponse:
    r"""Runs a model's tokenizer on a string and returns the
    token count.

    Args:
        request (Union[google.ai.generativelanguage_v1beta3.types.CountMessageTokensRequest, dict]):
            The full request object. Mutually exclusive with the
            flattened field arguments below. Models may tokenize text
            differently, so each model may return a different
            ``token_count``.
        model (str):
            Required. The model's resource name (format:
            ``models/{model}``), matching a name returned by
            ``ListModels``.
        prompt (google.ai.generativelanguage_v1beta3.types.MessagePrompt):
            Required. The prompt whose token count is to be returned.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs
            sent with the request as gRPC metadata (values for ``-bin``
            keys must be ``bytes``).

    Returns:
        google.ai.generativelanguage_v1beta3.types.CountMessageTokensResponse:
            The model's ``token_count`` for the prompt.
    """
    # A full `request` object and flattened field arguments are mutually
    # exclusive ways of specifying the request.
    if request is not None and any([model, prompt]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # Coerce dict/None into a proper request message, then fold any
    # flattened arguments into it (safe: `request` was None/dict here).
    if not isinstance(request, discuss_service.CountMessageTokensRequest):
        request = discuss_service.CountMessageTokensRequest(request)
        if model is not None:
            request.model = model
        if prompt is not None:
            request.prompt = prompt

    # Retry/timeout-wrapped transport method with friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.count_message_tokens]

    # Route the request to the right backend via the `model` header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
    )

    # Validate the universe domain before sending anything.
    self._validate_universe_domain()

    # Send the request and return the response.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
998
+
999
def __enter__(self) -> "DiscussServiceClient":
    """Enter the runtime context and return this client unchanged."""
    return self
1001
+
1002
def __exit__(self, type, value, traceback):
    """Release the underlying transport's resources on context exit.

    .. warning::
        ONLY use the client as a context manager if the transport is NOT
        shared with other clients! Exiting the with block CLOSES the
        transport and may cause errors in other clients!
    """
    self.transport.close()
1011
+
1012
+
1013
# Default client-info sent as part of the user-agent on every API request,
# carrying this package's GAPIC version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


__all__ = ("DiscussServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import DiscussServiceTransport
from .grpc import DiscussServiceGrpcTransport
from .grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
from .rest import DiscussServiceRestInterceptor, DiscussServiceRestTransport

# Registry mapping transport names ("grpc", "grpc_asyncio", "rest") to the
# transport classes implementing them; consulted by get_transport_class.
_transport_registry: Dict[str, Type[DiscussServiceTransport]] = OrderedDict(
    (
        ("grpc", DiscussServiceGrpcTransport),
        ("grpc_asyncio", DiscussServiceGrpcAsyncIOTransport),
        ("rest", DiscussServiceRestTransport),
    )
)

__all__ = (
    "DiscussServiceTransport",
    "DiscussServiceGrpcTransport",
    "DiscussServiceGrpcAsyncIOTransport",
    "DiscussServiceRestTransport",
    "DiscussServiceRestInterceptor",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (908 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (7.46 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (17.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (19.7 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (23.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (9.83 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/base.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+
28
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
29
+ from google.ai.generativelanguage_v1beta3.types import discuss_service
30
+
31
# Default client-info sent as part of the user-agent on every API request,
# carrying this package's GAPIC version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
34
+
35
+
36
class DiscussServiceTransport(abc.ABC):
    """Abstract transport class for DiscussService.

    Concrete subclasses (gRPC, gRPC-asyncio, REST) implement the actual
    wire protocol; this base class handles credential resolution and host
    normalization, and declares the RPC surface as abstract properties.
    """

    # No OAuth scopes are required for this API.
    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes
        # Subclasses may pre-set _ignore_credentials to skip default
        # credential resolution entirely.
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults. Explicit credentials and a credentials file are
        # mutually exclusive.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        # The normalized "host:port" string this transport connects to.
        return self._host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each transport RPC gets retry,
        # timeout, and user-agent handling applied once, up front.
        self._wrapped_methods = {
            self.generate_message: gapic_v1.method.wrap_method(
                self.generate_message,
                default_timeout=None,
                client_info=client_info,
            ),
            self.count_message_tokens: gapic_v1.method.wrap_method(
                self.count_message_tokens,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def generate_message(
        self,
    ) -> Callable[
        [discuss_service.GenerateMessageRequest],
        Union[
            discuss_service.GenerateMessageResponse,
            Awaitable[discuss_service.GenerateMessageResponse],
        ],
    ]:
        # Abstract: subclasses return the callable implementing the
        # GenerateMessage RPC (sync or awaitable depending on transport).
        raise NotImplementedError()

    @property
    def count_message_tokens(
        self,
    ) -> Callable[
        [discuss_service.CountMessageTokensRequest],
        Union[
            discuss_service.CountMessageTokensResponse,
            Awaitable[discuss_service.CountMessageTokensResponse],
        ],
    ]:
        # Abstract: subclasses return the callable implementing the
        # CountMessageTokens RPC (sync or awaitable depending on transport).
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Abstract: short transport identifier (e.g. "grpc", "rest").
        raise NotImplementedError()
180
+
181
+
182
# Only the abstract base transport is part of this module's public API.
__all__ = ("DiscussServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/grpc.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf.json_format import MessageToJson
28
+ import google.protobuf.message
29
+ import grpc # type: ignore
30
+ import proto # type: ignore
31
+
32
+ from google.ai.generativelanguage_v1beta3.types import discuss_service
33
+
34
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
35
+
36
+ try:
37
+ from google.api_core import client_logging # type: ignore
38
+
39
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
40
+ except ImportError: # pragma: NO COVER
41
+ CLIENT_LOGGING_SUPPORTED = False
42
+
43
+ _LOGGER = std_logging.getLogger(__name__)
44
+
45
+
46
+ class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
47
+ def intercept_unary_unary(self, continuation, client_call_details, request):
48
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
49
+ std_logging.DEBUG
50
+ )
51
+ if logging_enabled: # pragma: NO COVER
52
+ request_metadata = client_call_details.metadata
53
+ if isinstance(request, proto.Message):
54
+ request_payload = type(request).to_json(request)
55
+ elif isinstance(request, google.protobuf.message.Message):
56
+ request_payload = MessageToJson(request)
57
+ else:
58
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
59
+
60
+ request_metadata = {
61
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
62
+ for key, value in request_metadata
63
+ }
64
+ grpc_request = {
65
+ "payload": request_payload,
66
+ "requestMethod": "grpc",
67
+ "metadata": dict(request_metadata),
68
+ }
69
+ _LOGGER.debug(
70
+ f"Sending request for {client_call_details.method}",
71
+ extra={
72
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
73
+ "rpcName": client_call_details.method,
74
+ "request": grpc_request,
75
+ "metadata": grpc_request["metadata"],
76
+ },
77
+ )
78
+
79
+ response = continuation(client_call_details, request)
80
+ if logging_enabled: # pragma: NO COVER
81
+ response_metadata = response.trailing_metadata()
82
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
83
+ metadata = (
84
+ dict([(k, str(v)) for k, v in response_metadata])
85
+ if response_metadata
86
+ else None
87
+ )
88
+ result = response.result()
89
+ if isinstance(result, proto.Message):
90
+ response_payload = type(result).to_json(result)
91
+ elif isinstance(result, google.protobuf.message.Message):
92
+ response_payload = MessageToJson(result)
93
+ else:
94
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
95
+ grpc_response = {
96
+ "payload": response_payload,
97
+ "metadata": metadata,
98
+ "status": "OK",
99
+ }
100
+ _LOGGER.debug(
101
+ f"Received response for {client_call_details.method}.",
102
+ extra={
103
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
104
+ "rpcName": client_call_details.method,
105
+ "response": grpc_response,
106
+ "metadata": grpc_response["metadata"],
107
+ },
108
+ )
109
+ return response
110
+
111
+
112
class DiscussServiceGrpcTransport(DiscussServiceTransport):
    """gRPC backend transport for DiscussService.

    An API for using Generative Language Models (GLMs) in dialog
    applications.
    Also known as large language models (LLMs), this API provides
    models that are trained for multi-turn dialog.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Lazily-populated cache of RPC stub callables, keyed by method name;
    # filled in by the ``generate_message`` / ``count_message_tokens``
    # properties below.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if a ``channel`` instance is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if a ``channel`` instance is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if a ``channel`` instance is provided.
            channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
                A ``Channel`` instance through which to make calls, or a Callable
                that constructs and returns one. If set to None, ``self.create_channel``
                is used to create the channel. If a Callable is given, it will be called
                with the same arguments as used in ``self.create_channel``.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if a ``channel`` instance is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if isinstance(channel, grpc.Channel):
            # Ignore credentials if a channel was passed.
            credentials = None
            self._ignore_credentials = True
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            # No ready-made channel: resolve which SSL credentials to use.
            # The deprecated api_mtls_endpoint/client_cert_source pair takes
            # precedence over client_cert_source_for_mtls.
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            # initialize with the provided callable or the default channel
            channel_init = channel or type(self).create_channel
            self._grpc_channel = channel_init(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap the channel so every unary-unary call goes through the
        # DEBUG-logging interceptor defined above.
        self._interceptor = _LoggingClientInterceptor()
        self._logged_channel = grpc.intercept_channel(
            self._grpc_channel, self._interceptor
        )

        # Wrap messages. This must be done after self._logged_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(
        cls,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel

    @property
    def generate_message(
        self,
    ) -> Callable[
        [discuss_service.GenerateMessageRequest],
        discuss_service.GenerateMessageResponse,
    ]:
        r"""Return a callable for the generate message method over gRPC.

        Generates a response from the model given an input
        ``MessagePrompt``.

        Returns:
            Callable[[~.GenerateMessageRequest],
                    ~.GenerateMessageResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "generate_message" not in self._stubs:
            self._stubs["generate_message"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1beta3.DiscussService/GenerateMessage",
                request_serializer=discuss_service.GenerateMessageRequest.serialize,
                response_deserializer=discuss_service.GenerateMessageResponse.deserialize,
            )
        return self._stubs["generate_message"]

    @property
    def count_message_tokens(
        self,
    ) -> Callable[
        [discuss_service.CountMessageTokensRequest],
        discuss_service.CountMessageTokensResponse,
    ]:
        r"""Return a callable for the count message tokens method over gRPC.

        Runs a model's tokenizer on a string and returns the
        token count.

        Returns:
            Callable[[~.CountMessageTokensRequest],
                    ~.CountMessageTokensResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "count_message_tokens" not in self._stubs:
            self._stubs["count_message_tokens"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1beta3.DiscussService/CountMessageTokens",
                request_serializer=discuss_service.CountMessageTokensRequest.serialize,
                response_deserializer=discuss_service.CountMessageTokensResponse.deserialize,
            )
        return self._stubs["count_message_tokens"]

    def close(self):
        # Closing the intercepted channel also closes the underlying
        # grpc channel it wraps.
        self._logged_channel.close()

    @property
    def kind(self) -> str:
        # Transport discriminator used by the client factory.
        return "grpc"
+
395
+ __all__ = ("DiscussServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf.json_format import MessageToJson
30
+ import google.protobuf.message
31
+ import grpc # type: ignore
32
+ from grpc.experimental import aio # type: ignore
33
+ import proto # type: ignore
34
+
35
+ from google.ai.generativelanguage_v1beta3.types import discuss_service
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
38
+ from .grpc import DiscussServiceGrpcTransport
39
+
40
+ try:
41
+ from google.api_core import client_logging # type: ignore
42
+
43
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
44
+ except ImportError: # pragma: NO COVER
45
+ CLIENT_LOGGING_SUPPORTED = False
46
+
47
+ _LOGGER = std_logging.getLogger(__name__)
48
+
49
+
50
+ class _LoggingClientAIOInterceptor(
51
+ grpc.aio.UnaryUnaryClientInterceptor
52
+ ): # pragma: NO COVER
53
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
54
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
55
+ std_logging.DEBUG
56
+ )
57
+ if logging_enabled: # pragma: NO COVER
58
+ request_metadata = client_call_details.metadata
59
+ if isinstance(request, proto.Message):
60
+ request_payload = type(request).to_json(request)
61
+ elif isinstance(request, google.protobuf.message.Message):
62
+ request_payload = MessageToJson(request)
63
+ else:
64
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
65
+
66
+ request_metadata = {
67
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
68
+ for key, value in request_metadata
69
+ }
70
+ grpc_request = {
71
+ "payload": request_payload,
72
+ "requestMethod": "grpc",
73
+ "metadata": dict(request_metadata),
74
+ }
75
+ _LOGGER.debug(
76
+ f"Sending request for {client_call_details.method}",
77
+ extra={
78
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
79
+ "rpcName": str(client_call_details.method),
80
+ "request": grpc_request,
81
+ "metadata": grpc_request["metadata"],
82
+ },
83
+ )
84
+ response = await continuation(client_call_details, request)
85
+ if logging_enabled: # pragma: NO COVER
86
+ response_metadata = await response.trailing_metadata()
87
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
88
+ metadata = (
89
+ dict([(k, str(v)) for k, v in response_metadata])
90
+ if response_metadata
91
+ else None
92
+ )
93
+ result = await response
94
+ if isinstance(result, proto.Message):
95
+ response_payload = type(result).to_json(result)
96
+ elif isinstance(result, google.protobuf.message.Message):
97
+ response_payload = MessageToJson(result)
98
+ else:
99
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
100
+ grpc_response = {
101
+ "payload": response_payload,
102
+ "metadata": metadata,
103
+ "status": "OK",
104
+ }
105
+ _LOGGER.debug(
106
+ f"Received response to rpc {client_call_details.method}.",
107
+ extra={
108
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
109
+ "rpcName": str(client_call_details.method),
110
+ "response": grpc_response,
111
+ "metadata": grpc_response["metadata"],
112
+ },
113
+ )
114
+ return response
115
+
116
+
117
+ class DiscussServiceGrpcAsyncIOTransport(DiscussServiceTransport):
118
+ """gRPC AsyncIO backend transport for DiscussService.
119
+
120
+ An API for using Generative Language Models (GLMs) in dialog
121
+ applications.
122
+ Also known as large language models (LLMs), this API provides
123
+ models that are trained for multi-turn dialog.
124
+
125
+ This class defines the same methods as the primary client, so the
126
+ primary client can load the underlying transport implementation
127
+ and call it.
128
+
129
+ It sends protocol buffers over the wire using gRPC (which is built on
130
+ top of HTTP/2); the ``grpcio`` package must be installed.
131
+ """
132
+
133
+ _grpc_channel: aio.Channel
134
+ _stubs: Dict[str, Callable] = {}
135
+
136
+ @classmethod
137
+ def create_channel(
138
+ cls,
139
+ host: str = "generativelanguage.googleapis.com",
140
+ credentials: Optional[ga_credentials.Credentials] = None,
141
+ credentials_file: Optional[str] = None,
142
+ scopes: Optional[Sequence[str]] = None,
143
+ quota_project_id: Optional[str] = None,
144
+ **kwargs,
145
+ ) -> aio.Channel:
146
+ """Create and return a gRPC AsyncIO channel object.
147
+ Args:
148
+ host (Optional[str]): The host for the channel to use.
149
+ credentials (Optional[~.Credentials]): The
150
+ authorization credentials to attach to requests. These
151
+ credentials identify this application to the service. If
152
+ none are specified, the client will attempt to ascertain
153
+ the credentials from the environment.
154
+ credentials_file (Optional[str]): A file with credentials that can
155
+ be loaded with :func:`google.auth.load_credentials_from_file`.
156
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
157
+ service. These are only used when credentials are not specified and
158
+ are passed to :func:`google.auth.default`.
159
+ quota_project_id (Optional[str]): An optional project to use for billing
160
+ and quota.
161
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
162
+ channel creation.
163
+ Returns:
164
+ aio.Channel: A gRPC AsyncIO channel object.
165
+ """
166
+
167
+ return grpc_helpers_async.create_channel(
168
+ host,
169
+ credentials=credentials,
170
+ credentials_file=credentials_file,
171
+ quota_project_id=quota_project_id,
172
+ default_scopes=cls.AUTH_SCOPES,
173
+ scopes=scopes,
174
+ default_host=cls.DEFAULT_HOST,
175
+ **kwargs,
176
+ )
177
+
178
+ def __init__(
179
+ self,
180
+ *,
181
+ host: str = "generativelanguage.googleapis.com",
182
+ credentials: Optional[ga_credentials.Credentials] = None,
183
+ credentials_file: Optional[str] = None,
184
+ scopes: Optional[Sequence[str]] = None,
185
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
186
+ api_mtls_endpoint: Optional[str] = None,
187
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
188
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
189
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
190
+ quota_project_id: Optional[str] = None,
191
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
192
+ always_use_jwt_access: Optional[bool] = False,
193
+ api_audience: Optional[str] = None,
194
+ ) -> None:
195
+ """Instantiate the transport.
196
+
197
+ Args:
198
+ host (Optional[str]):
199
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
200
+ credentials (Optional[google.auth.credentials.Credentials]): The
201
+ authorization credentials to attach to requests. These
202
+ credentials identify the application to the service; if none
203
+ are specified, the client will attempt to ascertain the
204
+ credentials from the environment.
205
+ This argument is ignored if a ``channel`` instance is provided.
206
+ credentials_file (Optional[str]): A file with credentials that can
207
+ be loaded with :func:`google.auth.load_credentials_from_file`.
208
+ This argument is ignored if a ``channel`` instance is provided.
209
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
210
+ service. These are only used when credentials are not specified and
211
+ are passed to :func:`google.auth.default`.
212
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
213
+ A ``Channel`` instance through which to make calls, or a Callable
214
+ that constructs and returns one. If set to None, ``self.create_channel``
215
+ is used to create the channel. If a Callable is given, it will be called
216
+ with the same arguments as used in ``self.create_channel``.
217
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
218
+ If provided, it overrides the ``host`` argument and tries to create
219
+ a mutual TLS channel with client SSL credentials from
220
+ ``client_cert_source`` or application default SSL credentials.
221
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
222
+ Deprecated. A callback to provide client SSL certificate bytes and
223
+ private key bytes, both in PEM format. It is ignored if
224
+ ``api_mtls_endpoint`` is None.
225
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
226
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
227
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
228
+ A callback to provide client certificate bytes and private key bytes,
229
+ both in PEM format. It is used to configure a mutual TLS channel. It is
230
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
231
+ quota_project_id (Optional[str]): An optional project to use for billing
232
+ and quota.
233
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
234
+ The client info used to send a user-agent string along with
235
+ API requests. If ``None``, then default info will be used.
236
+ Generally, you only need to set this if you're developing
237
+ your own client library.
238
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
239
+ be used for service account credentials.
240
+
241
+ Raises:
242
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
243
+ creation failed for any reason.
244
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
245
+ and ``credentials_file`` are passed.
246
+ """
247
+ self._grpc_channel = None
248
+ self._ssl_channel_credentials = ssl_channel_credentials
249
+ self._stubs: Dict[str, Callable] = {}
250
+
251
+ if api_mtls_endpoint:
252
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
253
+ if client_cert_source:
254
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
255
+
256
+ if isinstance(channel, aio.Channel):
257
+ # Ignore credentials if a channel was passed.
258
+ credentials = None
259
+ self._ignore_credentials = True
260
+ # If a channel was explicitly provided, set it.
261
+ self._grpc_channel = channel
262
+ self._ssl_channel_credentials = None
263
+ else:
264
+ if api_mtls_endpoint:
265
+ host = api_mtls_endpoint
266
+
267
+ # Create SSL credentials with client_cert_source or application
268
+ # default SSL credentials.
269
+ if client_cert_source:
270
+ cert, key = client_cert_source()
271
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
272
+ certificate_chain=cert, private_key=key
273
+ )
274
+ else:
275
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
276
+
277
+ else:
278
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
279
+ cert, key = client_cert_source_for_mtls()
280
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
281
+ certificate_chain=cert, private_key=key
282
+ )
283
+
284
+ # The base transport sets the host, credentials and scopes
285
+ super().__init__(
286
+ host=host,
287
+ credentials=credentials,
288
+ credentials_file=credentials_file,
289
+ scopes=scopes,
290
+ quota_project_id=quota_project_id,
291
+ client_info=client_info,
292
+ always_use_jwt_access=always_use_jwt_access,
293
+ api_audience=api_audience,
294
+ )
295
+
296
+ if not self._grpc_channel:
297
+ # initialize with the provided callable or the default channel
298
+ channel_init = channel or type(self).create_channel
299
+ self._grpc_channel = channel_init(
300
+ self._host,
301
+ # use the credentials which are saved
302
+ credentials=self._credentials,
303
+ # Set ``credentials_file`` to ``None`` here as
304
+ # the credentials that we saved earlier should be used.
305
+ credentials_file=None,
306
+ scopes=self._scopes,
307
+ ssl_credentials=self._ssl_channel_credentials,
308
+ quota_project_id=quota_project_id,
309
+ options=[
310
+ ("grpc.max_send_message_length", -1),
311
+ ("grpc.max_receive_message_length", -1),
312
+ ],
313
+ )
314
+
315
+ self._interceptor = _LoggingClientAIOInterceptor()
316
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
317
+ self._logged_channel = self._grpc_channel
318
+ self._wrap_with_kind = (
319
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
320
+ )
321
+ # Wrap messages. This must be done after self._logged_channel exists
322
+ self._prep_wrapped_messages(client_info)
323
+
324
+ @property
325
+ def grpc_channel(self) -> aio.Channel:
326
+ """Create the channel designed to connect to this service.
327
+
328
+ This property caches on the instance; repeated calls return
329
+ the same channel.
330
+ """
331
+ # Return the channel from cache.
332
+ return self._grpc_channel
333
+
334
+ @property
335
+ def generate_message(
336
+ self,
337
+ ) -> Callable[
338
+ [discuss_service.GenerateMessageRequest],
339
+ Awaitable[discuss_service.GenerateMessageResponse],
340
+ ]:
341
+ r"""Return a callable for the generate message method over gRPC.
342
+
343
+ Generates a response from the model given an input
344
+ ``MessagePrompt``.
345
+
346
+ Returns:
347
+ Callable[[~.GenerateMessageRequest],
348
+ Awaitable[~.GenerateMessageResponse]]:
349
+ A function that, when called, will call the underlying RPC
350
+ on the server.
351
+ """
352
+ # Generate a "stub function" on-the-fly which will actually make
353
+ # the request.
354
+ # gRPC handles serialization and deserialization, so we just need
355
+ # to pass in the functions for each.
356
+ if "generate_message" not in self._stubs:
357
+ self._stubs["generate_message"] = self._logged_channel.unary_unary(
358
+ "/google.ai.generativelanguage.v1beta3.DiscussService/GenerateMessage",
359
+ request_serializer=discuss_service.GenerateMessageRequest.serialize,
360
+ response_deserializer=discuss_service.GenerateMessageResponse.deserialize,
361
+ )
362
+ return self._stubs["generate_message"]
363
+
364
+ @property
365
+ def count_message_tokens(
366
+ self,
367
+ ) -> Callable[
368
+ [discuss_service.CountMessageTokensRequest],
369
+ Awaitable[discuss_service.CountMessageTokensResponse],
370
+ ]:
371
+ r"""Return a callable for the count message tokens method over gRPC.
372
+
373
+ Runs a model's tokenizer on a string and returns the
374
+ token count.
375
+
376
+ Returns:
377
+ Callable[[~.CountMessageTokensRequest],
378
+ Awaitable[~.CountMessageTokensResponse]]:
379
+ A function that, when called, will call the underlying RPC
380
+ on the server.
381
+ """
382
+ # Generate a "stub function" on-the-fly which will actually make
383
+ # the request.
384
+ # gRPC handles serialization and deserialization, so we just need
385
+ # to pass in the functions for each.
386
+ if "count_message_tokens" not in self._stubs:
387
+ self._stubs["count_message_tokens"] = self._logged_channel.unary_unary(
388
+ "/google.ai.generativelanguage.v1beta3.DiscussService/CountMessageTokens",
389
+ request_serializer=discuss_service.CountMessageTokensRequest.serialize,
390
+ response_deserializer=discuss_service.CountMessageTokensResponse.deserialize,
391
+ )
392
+ return self._stubs["count_message_tokens"]
393
+
394
+ def _prep_wrapped_messages(self, client_info):
395
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
396
+ self._wrapped_methods = {
397
+ self.generate_message: self._wrap_method(
398
+ self.generate_message,
399
+ default_timeout=None,
400
+ client_info=client_info,
401
+ ),
402
+ self.count_message_tokens: self._wrap_method(
403
+ self.count_message_tokens,
404
+ default_timeout=None,
405
+ client_info=client_info,
406
+ ),
407
+ }
408
+
409
+ def _wrap_method(self, func, *args, **kwargs):
410
+ if self._wrap_with_kind: # pragma: NO COVER
411
+ kwargs["kind"] = self.kind
412
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
413
+
414
+ def close(self):
415
+ return self._logged_channel.close()
416
+
417
+ @property
418
+ def kind(self) -> str:
419
+ return "grpc_asyncio"
420
+
421
+
422
+ __all__ = ("DiscussServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/rest.py ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import exceptions as core_exceptions
23
+ from google.api_core import gapic_v1, rest_helpers, rest_streaming
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.longrunning import operations_pb2 # type: ignore
28
+ from google.protobuf import json_format
29
+ from requests import __version__ as requests_version
30
+
31
+ from google.ai.generativelanguage_v1beta3.types import discuss_service
32
+
33
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
34
+ from .rest_base import _BaseDiscussServiceRestTransport
35
+
36
+ try:
37
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
38
+ except AttributeError: # pragma: NO COVER
39
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
40
+
41
+ try:
42
+ from google.api_core import client_logging # type: ignore
43
+
44
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
45
+ except ImportError: # pragma: NO COVER
46
+ CLIENT_LOGGING_SUPPORTED = False
47
+
48
+ _LOGGER = logging.getLogger(__name__)
49
+
50
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
51
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
52
+ grpc_version=None,
53
+ rest_version=f"requests@{requests_version}",
54
+ )
55
+
56
+
57
+ class DiscussServiceRestInterceptor:
58
+ """Interceptor for DiscussService.
59
+
60
+ Interceptors are used to manipulate requests, request metadata, and responses
61
+ in arbitrary ways.
62
+ Example use cases include:
63
+ * Logging
64
+ * Verifying requests according to service or custom semantics
65
+ * Stripping extraneous information from responses
66
+
67
+ These use cases and more can be enabled by injecting an
68
+ instance of a custom subclass when constructing the DiscussServiceRestTransport.
69
+
70
+ .. code-block:: python
71
+ class MyCustomDiscussServiceInterceptor(DiscussServiceRestInterceptor):
72
+ def pre_count_message_tokens(self, request, metadata):
73
+ logging.log(f"Received request: {request}")
74
+ return request, metadata
75
+
76
+ def post_count_message_tokens(self, response):
77
+ logging.log(f"Received response: {response}")
78
+ return response
79
+
80
+ def pre_generate_message(self, request, metadata):
81
+ logging.log(f"Received request: {request}")
82
+ return request, metadata
83
+
84
+ def post_generate_message(self, response):
85
+ logging.log(f"Received response: {response}")
86
+ return response
87
+
88
+ transport = DiscussServiceRestTransport(interceptor=MyCustomDiscussServiceInterceptor())
89
+ client = DiscussServiceClient(transport=transport)
90
+
91
+
92
+ """
93
+
94
+ def pre_count_message_tokens(
95
+ self,
96
+ request: discuss_service.CountMessageTokensRequest,
97
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
98
+ ) -> Tuple[
99
+ discuss_service.CountMessageTokensRequest,
100
+ Sequence[Tuple[str, Union[str, bytes]]],
101
+ ]:
102
+ """Pre-rpc interceptor for count_message_tokens
103
+
104
+ Override in a subclass to manipulate the request or metadata
105
+ before they are sent to the DiscussService server.
106
+ """
107
+ return request, metadata
108
+
109
+ def post_count_message_tokens(
110
+ self, response: discuss_service.CountMessageTokensResponse
111
+ ) -> discuss_service.CountMessageTokensResponse:
112
+ """Post-rpc interceptor for count_message_tokens
113
+
114
+ Override in a subclass to manipulate the response
115
+ after it is returned by the DiscussService server but before
116
+ it is returned to user code.
117
+ """
118
+ return response
119
+
120
+ def pre_generate_message(
121
+ self,
122
+ request: discuss_service.GenerateMessageRequest,
123
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
124
+ ) -> Tuple[
125
+ discuss_service.GenerateMessageRequest, Sequence[Tuple[str, Union[str, bytes]]]
126
+ ]:
127
+ """Pre-rpc interceptor for generate_message
128
+
129
+ Override in a subclass to manipulate the request or metadata
130
+ before they are sent to the DiscussService server.
131
+ """
132
+ return request, metadata
133
+
134
+ def post_generate_message(
135
+ self, response: discuss_service.GenerateMessageResponse
136
+ ) -> discuss_service.GenerateMessageResponse:
137
+ """Post-rpc interceptor for generate_message
138
+
139
+ Override in a subclass to manipulate the response
140
+ after it is returned by the DiscussService server but before
141
+ it is returned to user code.
142
+ """
143
+ return response
144
+
145
+
146
+ @dataclasses.dataclass
147
+ class DiscussServiceRestStub:
148
+ _session: AuthorizedSession
149
+ _host: str
150
+ _interceptor: DiscussServiceRestInterceptor
151
+
152
+
153
+ class DiscussServiceRestTransport(_BaseDiscussServiceRestTransport):
154
+ """REST backend synchronous transport for DiscussService.
155
+
156
+ An API for using Generative Language Models (GLMs) in dialog
157
+ applications.
158
+ Also known as large language models (LLMs), this API provides
159
+ models that are trained for multi-turn dialog.
160
+
161
+ This class defines the same methods as the primary client, so the
162
+ primary client can load the underlying transport implementation
163
+ and call it.
164
+
165
+ It sends JSON representations of protocol buffers over HTTP/1.1
166
+ """
167
+
168
+ def __init__(
169
+ self,
170
+ *,
171
+ host: str = "generativelanguage.googleapis.com",
172
+ credentials: Optional[ga_credentials.Credentials] = None,
173
+ credentials_file: Optional[str] = None,
174
+ scopes: Optional[Sequence[str]] = None,
175
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
176
+ quota_project_id: Optional[str] = None,
177
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
178
+ always_use_jwt_access: Optional[bool] = False,
179
+ url_scheme: str = "https",
180
+ interceptor: Optional[DiscussServiceRestInterceptor] = None,
181
+ api_audience: Optional[str] = None,
182
+ ) -> None:
183
+ """Instantiate the transport.
184
+
185
+ Args:
186
+ host (Optional[str]):
187
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
188
+ credentials (Optional[google.auth.credentials.Credentials]): The
189
+ authorization credentials to attach to requests. These
190
+ credentials identify the application to the service; if none
191
+ are specified, the client will attempt to ascertain the
192
+ credentials from the environment.
193
+
194
+ credentials_file (Optional[str]): A file with credentials that can
195
+ be loaded with :func:`google.auth.load_credentials_from_file`.
196
+ This argument is ignored if ``channel`` is provided.
197
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
198
+ ignored if ``channel`` is provided.
199
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
200
+ certificate to configure mutual TLS HTTP channel. It is ignored
201
+ if ``channel`` is provided.
202
+ quota_project_id (Optional[str]): An optional project to use for billing
203
+ and quota.
204
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
205
+ The client info used to send a user-agent string along with
206
+ API requests. If ``None``, then default info will be used.
207
+ Generally, you only need to set this if you are developing
208
+ your own client library.
209
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
210
+ be used for service account credentials.
211
+ url_scheme: the protocol scheme for the API endpoint. Normally
212
+ "https", but for testing or local servers,
213
+ "http" can be specified.
214
+ """
215
+ # Run the base constructor
216
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
217
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
218
+ # credentials object
219
+ super().__init__(
220
+ host=host,
221
+ credentials=credentials,
222
+ client_info=client_info,
223
+ always_use_jwt_access=always_use_jwt_access,
224
+ url_scheme=url_scheme,
225
+ api_audience=api_audience,
226
+ )
227
+ self._session = AuthorizedSession(
228
+ self._credentials, default_host=self.DEFAULT_HOST
229
+ )
230
+ if client_cert_source_for_mtls:
231
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
232
+ self._interceptor = interceptor or DiscussServiceRestInterceptor()
233
+ self._prep_wrapped_messages(client_info)
234
+
235
+ class _CountMessageTokens(
236
+ _BaseDiscussServiceRestTransport._BaseCountMessageTokens, DiscussServiceRestStub
237
+ ):
238
+ def __hash__(self):
239
+ return hash("DiscussServiceRestTransport.CountMessageTokens")
240
+
241
+ @staticmethod
242
+ def _get_response(
243
+ host,
244
+ metadata,
245
+ query_params,
246
+ session,
247
+ timeout,
248
+ transcoded_request,
249
+ body=None,
250
+ ):
251
+ uri = transcoded_request["uri"]
252
+ method = transcoded_request["method"]
253
+ headers = dict(metadata)
254
+ headers["Content-Type"] = "application/json"
255
+ response = getattr(session, method)(
256
+ "{host}{uri}".format(host=host, uri=uri),
257
+ timeout=timeout,
258
+ headers=headers,
259
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
260
+ data=body,
261
+ )
262
+ return response
263
+
264
+ def __call__(
265
+ self,
266
+ request: discuss_service.CountMessageTokensRequest,
267
+ *,
268
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
269
+ timeout: Optional[float] = None,
270
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
271
+ ) -> discuss_service.CountMessageTokensResponse:
272
+ r"""Call the count message tokens method over HTTP.
273
+
274
+ Args:
275
+ request (~.discuss_service.CountMessageTokensRequest):
276
+ The request object. Counts the number of tokens in the ``prompt`` sent to a
277
+ model.
278
+
279
+ Models may tokenize text differently, so each model may
280
+ return a different ``token_count``.
281
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
282
+ should be retried.
283
+ timeout (float): The timeout for this request.
284
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
285
+ sent along with the request as metadata. Normally, each value must be of type `str`,
286
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
287
+ be of type `bytes`.
288
+
289
+ Returns:
290
+ ~.discuss_service.CountMessageTokensResponse:
291
+ A response from ``CountMessageTokens``.
292
+
293
+ It returns the model's ``token_count`` for the
294
+ ``prompt``.
295
+
296
+ """
297
+
298
+ http_options = (
299
+ _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_http_options()
300
+ )
301
+
302
+ request, metadata = self._interceptor.pre_count_message_tokens(
303
+ request, metadata
304
+ )
305
+ transcoded_request = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_transcoded_request(
306
+ http_options, request
307
+ )
308
+
309
+ body = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_request_body_json(
310
+ transcoded_request
311
+ )
312
+
313
+ # Jsonify the query params
314
+ query_params = _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_query_params_json(
315
+ transcoded_request
316
+ )
317
+
318
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
319
+ logging.DEBUG
320
+ ): # pragma: NO COVER
321
+ request_url = "{host}{uri}".format(
322
+ host=self._host, uri=transcoded_request["uri"]
323
+ )
324
+ method = transcoded_request["method"]
325
+ try:
326
+ request_payload = type(request).to_json(request)
327
+ except:
328
+ request_payload = None
329
+ http_request = {
330
+ "payload": request_payload,
331
+ "requestMethod": method,
332
+ "requestUrl": request_url,
333
+ "headers": dict(metadata),
334
+ }
335
+ _LOGGER.debug(
336
+ f"Sending request for google.ai.generativelanguage_v1beta3.DiscussServiceClient.CountMessageTokens",
337
+ extra={
338
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
339
+ "rpcName": "CountMessageTokens",
340
+ "httpRequest": http_request,
341
+ "metadata": http_request["headers"],
342
+ },
343
+ )
344
+
345
+ # Send the request
346
+ response = DiscussServiceRestTransport._CountMessageTokens._get_response(
347
+ self._host,
348
+ metadata,
349
+ query_params,
350
+ self._session,
351
+ timeout,
352
+ transcoded_request,
353
+ body,
354
+ )
355
+
356
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
357
+ # subclass.
358
+ if response.status_code >= 400:
359
+ raise core_exceptions.from_http_response(response)
360
+
361
+ # Return the response
362
+ resp = discuss_service.CountMessageTokensResponse()
363
+ pb_resp = discuss_service.CountMessageTokensResponse.pb(resp)
364
+
365
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
366
+
367
+ resp = self._interceptor.post_count_message_tokens(resp)
368
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
369
+ logging.DEBUG
370
+ ): # pragma: NO COVER
371
+ try:
372
+ response_payload = (
373
+ discuss_service.CountMessageTokensResponse.to_json(response)
374
+ )
375
+ except:
376
+ response_payload = None
377
+ http_response = {
378
+ "payload": response_payload,
379
+ "headers": dict(response.headers),
380
+ "status": response.status_code,
381
+ }
382
+ _LOGGER.debug(
383
+ "Received response for google.ai.generativelanguage_v1beta3.DiscussServiceClient.count_message_tokens",
384
+ extra={
385
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
386
+ "rpcName": "CountMessageTokens",
387
+ "metadata": http_response["headers"],
388
+ "httpResponse": http_response,
389
+ },
390
+ )
391
+ return resp
392
+
393
+ class _GenerateMessage(
394
+ _BaseDiscussServiceRestTransport._BaseGenerateMessage, DiscussServiceRestStub
395
+ ):
396
+ def __hash__(self):
397
+ return hash("DiscussServiceRestTransport.GenerateMessage")
398
+
399
+ @staticmethod
400
+ def _get_response(
401
+ host,
402
+ metadata,
403
+ query_params,
404
+ session,
405
+ timeout,
406
+ transcoded_request,
407
+ body=None,
408
+ ):
409
+ uri = transcoded_request["uri"]
410
+ method = transcoded_request["method"]
411
+ headers = dict(metadata)
412
+ headers["Content-Type"] = "application/json"
413
+ response = getattr(session, method)(
414
+ "{host}{uri}".format(host=host, uri=uri),
415
+ timeout=timeout,
416
+ headers=headers,
417
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
418
+ data=body,
419
+ )
420
+ return response
421
+
422
+ def __call__(
423
+ self,
424
+ request: discuss_service.GenerateMessageRequest,
425
+ *,
426
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
427
+ timeout: Optional[float] = None,
428
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
429
+ ) -> discuss_service.GenerateMessageResponse:
430
+ r"""Call the generate message method over HTTP.
431
+
432
+ Args:
433
+ request (~.discuss_service.GenerateMessageRequest):
434
+ The request object. Request to generate a message
435
+ response from the model.
436
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
437
+ should be retried.
438
+ timeout (float): The timeout for this request.
439
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
440
+ sent along with the request as metadata. Normally, each value must be of type `str`,
441
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
442
+ be of type `bytes`.
443
+
444
+ Returns:
445
+ ~.discuss_service.GenerateMessageResponse:
446
+ The response from the model.
447
+
448
+ This includes candidate messages and
449
+ conversation history in the form of
450
+ chronologically-ordered messages.
451
+
452
+ """
453
+
454
+ http_options = (
455
+ _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_http_options()
456
+ )
457
+
458
+ request, metadata = self._interceptor.pre_generate_message(
459
+ request, metadata
460
+ )
461
+ transcoded_request = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_transcoded_request(
462
+ http_options, request
463
+ )
464
+
465
+ body = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_request_body_json(
466
+ transcoded_request
467
+ )
468
+
469
+ # Jsonify the query params
470
+ query_params = _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_query_params_json(
471
+ transcoded_request
472
+ )
473
+
474
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
475
+ logging.DEBUG
476
+ ): # pragma: NO COVER
477
+ request_url = "{host}{uri}".format(
478
+ host=self._host, uri=transcoded_request["uri"]
479
+ )
480
+ method = transcoded_request["method"]
481
+ try:
482
+ request_payload = type(request).to_json(request)
483
+ except:
484
+ request_payload = None
485
+ http_request = {
486
+ "payload": request_payload,
487
+ "requestMethod": method,
488
+ "requestUrl": request_url,
489
+ "headers": dict(metadata),
490
+ }
491
+ _LOGGER.debug(
492
+ f"Sending request for google.ai.generativelanguage_v1beta3.DiscussServiceClient.GenerateMessage",
493
+ extra={
494
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
495
+ "rpcName": "GenerateMessage",
496
+ "httpRequest": http_request,
497
+ "metadata": http_request["headers"],
498
+ },
499
+ )
500
+
501
+ # Send the request
502
+ response = DiscussServiceRestTransport._GenerateMessage._get_response(
503
+ self._host,
504
+ metadata,
505
+ query_params,
506
+ self._session,
507
+ timeout,
508
+ transcoded_request,
509
+ body,
510
+ )
511
+
512
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
513
+ # subclass.
514
+ if response.status_code >= 400:
515
+ raise core_exceptions.from_http_response(response)
516
+
517
+ # Return the response
518
+ resp = discuss_service.GenerateMessageResponse()
519
+ pb_resp = discuss_service.GenerateMessageResponse.pb(resp)
520
+
521
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
522
+
523
+ resp = self._interceptor.post_generate_message(resp)
524
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
525
+ logging.DEBUG
526
+ ): # pragma: NO COVER
527
+ try:
528
+ response_payload = discuss_service.GenerateMessageResponse.to_json(
529
+ response
530
+ )
531
+ except:
532
+ response_payload = None
533
+ http_response = {
534
+ "payload": response_payload,
535
+ "headers": dict(response.headers),
536
+ "status": response.status_code,
537
+ }
538
+ _LOGGER.debug(
539
+ "Received response for google.ai.generativelanguage_v1beta3.DiscussServiceClient.generate_message",
540
+ extra={
541
+ "serviceName": "google.ai.generativelanguage.v1beta3.DiscussService",
542
+ "rpcName": "GenerateMessage",
543
+ "metadata": http_response["headers"],
544
+ "httpResponse": http_response,
545
+ },
546
+ )
547
+ return resp
548
+
549
+ @property
550
+ def count_message_tokens(
551
+ self,
552
+ ) -> Callable[
553
+ [discuss_service.CountMessageTokensRequest],
554
+ discuss_service.CountMessageTokensResponse,
555
+ ]:
556
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
557
+ # In C++ this would require a dynamic_cast
558
+ return self._CountMessageTokens(self._session, self._host, self._interceptor) # type: ignore
559
+
560
+ @property
561
+ def generate_message(
562
+ self,
563
+ ) -> Callable[
564
+ [discuss_service.GenerateMessageRequest],
565
+ discuss_service.GenerateMessageResponse,
566
+ ]:
567
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
568
+ # In C++ this would require a dynamic_cast
569
+ return self._GenerateMessage(self._session, self._host, self._interceptor) # type: ignore
570
+
571
+ @property
572
+ def kind(self) -> str:
573
+ return "rest"
574
+
575
+ def close(self):
576
+ self._session.close()
577
+
578
+
579
+ __all__ = ("DiscussServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/discuss_service/transports/rest_base.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.longrunning import operations_pb2 # type: ignore
22
+ from google.protobuf import json_format
23
+
24
+ from google.ai.generativelanguage_v1beta3.types import discuss_service
25
+
26
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
27
+
28
+
29
+ class _BaseDiscussServiceRestTransport(DiscussServiceTransport):
30
+ """Base REST backend transport for DiscussService.
31
+
32
+ Note: This class is not meant to be used directly. Use its sync and
33
+ async sub-classes instead.
34
+
35
+ This class defines the same methods as the primary client, so the
36
+ primary client can load the underlying transport implementation
37
+ and call it.
38
+
39
+ It sends JSON representations of protocol buffers over HTTP/1.1
40
+ """
41
+
42
+ def __init__(
43
+ self,
44
+ *,
45
+ host: str = "generativelanguage.googleapis.com",
46
+ credentials: Optional[Any] = None,
47
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
48
+ always_use_jwt_access: Optional[bool] = False,
49
+ url_scheme: str = "https",
50
+ api_audience: Optional[str] = None,
51
+ ) -> None:
52
+ """Instantiate the transport.
53
+ Args:
54
+ host (Optional[str]):
55
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
56
+ credentials (Optional[Any]): The
57
+ authorization credentials to attach to requests. These
58
+ credentials identify the application to the service; if none
59
+ are specified, the client will attempt to ascertain the
60
+ credentials from the environment.
61
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
62
+ The client info used to send a user-agent string along with
63
+ API requests. If ``None``, then default info will be used.
64
+ Generally, you only need to set this if you are developing
65
+ your own client library.
66
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
67
+ be used for service account credentials.
68
+ url_scheme: the protocol scheme for the API endpoint. Normally
69
+ "https", but for testing or local servers,
70
+ "http" can be specified.
71
+ """
72
+ # Run the base constructor
73
+ maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
74
+ if maybe_url_match is None:
75
+ raise ValueError(
76
+ f"Unexpected hostname structure: {host}"
77
+ ) # pragma: NO COVER
78
+
79
+ url_match_items = maybe_url_match.groupdict()
80
+
81
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
82
+
83
+ super().__init__(
84
+ host=host,
85
+ credentials=credentials,
86
+ client_info=client_info,
87
+ always_use_jwt_access=always_use_jwt_access,
88
+ api_audience=api_audience,
89
+ )
90
+
91
+ class _BaseCountMessageTokens:
92
+ def __hash__(self): # pragma: NO COVER
93
+ return NotImplementedError("__hash__ must be implemented.")
94
+
95
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
96
+
97
+ @classmethod
98
+ def _get_unset_required_fields(cls, message_dict):
99
+ return {
100
+ k: v
101
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
102
+ if k not in message_dict
103
+ }
104
+
105
+ @staticmethod
106
+ def _get_http_options():
107
+ http_options: List[Dict[str, str]] = [
108
+ {
109
+ "method": "post",
110
+ "uri": "/v1beta3/{model=models/*}:countMessageTokens",
111
+ "body": "*",
112
+ },
113
+ ]
114
+ return http_options
115
+
116
+ @staticmethod
117
+ def _get_transcoded_request(http_options, request):
118
+ pb_request = discuss_service.CountMessageTokensRequest.pb(request)
119
+ transcoded_request = path_template.transcode(http_options, pb_request)
120
+ return transcoded_request
121
+
122
+ @staticmethod
123
+ def _get_request_body_json(transcoded_request):
124
+ # Jsonify the request body
125
+
126
+ body = json_format.MessageToJson(
127
+ transcoded_request["body"], use_integers_for_enums=True
128
+ )
129
+ return body
130
+
131
+ @staticmethod
132
+ def _get_query_params_json(transcoded_request):
133
+ query_params = json.loads(
134
+ json_format.MessageToJson(
135
+ transcoded_request["query_params"],
136
+ use_integers_for_enums=True,
137
+ )
138
+ )
139
+ query_params.update(
140
+ _BaseDiscussServiceRestTransport._BaseCountMessageTokens._get_unset_required_fields(
141
+ query_params
142
+ )
143
+ )
144
+
145
+ query_params["$alt"] = "json;enum-encoding=int"
146
+ return query_params
147
+
148
+ class _BaseGenerateMessage:
149
+ def __hash__(self): # pragma: NO COVER
150
+ return NotImplementedError("__hash__ must be implemented.")
151
+
152
+ __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
153
+
154
+ @classmethod
155
+ def _get_unset_required_fields(cls, message_dict):
156
+ return {
157
+ k: v
158
+ for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
159
+ if k not in message_dict
160
+ }
161
+
162
+ @staticmethod
163
+ def _get_http_options():
164
+ http_options: List[Dict[str, str]] = [
165
+ {
166
+ "method": "post",
167
+ "uri": "/v1beta3/{model=models/*}:generateMessage",
168
+ "body": "*",
169
+ },
170
+ ]
171
+ return http_options
172
+
173
+ @staticmethod
174
+ def _get_transcoded_request(http_options, request):
175
+ pb_request = discuss_service.GenerateMessageRequest.pb(request)
176
+ transcoded_request = path_template.transcode(http_options, pb_request)
177
+ return transcoded_request
178
+
179
+ @staticmethod
180
+ def _get_request_body_json(transcoded_request):
181
+ # Jsonify the request body
182
+
183
+ body = json_format.MessageToJson(
184
+ transcoded_request["body"], use_integers_for_enums=True
185
+ )
186
+ return body
187
+
188
+ @staticmethod
189
+ def _get_query_params_json(transcoded_request):
190
+ query_params = json.loads(
191
+ json_format.MessageToJson(
192
+ transcoded_request["query_params"],
193
+ use_integers_for_enums=True,
194
+ )
195
+ )
196
+ query_params.update(
197
+ _BaseDiscussServiceRestTransport._BaseGenerateMessage._get_unset_required_fields(
198
+ query_params
199
+ )
200
+ )
201
+
202
+ query_params["$alt"] = "json;enum-encoding=int"
203
+ return query_params
204
+
205
+
206
+ __all__ = ("_BaseDiscussServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (403 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (46.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (62.5 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/__pycache__/pagers.cpython-311.pyc ADDED
Binary file (18.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/async_client.py ADDED
@@ -0,0 +1,1156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1beta3 import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.api_core import operation # type: ignore
47
+ from google.api_core import operation_async # type: ignore
48
+ from google.longrunning import operations_pb2 # type: ignore
49
+ from google.protobuf import field_mask_pb2 # type: ignore
50
+ from google.protobuf import timestamp_pb2 # type: ignore
51
+
52
+ from google.ai.generativelanguage_v1beta3.services.model_service import pagers
53
+ from google.ai.generativelanguage_v1beta3.types import tuned_model as gag_tuned_model
54
+ from google.ai.generativelanguage_v1beta3.types import model, model_service
55
+ from google.ai.generativelanguage_v1beta3.types import tuned_model
56
+
57
+ from .client import ModelServiceClient
58
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
59
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
60
+
61
+ try:
62
+ from google.api_core import client_logging # type: ignore
63
+
64
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
65
+ except ImportError: # pragma: NO COVER
66
+ CLIENT_LOGGING_SUPPORTED = False
67
+
68
+ _LOGGER = std_logging.getLogger(__name__)
69
+
70
+
71
+ class ModelServiceAsyncClient:
72
+ """Provides methods for getting metadata information about
73
+ Generative Models.
74
+ """
75
+
76
+ _client: ModelServiceClient
77
+
78
+ # Copy defaults from the synchronous client for use here.
79
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
80
+ DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
81
+ DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
82
+ _DEFAULT_ENDPOINT_TEMPLATE = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE
83
+ _DEFAULT_UNIVERSE = ModelServiceClient._DEFAULT_UNIVERSE
84
+
85
+ model_path = staticmethod(ModelServiceClient.model_path)
86
+ parse_model_path = staticmethod(ModelServiceClient.parse_model_path)
87
+ tuned_model_path = staticmethod(ModelServiceClient.tuned_model_path)
88
+ parse_tuned_model_path = staticmethod(ModelServiceClient.parse_tuned_model_path)
89
+ common_billing_account_path = staticmethod(
90
+ ModelServiceClient.common_billing_account_path
91
+ )
92
+ parse_common_billing_account_path = staticmethod(
93
+ ModelServiceClient.parse_common_billing_account_path
94
+ )
95
+ common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
96
+ parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
97
+ common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
98
+ parse_common_organization_path = staticmethod(
99
+ ModelServiceClient.parse_common_organization_path
100
+ )
101
+ common_project_path = staticmethod(ModelServiceClient.common_project_path)
102
+ parse_common_project_path = staticmethod(
103
+ ModelServiceClient.parse_common_project_path
104
+ )
105
+ common_location_path = staticmethod(ModelServiceClient.common_location_path)
106
+ parse_common_location_path = staticmethod(
107
+ ModelServiceClient.parse_common_location_path
108
+ )
109
+
110
+ @classmethod
111
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
112
+ """Creates an instance of this client using the provided credentials
113
+ info.
114
+
115
+ Args:
116
+ info (dict): The service account private key info.
117
+ args: Additional arguments to pass to the constructor.
118
+ kwargs: Additional arguments to pass to the constructor.
119
+
120
+ Returns:
121
+ ModelServiceAsyncClient: The constructed client.
122
+ """
123
+ return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
124
+
125
+ @classmethod
126
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
127
+ """Creates an instance of this client using the provided credentials
128
+ file.
129
+
130
+ Args:
131
+ filename (str): The path to the service account private key json
132
+ file.
133
+ args: Additional arguments to pass to the constructor.
134
+ kwargs: Additional arguments to pass to the constructor.
135
+
136
+ Returns:
137
+ ModelServiceAsyncClient: The constructed client.
138
+ """
139
+ return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
140
+
141
+ from_service_account_json = from_service_account_file
142
+
143
+ @classmethod
144
+ def get_mtls_endpoint_and_cert_source(
145
+ cls, client_options: Optional[ClientOptions] = None
146
+ ):
147
+ """Return the API endpoint and client cert source for mutual TLS.
148
+
149
+ The client cert source is determined in the following order:
150
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
151
+ client cert source is None.
152
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
153
+ default client cert source exists, use the default one; otherwise the client cert
154
+ source is None.
155
+
156
+ The API endpoint is determined in the following order:
157
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
158
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
159
+ default mTLS endpoint; if the environment variable is "never", use the default API
160
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
161
+ use the default API endpoint.
162
+
163
+ More details can be found at https://google.aip.dev/auth/4114.
164
+
165
+ Args:
166
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
167
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
168
+ in this method.
169
+
170
+ Returns:
171
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
172
+ client cert source to use.
173
+
174
+ Raises:
175
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
176
+ """
177
+ return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
178
+
179
+ @property
180
+ def transport(self) -> ModelServiceTransport:
181
+ """Returns the transport used by the client instance.
182
+
183
+ Returns:
184
+ ModelServiceTransport: The transport used by the client instance.
185
+ """
186
+ return self._client.transport
187
+
188
+ @property
189
+ def api_endpoint(self):
190
+ """Return the API endpoint used by the client instance.
191
+
192
+ Returns:
193
+ str: The API endpoint used by the client instance.
194
+ """
195
+ return self._client._api_endpoint
196
+
197
+ @property
198
+ def universe_domain(self) -> str:
199
+ """Return the universe domain used by the client instance.
200
+
201
+ Returns:
202
+ str: The universe domain used
203
+ by the client instance.
204
+ """
205
+ return self._client._universe_domain
206
+
207
+ get_transport_class = ModelServiceClient.get_transport_class
208
+
209
+ def __init__(
210
+ self,
211
+ *,
212
+ credentials: Optional[ga_credentials.Credentials] = None,
213
+ transport: Optional[
214
+ Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
215
+ ] = "grpc_asyncio",
216
+ client_options: Optional[ClientOptions] = None,
217
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
218
+ ) -> None:
219
+ """Instantiates the model service async client.
220
+
221
+ Args:
222
+ credentials (Optional[google.auth.credentials.Credentials]): The
223
+ authorization credentials to attach to requests. These
224
+ credentials identify the application to the service; if none
225
+ are specified, the client will attempt to ascertain the
226
+ credentials from the environment.
227
+ transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
228
+ The transport to use, or a Callable that constructs and returns a new transport to use.
229
+ If a Callable is given, it will be called with the same set of initialization
230
+ arguments as used in the ModelServiceTransport constructor.
231
+ If set to None, a transport is chosen automatically.
232
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
233
+ Custom options for the client.
234
+
235
+ 1. The ``api_endpoint`` property can be used to override the
236
+ default endpoint provided by the client when ``transport`` is
237
+ not explicitly provided. Only if this property is not set and
238
+ ``transport`` was not explicitly provided, the endpoint is
239
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
240
+ variable, which have one of the following values:
241
+ "always" (always use the default mTLS endpoint), "never" (always
242
+ use the default regular endpoint) and "auto" (auto-switch to the
243
+ default mTLS endpoint if client certificate is present; this is
244
+ the default value).
245
+
246
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
247
+ is "true", then the ``client_cert_source`` property can be used
248
+ to provide a client certificate for mTLS transport. If
249
+ not provided, the default SSL client certificate will be used if
250
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
251
+ set, no client certificate will be used.
252
+
253
+ 3. The ``universe_domain`` property can be used to override the
254
+ default "googleapis.com" universe. Note that ``api_endpoint``
255
+ property still takes precedence; and ``universe_domain`` is
256
+ currently not supported for mTLS.
257
+
258
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
259
+ The client info used to send a user-agent string along with
260
+ API requests. If ``None``, then default info will be used.
261
+ Generally, you only need to set this if you're developing
262
+ your own client library.
263
+
264
+ Raises:
265
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
266
+ creation failed for any reason.
267
+ """
268
+ self._client = ModelServiceClient(
269
+ credentials=credentials,
270
+ transport=transport,
271
+ client_options=client_options,
272
+ client_info=client_info,
273
+ )
274
+
275
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
276
+ std_logging.DEBUG
277
+ ): # pragma: NO COVER
278
+ _LOGGER.debug(
279
+ "Created client `google.ai.generativelanguage_v1beta3.ModelServiceAsyncClient`.",
280
+ extra={
281
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
282
+ "universeDomain": getattr(
283
+ self._client._transport._credentials, "universe_domain", ""
284
+ ),
285
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
286
+ "credentialsInfo": getattr(
287
+ self.transport._credentials, "get_cred_info", lambda: None
288
+ )(),
289
+ }
290
+ if hasattr(self._client._transport, "_credentials")
291
+ else {
292
+ "serviceName": "google.ai.generativelanguage.v1beta3.ModelService",
293
+ "credentialsType": None,
294
+ },
295
+ )
296
+
297
+ async def get_model(
298
+ self,
299
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
300
+ *,
301
+ name: Optional[str] = None,
302
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
303
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
304
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
305
+ ) -> model.Model:
306
+ r"""Gets information about a specific Model.
307
+
308
+ .. code-block:: python
309
+
310
+ # This snippet has been automatically generated and should be regarded as a
311
+ # code template only.
312
+ # It will require modifications to work:
313
+ # - It may require correct/in-range values for request initialization.
314
+ # - It may require specifying regional endpoints when creating the service
315
+ # client as shown in:
316
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
317
+ from google.ai import generativelanguage_v1beta3
318
+
319
+ async def sample_get_model():
320
+ # Create a client
321
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
322
+
323
+ # Initialize request argument(s)
324
+ request = generativelanguage_v1beta3.GetModelRequest(
325
+ name="name_value",
326
+ )
327
+
328
+ # Make the request
329
+ response = await client.get_model(request=request)
330
+
331
+ # Handle the response
332
+ print(response)
333
+
334
+ Args:
335
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.GetModelRequest, dict]]):
336
+ The request object. Request for getting information about
337
+ a specific Model.
338
+ name (:class:`str`):
339
+ Required. The resource name of the model.
340
+
341
+ This name should match a model name returned by the
342
+ ``ListModels`` method.
343
+
344
+ Format: ``models/{model}``
345
+
346
+ This corresponds to the ``name`` field
347
+ on the ``request`` instance; if ``request`` is provided, this
348
+ should not be set.
349
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
350
+ should be retried.
351
+ timeout (float): The timeout for this request.
352
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
353
+ sent along with the request as metadata. Normally, each value must be of type `str`,
354
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
355
+ be of type `bytes`.
356
+
357
+ Returns:
358
+ google.ai.generativelanguage_v1beta3.types.Model:
359
+ Information about a Generative
360
+ Language Model.
361
+
362
+ """
363
+ # Create or coerce a protobuf request object.
364
+ # - Quick check: If we got a request object, we should *not* have
365
+ # gotten any keyword arguments that map to the request.
366
+ has_flattened_params = any([name])
367
+ if request is not None and has_flattened_params:
368
+ raise ValueError(
369
+ "If the `request` argument is set, then none of "
370
+ "the individual field arguments should be set."
371
+ )
372
+
373
+ # - Use the request object if provided (there's no risk of modifying the input as
374
+ # there are no flattened fields), or create one.
375
+ if not isinstance(request, model_service.GetModelRequest):
376
+ request = model_service.GetModelRequest(request)
377
+
378
+ # If we have keyword arguments corresponding to fields on the
379
+ # request, apply these.
380
+ if name is not None:
381
+ request.name = name
382
+
383
+ # Wrap the RPC method; this adds retry and timeout information,
384
+ # and friendly error handling.
385
+ rpc = self._client._transport._wrapped_methods[
386
+ self._client._transport.get_model
387
+ ]
388
+
389
+ # Certain fields should be provided within the metadata header;
390
+ # add these here.
391
+ metadata = tuple(metadata) + (
392
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
393
+ )
394
+
395
+ # Validate the universe domain.
396
+ self._client._validate_universe_domain()
397
+
398
+ # Send the request.
399
+ response = await rpc(
400
+ request,
401
+ retry=retry,
402
+ timeout=timeout,
403
+ metadata=metadata,
404
+ )
405
+
406
+ # Done; return the response.
407
+ return response
408
+
409
+ async def list_models(
410
+ self,
411
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
412
+ *,
413
+ page_size: Optional[int] = None,
414
+ page_token: Optional[str] = None,
415
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
416
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
417
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
418
+ ) -> pagers.ListModelsAsyncPager:
419
+ r"""Lists models available through the API.
420
+
421
+ .. code-block:: python
422
+
423
+ # This snippet has been automatically generated and should be regarded as a
424
+ # code template only.
425
+ # It will require modifications to work:
426
+ # - It may require correct/in-range values for request initialization.
427
+ # - It may require specifying regional endpoints when creating the service
428
+ # client as shown in:
429
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
430
+ from google.ai import generativelanguage_v1beta3
431
+
432
+ async def sample_list_models():
433
+ # Create a client
434
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
435
+
436
+ # Initialize request argument(s)
437
+ request = generativelanguage_v1beta3.ListModelsRequest(
438
+ )
439
+
440
+ # Make the request
441
+ page_result = client.list_models(request=request)
442
+
443
+ # Handle the response
444
+ async for response in page_result:
445
+ print(response)
446
+
447
+ Args:
448
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.ListModelsRequest, dict]]):
449
+ The request object. Request for listing all Models.
450
+ page_size (:class:`int`):
451
+ The maximum number of ``Models`` to return (per page).
452
+
453
+ The service may return fewer models. If unspecified, at
454
+ most 50 models will be returned per page. This method
455
+ returns at most 1000 models per page, even if you pass a
456
+ larger page_size.
457
+
458
+ This corresponds to the ``page_size`` field
459
+ on the ``request`` instance; if ``request`` is provided, this
460
+ should not be set.
461
+ page_token (:class:`str`):
462
+ A page token, received from a previous ``ListModels``
463
+ call.
464
+
465
+ Provide the ``page_token`` returned by one request as an
466
+ argument to the next request to retrieve the next page.
467
+
468
+ When paginating, all other parameters provided to
469
+ ``ListModels`` must match the call that provided the
470
+ page token.
471
+
472
+ This corresponds to the ``page_token`` field
473
+ on the ``request`` instance; if ``request`` is provided, this
474
+ should not be set.
475
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
476
+ should be retried.
477
+ timeout (float): The timeout for this request.
478
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
479
+ sent along with the request as metadata. Normally, each value must be of type `str`,
480
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
481
+ be of type `bytes`.
482
+
483
+ Returns:
484
+ google.ai.generativelanguage_v1beta3.services.model_service.pagers.ListModelsAsyncPager:
485
+ Response from ListModel containing a paginated list of
486
+ Models.
487
+
488
+ Iterating over this object will yield results and
489
+ resolve additional pages automatically.
490
+
491
+ """
492
+ # Create or coerce a protobuf request object.
493
+ # - Quick check: If we got a request object, we should *not* have
494
+ # gotten any keyword arguments that map to the request.
495
+ has_flattened_params = any([page_size, page_token])
496
+ if request is not None and has_flattened_params:
497
+ raise ValueError(
498
+ "If the `request` argument is set, then none of "
499
+ "the individual field arguments should be set."
500
+ )
501
+
502
+ # - Use the request object if provided (there's no risk of modifying the input as
503
+ # there are no flattened fields), or create one.
504
+ if not isinstance(request, model_service.ListModelsRequest):
505
+ request = model_service.ListModelsRequest(request)
506
+
507
+ # If we have keyword arguments corresponding to fields on the
508
+ # request, apply these.
509
+ if page_size is not None:
510
+ request.page_size = page_size
511
+ if page_token is not None:
512
+ request.page_token = page_token
513
+
514
+ # Wrap the RPC method; this adds retry and timeout information,
515
+ # and friendly error handling.
516
+ rpc = self._client._transport._wrapped_methods[
517
+ self._client._transport.list_models
518
+ ]
519
+
520
+ # Validate the universe domain.
521
+ self._client._validate_universe_domain()
522
+
523
+ # Send the request.
524
+ response = await rpc(
525
+ request,
526
+ retry=retry,
527
+ timeout=timeout,
528
+ metadata=metadata,
529
+ )
530
+
531
+ # This method is paged; wrap the response in a pager, which provides
532
+ # an `__aiter__` convenience method.
533
+ response = pagers.ListModelsAsyncPager(
534
+ method=rpc,
535
+ request=request,
536
+ response=response,
537
+ retry=retry,
538
+ timeout=timeout,
539
+ metadata=metadata,
540
+ )
541
+
542
+ # Done; return the response.
543
+ return response
544
+
545
+ async def get_tuned_model(
546
+ self,
547
+ request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None,
548
+ *,
549
+ name: Optional[str] = None,
550
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
551
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
552
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
553
+ ) -> tuned_model.TunedModel:
554
+ r"""Gets information about a specific TunedModel.
555
+
556
+ .. code-block:: python
557
+
558
+ # This snippet has been automatically generated and should be regarded as a
559
+ # code template only.
560
+ # It will require modifications to work:
561
+ # - It may require correct/in-range values for request initialization.
562
+ # - It may require specifying regional endpoints when creating the service
563
+ # client as shown in:
564
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
565
+ from google.ai import generativelanguage_v1beta3
566
+
567
+ async def sample_get_tuned_model():
568
+ # Create a client
569
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
570
+
571
+ # Initialize request argument(s)
572
+ request = generativelanguage_v1beta3.GetTunedModelRequest(
573
+ name="name_value",
574
+ )
575
+
576
+ # Make the request
577
+ response = await client.get_tuned_model(request=request)
578
+
579
+ # Handle the response
580
+ print(response)
581
+
582
+ Args:
583
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.GetTunedModelRequest, dict]]):
584
+ The request object. Request for getting information about
585
+ a specific Model.
586
+ name (:class:`str`):
587
+ Required. The resource name of the model.
588
+
589
+ Format: ``tunedModels/my-model-id``
590
+
591
+ This corresponds to the ``name`` field
592
+ on the ``request`` instance; if ``request`` is provided, this
593
+ should not be set.
594
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
595
+ should be retried.
596
+ timeout (float): The timeout for this request.
597
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
598
+ sent along with the request as metadata. Normally, each value must be of type `str`,
599
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
600
+ be of type `bytes`.
601
+
602
+ Returns:
603
+ google.ai.generativelanguage_v1beta3.types.TunedModel:
604
+ A fine-tuned model created using
605
+ ModelService.CreateTunedModel.
606
+
607
+ """
608
+ # Create or coerce a protobuf request object.
609
+ # - Quick check: If we got a request object, we should *not* have
610
+ # gotten any keyword arguments that map to the request.
611
+ has_flattened_params = any([name])
612
+ if request is not None and has_flattened_params:
613
+ raise ValueError(
614
+ "If the `request` argument is set, then none of "
615
+ "the individual field arguments should be set."
616
+ )
617
+
618
+ # - Use the request object if provided (there's no risk of modifying the input as
619
+ # there are no flattened fields), or create one.
620
+ if not isinstance(request, model_service.GetTunedModelRequest):
621
+ request = model_service.GetTunedModelRequest(request)
622
+
623
+ # If we have keyword arguments corresponding to fields on the
624
+ # request, apply these.
625
+ if name is not None:
626
+ request.name = name
627
+
628
+ # Wrap the RPC method; this adds retry and timeout information,
629
+ # and friendly error handling.
630
+ rpc = self._client._transport._wrapped_methods[
631
+ self._client._transport.get_tuned_model
632
+ ]
633
+
634
+ # Certain fields should be provided within the metadata header;
635
+ # add these here.
636
+ metadata = tuple(metadata) + (
637
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
638
+ )
639
+
640
+ # Validate the universe domain.
641
+ self._client._validate_universe_domain()
642
+
643
+ # Send the request.
644
+ response = await rpc(
645
+ request,
646
+ retry=retry,
647
+ timeout=timeout,
648
+ metadata=metadata,
649
+ )
650
+
651
+ # Done; return the response.
652
+ return response
653
+
654
+ async def list_tuned_models(
655
+ self,
656
+ request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None,
657
+ *,
658
+ page_size: Optional[int] = None,
659
+ page_token: Optional[str] = None,
660
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
661
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
662
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
663
+ ) -> pagers.ListTunedModelsAsyncPager:
664
+ r"""Lists tuned models owned by the user.
665
+
666
+ .. code-block:: python
667
+
668
+ # This snippet has been automatically generated and should be regarded as a
669
+ # code template only.
670
+ # It will require modifications to work:
671
+ # - It may require correct/in-range values for request initialization.
672
+ # - It may require specifying regional endpoints when creating the service
673
+ # client as shown in:
674
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
675
+ from google.ai import generativelanguage_v1beta3
676
+
677
+ async def sample_list_tuned_models():
678
+ # Create a client
679
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
680
+
681
+ # Initialize request argument(s)
682
+ request = generativelanguage_v1beta3.ListTunedModelsRequest(
683
+ )
684
+
685
+ # Make the request
686
+ page_result = client.list_tuned_models(request=request)
687
+
688
+ # Handle the response
689
+ async for response in page_result:
690
+ print(response)
691
+
692
+ Args:
693
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.ListTunedModelsRequest, dict]]):
694
+ The request object. Request for listing TunedModels.
695
+ page_size (:class:`int`):
696
+ Optional. The maximum number of ``TunedModels`` to
697
+ return (per page). The service may return fewer tuned
698
+ models.
699
+
700
+ If unspecified, at most 10 tuned models will be
701
+ returned. This method returns at most 1000 models per
702
+ page, even if you pass a larger page_size.
703
+
704
+ This corresponds to the ``page_size`` field
705
+ on the ``request`` instance; if ``request`` is provided, this
706
+ should not be set.
707
+ page_token (:class:`str`):
708
+ Optional. A page token, received from a previous
709
+ ``ListTunedModels`` call.
710
+
711
+ Provide the ``page_token`` returned by one request as an
712
+ argument to the next request to retrieve the next page.
713
+
714
+ When paginating, all other parameters provided to
715
+ ``ListTunedModels`` must match the call that provided
716
+ the page token.
717
+
718
+ This corresponds to the ``page_token`` field
719
+ on the ``request`` instance; if ``request`` is provided, this
720
+ should not be set.
721
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
722
+ should be retried.
723
+ timeout (float): The timeout for this request.
724
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
725
+ sent along with the request as metadata. Normally, each value must be of type `str`,
726
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
727
+ be of type `bytes`.
728
+
729
+ Returns:
730
+ google.ai.generativelanguage_v1beta3.services.model_service.pagers.ListTunedModelsAsyncPager:
731
+ Response from ListTunedModels containing a paginated
732
+ list of Models.
733
+
734
+ Iterating over this object will yield results and
735
+ resolve additional pages automatically.
736
+
737
+ """
738
+ # Create or coerce a protobuf request object.
739
+ # - Quick check: If we got a request object, we should *not* have
740
+ # gotten any keyword arguments that map to the request.
741
+ has_flattened_params = any([page_size, page_token])
742
+ if request is not None and has_flattened_params:
743
+ raise ValueError(
744
+ "If the `request` argument is set, then none of "
745
+ "the individual field arguments should be set."
746
+ )
747
+
748
+ # - Use the request object if provided (there's no risk of modifying the input as
749
+ # there are no flattened fields), or create one.
750
+ if not isinstance(request, model_service.ListTunedModelsRequest):
751
+ request = model_service.ListTunedModelsRequest(request)
752
+
753
+ # If we have keyword arguments corresponding to fields on the
754
+ # request, apply these.
755
+ if page_size is not None:
756
+ request.page_size = page_size
757
+ if page_token is not None:
758
+ request.page_token = page_token
759
+
760
+ # Wrap the RPC method; this adds retry and timeout information,
761
+ # and friendly error handling.
762
+ rpc = self._client._transport._wrapped_methods[
763
+ self._client._transport.list_tuned_models
764
+ ]
765
+
766
+ # Validate the universe domain.
767
+ self._client._validate_universe_domain()
768
+
769
+ # Send the request.
770
+ response = await rpc(
771
+ request,
772
+ retry=retry,
773
+ timeout=timeout,
774
+ metadata=metadata,
775
+ )
776
+
777
+ # This method is paged; wrap the response in a pager, which provides
778
+ # an `__aiter__` convenience method.
779
+ response = pagers.ListTunedModelsAsyncPager(
780
+ method=rpc,
781
+ request=request,
782
+ response=response,
783
+ retry=retry,
784
+ timeout=timeout,
785
+ metadata=metadata,
786
+ )
787
+
788
+ # Done; return the response.
789
+ return response
790
+
791
+ async def create_tuned_model(
792
+ self,
793
+ request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None,
794
+ *,
795
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
796
+ tuned_model_id: Optional[str] = None,
797
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
798
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
799
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
800
+ ) -> operation_async.AsyncOperation:
801
+ r"""Creates a tuned model. Intermediate tuning progress (if any) is
802
+ accessed through the [google.longrunning.Operations] service.
803
+
804
+ Status and results can be accessed through the Operations
805
+ service. Example: GET
806
+ /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
807
+
808
+ .. code-block:: python
809
+
810
+ # This snippet has been automatically generated and should be regarded as a
811
+ # code template only.
812
+ # It will require modifications to work:
813
+ # - It may require correct/in-range values for request initialization.
814
+ # - It may require specifying regional endpoints when creating the service
815
+ # client as shown in:
816
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
817
+ from google.ai import generativelanguage_v1beta3
818
+
819
+ async def sample_create_tuned_model():
820
+ # Create a client
821
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
822
+
823
+ # Initialize request argument(s)
824
+ tuned_model = generativelanguage_v1beta3.TunedModel()
825
+ tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
826
+ tuned_model.tuning_task.training_data.examples.examples.output = "output_value"
827
+
828
+ request = generativelanguage_v1beta3.CreateTunedModelRequest(
829
+ tuned_model=tuned_model,
830
+ )
831
+
832
+ # Make the request
833
+ operation = client.create_tuned_model(request=request)
834
+
835
+ print("Waiting for operation to complete...")
836
+
837
+ response = (await operation).result()
838
+
839
+ # Handle the response
840
+ print(response)
841
+
842
+ Args:
843
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.CreateTunedModelRequest, dict]]):
844
+ The request object. Request to create a TunedModel.
845
+ tuned_model (:class:`google.ai.generativelanguage_v1beta3.types.TunedModel`):
846
+ Required. The tuned model to create.
847
+ This corresponds to the ``tuned_model`` field
848
+ on the ``request`` instance; if ``request`` is provided, this
849
+ should not be set.
850
+ tuned_model_id (:class:`str`):
851
+ Optional. The unique id for the tuned model if
852
+ specified. This value should be up to 40 characters, the
853
+ first character must be a letter, the last could be a
854
+ letter or a number. The id must match the regular
855
+ expression: `a-z <[a-z0-9-]{0,38}[a-z0-9]>`__?.
856
+
857
+ This corresponds to the ``tuned_model_id`` field
858
+ on the ``request`` instance; if ``request`` is provided, this
859
+ should not be set.
860
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
861
+ should be retried.
862
+ timeout (float): The timeout for this request.
863
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
864
+ sent along with the request as metadata. Normally, each value must be of type `str`,
865
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
866
+ be of type `bytes`.
867
+
868
+ Returns:
869
+ google.api_core.operation_async.AsyncOperation:
870
+ An object representing a long-running operation.
871
+
872
+ The result type for the operation will be
873
+ :class:`google.ai.generativelanguage_v1beta3.types.TunedModel`
874
+ A fine-tuned model created using
875
+ ModelService.CreateTunedModel.
876
+
877
+ """
878
+ # Create or coerce a protobuf request object.
879
+ # - Quick check: If we got a request object, we should *not* have
880
+ # gotten any keyword arguments that map to the request.
881
+ has_flattened_params = any([tuned_model, tuned_model_id])
882
+ if request is not None and has_flattened_params:
883
+ raise ValueError(
884
+ "If the `request` argument is set, then none of "
885
+ "the individual field arguments should be set."
886
+ )
887
+
888
+ # - Use the request object if provided (there's no risk of modifying the input as
889
+ # there are no flattened fields), or create one.
890
+ if not isinstance(request, model_service.CreateTunedModelRequest):
891
+ request = model_service.CreateTunedModelRequest(request)
892
+
893
+ # If we have keyword arguments corresponding to fields on the
894
+ # request, apply these.
895
+ if tuned_model is not None:
896
+ request.tuned_model = tuned_model
897
+ if tuned_model_id is not None:
898
+ request.tuned_model_id = tuned_model_id
899
+
900
+ # Wrap the RPC method; this adds retry and timeout information,
901
+ # and friendly error handling.
902
+ rpc = self._client._transport._wrapped_methods[
903
+ self._client._transport.create_tuned_model
904
+ ]
905
+
906
+ # Validate the universe domain.
907
+ self._client._validate_universe_domain()
908
+
909
+ # Send the request.
910
+ response = await rpc(
911
+ request,
912
+ retry=retry,
913
+ timeout=timeout,
914
+ metadata=metadata,
915
+ )
916
+
917
+ # Wrap the response in an operation future.
918
+ response = operation_async.from_gapic(
919
+ response,
920
+ self._client._transport.operations_client,
921
+ gag_tuned_model.TunedModel,
922
+ metadata_type=model_service.CreateTunedModelMetadata,
923
+ )
924
+
925
+ # Done; return the response.
926
+ return response
927
+
928
+ async def update_tuned_model(
929
+ self,
930
+ request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None,
931
+ *,
932
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
933
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
934
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
935
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
936
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
937
+ ) -> gag_tuned_model.TunedModel:
938
+ r"""Updates a tuned model.
939
+
940
+ .. code-block:: python
941
+
942
+ # This snippet has been automatically generated and should be regarded as a
943
+ # code template only.
944
+ # It will require modifications to work:
945
+ # - It may require correct/in-range values for request initialization.
946
+ # - It may require specifying regional endpoints when creating the service
947
+ # client as shown in:
948
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
949
+ from google.ai import generativelanguage_v1beta3
950
+
951
+ async def sample_update_tuned_model():
952
+ # Create a client
953
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
954
+
955
+ # Initialize request argument(s)
956
+ tuned_model = generativelanguage_v1beta3.TunedModel()
957
+ tuned_model.tuning_task.training_data.examples.examples.text_input = "text_input_value"
958
+ tuned_model.tuning_task.training_data.examples.examples.output = "output_value"
959
+
960
+ request = generativelanguage_v1beta3.UpdateTunedModelRequest(
961
+ tuned_model=tuned_model,
962
+ )
963
+
964
+ # Make the request
965
+ response = await client.update_tuned_model(request=request)
966
+
967
+ # Handle the response
968
+ print(response)
969
+
970
+ Args:
971
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.UpdateTunedModelRequest, dict]]):
972
+ The request object. Request to update a TunedModel.
973
+ tuned_model (:class:`google.ai.generativelanguage_v1beta3.types.TunedModel`):
974
+ Required. The tuned model to update.
975
+ This corresponds to the ``tuned_model`` field
976
+ on the ``request`` instance; if ``request`` is provided, this
977
+ should not be set.
978
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
979
+ Required. The list of fields to
980
+ update.
981
+
982
+ This corresponds to the ``update_mask`` field
983
+ on the ``request`` instance; if ``request`` is provided, this
984
+ should not be set.
985
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
986
+ should be retried.
987
+ timeout (float): The timeout for this request.
988
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
989
+ sent along with the request as metadata. Normally, each value must be of type `str`,
990
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
991
+ be of type `bytes`.
992
+
993
+ Returns:
994
+ google.ai.generativelanguage_v1beta3.types.TunedModel:
995
+ A fine-tuned model created using
996
+ ModelService.CreateTunedModel.
997
+
998
+ """
999
+ # Create or coerce a protobuf request object.
1000
+ # - Quick check: If we got a request object, we should *not* have
1001
+ # gotten any keyword arguments that map to the request.
1002
+ has_flattened_params = any([tuned_model, update_mask])
1003
+ if request is not None and has_flattened_params:
1004
+ raise ValueError(
1005
+ "If the `request` argument is set, then none of "
1006
+ "the individual field arguments should be set."
1007
+ )
1008
+
1009
+ # - Use the request object if provided (there's no risk of modifying the input as
1010
+ # there are no flattened fields), or create one.
1011
+ if not isinstance(request, model_service.UpdateTunedModelRequest):
1012
+ request = model_service.UpdateTunedModelRequest(request)
1013
+
1014
+ # If we have keyword arguments corresponding to fields on the
1015
+ # request, apply these.
1016
+ if tuned_model is not None:
1017
+ request.tuned_model = tuned_model
1018
+ if update_mask is not None:
1019
+ request.update_mask = update_mask
1020
+
1021
+ # Wrap the RPC method; this adds retry and timeout information,
1022
+ # and friendly error handling.
1023
+ rpc = self._client._transport._wrapped_methods[
1024
+ self._client._transport.update_tuned_model
1025
+ ]
1026
+
1027
+ # Certain fields should be provided within the metadata header;
1028
+ # add these here.
1029
+ metadata = tuple(metadata) + (
1030
+ gapic_v1.routing_header.to_grpc_metadata(
1031
+ (("tuned_model.name", request.tuned_model.name),)
1032
+ ),
1033
+ )
1034
+
1035
+ # Validate the universe domain.
1036
+ self._client._validate_universe_domain()
1037
+
1038
+ # Send the request.
1039
+ response = await rpc(
1040
+ request,
1041
+ retry=retry,
1042
+ timeout=timeout,
1043
+ metadata=metadata,
1044
+ )
1045
+
1046
+ # Done; return the response.
1047
+ return response
1048
+
1049
+ async def delete_tuned_model(
1050
+ self,
1051
+ request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None,
1052
+ *,
1053
+ name: Optional[str] = None,
1054
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1055
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1056
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1057
+ ) -> None:
1058
+ r"""Deletes a tuned model.
1059
+
1060
+ .. code-block:: python
1061
+
1062
+ # This snippet has been automatically generated and should be regarded as a
1063
+ # code template only.
1064
+ # It will require modifications to work:
1065
+ # - It may require correct/in-range values for request initialization.
1066
+ # - It may require specifying regional endpoints when creating the service
1067
+ # client as shown in:
1068
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1069
+ from google.ai import generativelanguage_v1beta3
1070
+
1071
+ async def sample_delete_tuned_model():
1072
+ # Create a client
1073
+ client = generativelanguage_v1beta3.ModelServiceAsyncClient()
1074
+
1075
+ # Initialize request argument(s)
1076
+ request = generativelanguage_v1beta3.DeleteTunedModelRequest(
1077
+ name="name_value",
1078
+ )
1079
+
1080
+ # Make the request
1081
+ await client.delete_tuned_model(request=request)
1082
+
1083
+ Args:
1084
+ request (Optional[Union[google.ai.generativelanguage_v1beta3.types.DeleteTunedModelRequest, dict]]):
1085
+ The request object. Request to delete a TunedModel.
1086
+ name (:class:`str`):
1087
+ Required. The resource name of the model. Format:
1088
+ ``tunedModels/my-model-id``
1089
+
1090
+ This corresponds to the ``name`` field
1091
+ on the ``request`` instance; if ``request`` is provided, this
1092
+ should not be set.
1093
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
1094
+ should be retried.
1095
+ timeout (float): The timeout for this request.
1096
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1097
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1098
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1099
+ be of type `bytes`.
1100
+ """
1101
+ # Create or coerce a protobuf request object.
1102
+ # - Quick check: If we got a request object, we should *not* have
1103
+ # gotten any keyword arguments that map to the request.
1104
+ has_flattened_params = any([name])
1105
+ if request is not None and has_flattened_params:
1106
+ raise ValueError(
1107
+ "If the `request` argument is set, then none of "
1108
+ "the individual field arguments should be set."
1109
+ )
1110
+
1111
+ # - Use the request object if provided (there's no risk of modifying the input as
1112
+ # there are no flattened fields), or create one.
1113
+ if not isinstance(request, model_service.DeleteTunedModelRequest):
1114
+ request = model_service.DeleteTunedModelRequest(request)
1115
+
1116
+ # If we have keyword arguments corresponding to fields on the
1117
+ # request, apply these.
1118
+ if name is not None:
1119
+ request.name = name
1120
+
1121
+ # Wrap the RPC method; this adds retry and timeout information,
1122
+ # and friendly error handling.
1123
+ rpc = self._client._transport._wrapped_methods[
1124
+ self._client._transport.delete_tuned_model
1125
+ ]
1126
+
1127
+ # Certain fields should be provided within the metadata header;
1128
+ # add these here.
1129
+ metadata = tuple(metadata) + (
1130
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1131
+ )
1132
+
1133
+ # Validate the universe domain.
1134
+ self._client._validate_universe_domain()
1135
+
1136
+ # Send the request.
1137
+ await rpc(
1138
+ request,
1139
+ retry=retry,
1140
+ timeout=timeout,
1141
+ metadata=metadata,
1142
+ )
1143
+
1144
    async def __aenter__(self) -> "ModelServiceAsyncClient":
        # Support `async with` usage; the client itself is the context value.
        return self
1146
+
1147
    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport when leaving the `async with` block.
        await self.transport.close()
1149
+
1150
+
1151
# Default client metadata reported with each request; carries the
# package's own version string.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


# Public API of this module.
__all__ = ("ModelServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/model_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (896 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta3/services/permission_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (418 Bytes). View file