koichi12 commited on
Commit
d99afc4
·
verified ·
1 Parent(s): b292475

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/__pycache__/__init__.cpython-311.pyc +0 -0
  2. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/async_client.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/client.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__init__.py +36 -0
  6. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/base.py +263 -0
  13. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__init__.py +22 -0
  14. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/__init__.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/async_client.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/client.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/pagers.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/async_client.py +1261 -0
  19. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/client.py +1646 -0
  20. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/pagers.py +353 -0
  21. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__init__.py +36 -0
  22. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/base.py +290 -0
  29. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/grpc.py +582 -0
  30. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/grpc_asyncio.py +655 -0
  31. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/rest.py +1819 -0
  32. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/rest_base.py +476 -0
  33. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/__init__.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/async_client.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/client.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/pagers.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/async_client.py +1150 -0
  38. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/client.py +1532 -0
  39. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/pagers.py +197 -0
  40. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__init__.py +22 -0
  45. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/__init__.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/async_client.cpython-311.pyc +0 -0
  47. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/client.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/async_client.py +535 -0
  49. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/client.py +929 -0
  50. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/transports/__init__.py +36 -0
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (218 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (403 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (38.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (55.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Transport implementations available for CacheService."""
from collections import OrderedDict
from typing import Dict, Type

from .base import CacheServiceTransport
from .grpc import CacheServiceGrpcTransport
from .grpc_asyncio import CacheServiceGrpcAsyncIOTransport
from .rest import CacheServiceRestInterceptor, CacheServiceRestTransport

# Registry mapping a transport name ("grpc", "grpc_asyncio", "rest") to the
# concrete transport class that implements it. Insertion order is preserved.
_transport_registry: Dict[str, Type[CacheServiceTransport]] = OrderedDict(
    (
        ("grpc", CacheServiceGrpcTransport),
        ("grpc_asyncio", CacheServiceGrpcAsyncIOTransport),
        ("rest", CacheServiceRestTransport),
    )
)

__all__ = (
    "CacheServiceTransport",
    "CacheServiceGrpcTransport",
    "CacheServiceGrpcAsyncIOTransport",
    "CacheServiceRestTransport",
    "CacheServiceRestInterceptor",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (896 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (10.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (22.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (25.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (53.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (18.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/cache_service/transports/base.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union

import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.oauth2 import service_account  # type: ignore
from google.protobuf import empty_pb2  # type: ignore

from google.ai.generativelanguage_v1alpha import gapic_version as package_version
from google.ai.generativelanguage_v1alpha.types import (
    cached_content as gag_cached_content,
)
from google.ai.generativelanguage_v1alpha.types import cache_service
from google.ai.generativelanguage_v1alpha.types import cached_content

# Default client metadata: carries the package version in the user-agent
# string attached to requests unless a caller supplies its own ClientInfo.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class CacheServiceTransport(abc.ABC):
    """Abstract transport class for CacheService.

    This base class performs credential resolution and host normalization;
    concrete subclasses (gRPC, gRPC-asyncio, REST) implement the RPC
    properties declared below, each of which raises ``NotImplementedError``
    here.
    """

    # No OAuth scopes are requested by default for this service.
    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The audience applied to GDC-H
                credentials; defaults to ``host`` when unset.
            kwargs: Extra arguments accepted (and ignored here) so that
                subclasses can forward their full argument set to this
                initializer.
        """

        # Keyword arguments shared by both credential loaders below.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes
        # NOTE(review): presumably a subclass may set _ignore_credentials
        # before calling super().__init__ to skip default credential lookup —
        # confirm against the concrete transports.
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            # Explicit credentials file wins over ambient defaults.
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        # The normalized "host:port" string resolved in __init__.
        return self._host

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Each RPC is wrapped once with
        # gapic_v1.method.wrap_method so retry/timeout/metadata policy is
        # applied uniformly; no default timeouts are configured here.
        self._wrapped_methods = {
            self.list_cached_contents: gapic_v1.method.wrap_method(
                self.list_cached_contents,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_cached_content: gapic_v1.method.wrap_method(
                self.create_cached_content,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_cached_content: gapic_v1.method.wrap_method(
                self.get_cached_content,
                default_timeout=None,
                client_info=client_info,
            ),
            self.update_cached_content: gapic_v1.method.wrap_method(
                self.update_cached_content,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete_cached_content: gapic_v1.method.wrap_method(
                self.delete_cached_content,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_operation: gapic_v1.method.wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: gapic_v1.method.wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def list_cached_contents(
        self,
    ) -> Callable[
        [cache_service.ListCachedContentsRequest],
        Union[
            cache_service.ListCachedContentsResponse,
            Awaitable[cache_service.ListCachedContentsResponse],
        ],
    ]:
        # Subclasses return the callable for the ListCachedContents RPC
        # (sync transports return the response, async ones an awaitable).
        raise NotImplementedError()

    @property
    def create_cached_content(
        self,
    ) -> Callable[
        [cache_service.CreateCachedContentRequest],
        Union[
            gag_cached_content.CachedContent,
            Awaitable[gag_cached_content.CachedContent],
        ],
    ]:
        # Callable for the CreateCachedContent RPC.
        raise NotImplementedError()

    @property
    def get_cached_content(
        self,
    ) -> Callable[
        [cache_service.GetCachedContentRequest],
        Union[cached_content.CachedContent, Awaitable[cached_content.CachedContent]],
    ]:
        # Callable for the GetCachedContent RPC.
        raise NotImplementedError()

    @property
    def update_cached_content(
        self,
    ) -> Callable[
        [cache_service.UpdateCachedContentRequest],
        Union[
            gag_cached_content.CachedContent,
            Awaitable[gag_cached_content.CachedContent],
        ],
    ]:
        # Callable for the UpdateCachedContent RPC.
        raise NotImplementedError()

    @property
    def delete_cached_content(
        self,
    ) -> Callable[
        [cache_service.DeleteCachedContentRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        # Callable for the DeleteCachedContent RPC; returns Empty on success.
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        # Callable for the long-running-operations ListOperations RPC.
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        # Callable for the long-running-operations GetOperation RPC.
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short transport identifier, e.g. "grpc" or "rest" — provided by
        # the concrete subclass.
        raise NotImplementedError()


__all__ = ("CacheServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Public entry points of the ModelService client package.

Re-exports the synchronous and asynchronous client classes.
"""
from .client import ModelServiceClient
from .async_client import ModelServiceAsyncClient

__all__ = ("ModelServiceClient", "ModelServiceAsyncClient")
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (403 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (50 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (66.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/__pycache__/pagers.cpython-311.pyc ADDED
Binary file (18.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/async_client.py ADDED
@@ -0,0 +1,1261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.api_core import operation # type: ignore
47
+ from google.api_core import operation_async # type: ignore
48
+ from google.longrunning import operations_pb2 # type: ignore
49
+ from google.protobuf import field_mask_pb2 # type: ignore
50
+ from google.protobuf import timestamp_pb2 # type: ignore
51
+
52
+ from google.ai.generativelanguage_v1alpha.services.model_service import pagers
53
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
54
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
55
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
56
+
57
+ from .client import ModelServiceClient
58
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
59
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
60
+
61
+ try:
62
+ from google.api_core import client_logging # type: ignore
63
+
64
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
65
+ except ImportError: # pragma: NO COVER
66
+ CLIENT_LOGGING_SUPPORTED = False
67
+
68
+ _LOGGER = std_logging.getLogger(__name__)
69
+
70
+
71
+ class ModelServiceAsyncClient:
72
+ """Provides methods for getting metadata information about
73
+ Generative Models.
74
+ """
75
+
76
+ _client: ModelServiceClient
77
+
78
+ # Copy defaults from the synchronous client for use here.
79
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
80
+ DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
81
+ DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
82
+ _DEFAULT_ENDPOINT_TEMPLATE = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE
83
+ _DEFAULT_UNIVERSE = ModelServiceClient._DEFAULT_UNIVERSE
84
+
85
+ model_path = staticmethod(ModelServiceClient.model_path)
86
+ parse_model_path = staticmethod(ModelServiceClient.parse_model_path)
87
+ tuned_model_path = staticmethod(ModelServiceClient.tuned_model_path)
88
+ parse_tuned_model_path = staticmethod(ModelServiceClient.parse_tuned_model_path)
89
+ common_billing_account_path = staticmethod(
90
+ ModelServiceClient.common_billing_account_path
91
+ )
92
+ parse_common_billing_account_path = staticmethod(
93
+ ModelServiceClient.parse_common_billing_account_path
94
+ )
95
+ common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
96
+ parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
97
+ common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
98
+ parse_common_organization_path = staticmethod(
99
+ ModelServiceClient.parse_common_organization_path
100
+ )
101
+ common_project_path = staticmethod(ModelServiceClient.common_project_path)
102
+ parse_common_project_path = staticmethod(
103
+ ModelServiceClient.parse_common_project_path
104
+ )
105
+ common_location_path = staticmethod(ModelServiceClient.common_location_path)
106
+ parse_common_location_path = staticmethod(
107
+ ModelServiceClient.parse_common_location_path
108
+ )
109
+
110
+ @classmethod
111
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
112
+ """Creates an instance of this client using the provided credentials
113
+ info.
114
+
115
+ Args:
116
+ info (dict): The service account private key info.
117
+ args: Additional arguments to pass to the constructor.
118
+ kwargs: Additional arguments to pass to the constructor.
119
+
120
+ Returns:
121
+ ModelServiceAsyncClient: The constructed client.
122
+ """
123
+ return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
124
+
125
+ @classmethod
126
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
127
+ """Creates an instance of this client using the provided credentials
128
+ file.
129
+
130
+ Args:
131
+ filename (str): The path to the service account private key json
132
+ file.
133
+ args: Additional arguments to pass to the constructor.
134
+ kwargs: Additional arguments to pass to the constructor.
135
+
136
+ Returns:
137
+ ModelServiceAsyncClient: The constructed client.
138
+ """
139
+ return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
140
+
141
+ from_service_account_json = from_service_account_file
142
+
143
+ @classmethod
144
+ def get_mtls_endpoint_and_cert_source(
145
+ cls, client_options: Optional[ClientOptions] = None
146
+ ):
147
+ """Return the API endpoint and client cert source for mutual TLS.
148
+
149
+ The client cert source is determined in the following order:
150
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
151
+ client cert source is None.
152
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
153
+ default client cert source exists, use the default one; otherwise the client cert
154
+ source is None.
155
+
156
+ The API endpoint is determined in the following order:
157
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
158
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
159
+ default mTLS endpoint; if the environment variable is "never", use the default API
160
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
161
+ use the default API endpoint.
162
+
163
+ More details can be found at https://google.aip.dev/auth/4114.
164
+
165
+ Args:
166
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
167
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
168
+ in this method.
169
+
170
+ Returns:
171
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
172
+ client cert source to use.
173
+
174
+ Raises:
175
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
176
+ """
177
+ return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
178
+
179
+ @property
180
+ def transport(self) -> ModelServiceTransport:
181
+ """Returns the transport used by the client instance.
182
+
183
+ Returns:
184
+ ModelServiceTransport: The transport used by the client instance.
185
+ """
186
+ return self._client.transport
187
+
188
+ @property
189
+ def api_endpoint(self):
190
+ """Return the API endpoint used by the client instance.
191
+
192
+ Returns:
193
+ str: The API endpoint used by the client instance.
194
+ """
195
+ return self._client._api_endpoint
196
+
197
+ @property
198
+ def universe_domain(self) -> str:
199
+ """Return the universe domain used by the client instance.
200
+
201
+ Returns:
202
+ str: The universe domain used
203
+ by the client instance.
204
+ """
205
+ return self._client._universe_domain
206
+
207
    # Expose the transport-class resolver from the synchronous client.
    get_transport_class = ModelServiceClient.get_transport_class
208
+
209
+ def __init__(
210
+ self,
211
+ *,
212
+ credentials: Optional[ga_credentials.Credentials] = None,
213
+ transport: Optional[
214
+ Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
215
+ ] = "grpc_asyncio",
216
+ client_options: Optional[ClientOptions] = None,
217
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
218
+ ) -> None:
219
+ """Instantiates the model service async client.
220
+
221
+ Args:
222
+ credentials (Optional[google.auth.credentials.Credentials]): The
223
+ authorization credentials to attach to requests. These
224
+ credentials identify the application to the service; if none
225
+ are specified, the client will attempt to ascertain the
226
+ credentials from the environment.
227
+ transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
228
+ The transport to use, or a Callable that constructs and returns a new transport to use.
229
+ If a Callable is given, it will be called with the same set of initialization
230
+ arguments as used in the ModelServiceTransport constructor.
231
+ If set to None, a transport is chosen automatically.
232
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
233
+ Custom options for the client.
234
+
235
+ 1. The ``api_endpoint`` property can be used to override the
236
+ default endpoint provided by the client when ``transport`` is
237
+ not explicitly provided. Only if this property is not set and
238
+ ``transport`` was not explicitly provided, the endpoint is
239
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
240
+ variable, which have one of the following values:
241
+ "always" (always use the default mTLS endpoint), "never" (always
242
+ use the default regular endpoint) and "auto" (auto-switch to the
243
+ default mTLS endpoint if client certificate is present; this is
244
+ the default value).
245
+
246
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
247
+ is "true", then the ``client_cert_source`` property can be used
248
+ to provide a client certificate for mTLS transport. If
249
+ not provided, the default SSL client certificate will be used if
250
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
251
+ set, no client certificate will be used.
252
+
253
+ 3. The ``universe_domain`` property can be used to override the
254
+ default "googleapis.com" universe. Note that ``api_endpoint``
255
+ property still takes precedence; and ``universe_domain`` is
256
+ currently not supported for mTLS.
257
+
258
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
259
+ The client info used to send a user-agent string along with
260
+ API requests. If ``None``, then default info will be used.
261
+ Generally, you only need to set this if you're developing
262
+ your own client library.
263
+
264
+ Raises:
265
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
266
+ creation failed for any reason.
267
+ """
268
+ self._client = ModelServiceClient(
269
+ credentials=credentials,
270
+ transport=transport,
271
+ client_options=client_options,
272
+ client_info=client_info,
273
+ )
274
+
275
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
276
+ std_logging.DEBUG
277
+ ): # pragma: NO COVER
278
+ _LOGGER.debug(
279
+ "Created client `google.ai.generativelanguage_v1alpha.ModelServiceAsyncClient`.",
280
+ extra={
281
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
282
+ "universeDomain": getattr(
283
+ self._client._transport._credentials, "universe_domain", ""
284
+ ),
285
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
286
+ "credentialsInfo": getattr(
287
+ self.transport._credentials, "get_cred_info", lambda: None
288
+ )(),
289
+ }
290
+ if hasattr(self._client._transport, "_credentials")
291
+ else {
292
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
293
+ "credentialsType": None,
294
+ },
295
+ )
296
+
297
+ async def get_model(
298
+ self,
299
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
300
+ *,
301
+ name: Optional[str] = None,
302
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
303
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
304
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
305
+ ) -> model.Model:
306
+ r"""Gets information about a specific ``Model`` such as its version
307
+ number, token limits,
308
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
309
+ and other metadata. Refer to the `Gemini models
310
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
311
+ for detailed model information.
312
+
313
+ .. code-block:: python
314
+
315
+ # This snippet has been automatically generated and should be regarded as a
316
+ # code template only.
317
+ # It will require modifications to work:
318
+ # - It may require correct/in-range values for request initialization.
319
+ # - It may require specifying regional endpoints when creating the service
320
+ # client as shown in:
321
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
322
+ from google.ai import generativelanguage_v1alpha
323
+
324
+ async def sample_get_model():
325
+ # Create a client
326
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
327
+
328
+ # Initialize request argument(s)
329
+ request = generativelanguage_v1alpha.GetModelRequest(
330
+ name="name_value",
331
+ )
332
+
333
+ # Make the request
334
+ response = await client.get_model(request=request)
335
+
336
+ # Handle the response
337
+ print(response)
338
+
339
+ Args:
340
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.GetModelRequest, dict]]):
341
+ The request object. Request for getting information about
342
+ a specific Model.
343
+ name (:class:`str`):
344
+ Required. The resource name of the model.
345
+
346
+ This name should match a model name returned by the
347
+ ``ListModels`` method.
348
+
349
+ Format: ``models/{model}``
350
+
351
+ This corresponds to the ``name`` field
352
+ on the ``request`` instance; if ``request`` is provided, this
353
+ should not be set.
354
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
355
+ should be retried.
356
+ timeout (float): The timeout for this request.
357
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
358
+ sent along with the request as metadata. Normally, each value must be of type `str`,
359
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
360
+ be of type `bytes`.
361
+
362
+ Returns:
363
+ google.ai.generativelanguage_v1alpha.types.Model:
364
+ Information about a Generative
365
+ Language Model.
366
+
367
+ """
368
+ # Create or coerce a protobuf request object.
369
+ # - Quick check: If we got a request object, we should *not* have
370
+ # gotten any keyword arguments that map to the request.
371
+ has_flattened_params = any([name])
372
+ if request is not None and has_flattened_params:
373
+ raise ValueError(
374
+ "If the `request` argument is set, then none of "
375
+ "the individual field arguments should be set."
376
+ )
377
+
378
+ # - Use the request object if provided (there's no risk of modifying the input as
379
+ # there are no flattened fields), or create one.
380
+ if not isinstance(request, model_service.GetModelRequest):
381
+ request = model_service.GetModelRequest(request)
382
+
383
+ # If we have keyword arguments corresponding to fields on the
384
+ # request, apply these.
385
+ if name is not None:
386
+ request.name = name
387
+
388
+ # Wrap the RPC method; this adds retry and timeout information,
389
+ # and friendly error handling.
390
+ rpc = self._client._transport._wrapped_methods[
391
+ self._client._transport.get_model
392
+ ]
393
+
394
+ # Certain fields should be provided within the metadata header;
395
+ # add these here.
396
+ metadata = tuple(metadata) + (
397
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
398
+ )
399
+
400
+ # Validate the universe domain.
401
+ self._client._validate_universe_domain()
402
+
403
+ # Send the request.
404
+ response = await rpc(
405
+ request,
406
+ retry=retry,
407
+ timeout=timeout,
408
+ metadata=metadata,
409
+ )
410
+
411
+ # Done; return the response.
412
+ return response
413
+
414
+ async def list_models(
415
+ self,
416
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
417
+ *,
418
+ page_size: Optional[int] = None,
419
+ page_token: Optional[str] = None,
420
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
421
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
422
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
423
+ ) -> pagers.ListModelsAsyncPager:
424
+ r"""Lists the
425
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
426
+ available through the Gemini API.
427
+
428
+ .. code-block:: python
429
+
430
+ # This snippet has been automatically generated and should be regarded as a
431
+ # code template only.
432
+ # It will require modifications to work:
433
+ # - It may require correct/in-range values for request initialization.
434
+ # - It may require specifying regional endpoints when creating the service
435
+ # client as shown in:
436
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
437
+ from google.ai import generativelanguage_v1alpha
438
+
439
+ async def sample_list_models():
440
+ # Create a client
441
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
442
+
443
+ # Initialize request argument(s)
444
+ request = generativelanguage_v1alpha.ListModelsRequest(
445
+ )
446
+
447
+ # Make the request
448
+ page_result = client.list_models(request=request)
449
+
450
+ # Handle the response
451
+ async for response in page_result:
452
+ print(response)
453
+
454
+ Args:
455
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.ListModelsRequest, dict]]):
456
+ The request object. Request for listing all Models.
457
+ page_size (:class:`int`):
458
+ The maximum number of ``Models`` to return (per page).
459
+
460
+ If unspecified, 50 models will be returned per page.
461
+ This method returns at most 1000 models per page, even
462
+ if you pass a larger page_size.
463
+
464
+ This corresponds to the ``page_size`` field
465
+ on the ``request`` instance; if ``request`` is provided, this
466
+ should not be set.
467
+ page_token (:class:`str`):
468
+ A page token, received from a previous ``ListModels``
469
+ call.
470
+
471
+ Provide the ``page_token`` returned by one request as an
472
+ argument to the next request to retrieve the next page.
473
+
474
+ When paginating, all other parameters provided to
475
+ ``ListModels`` must match the call that provided the
476
+ page token.
477
+
478
+ This corresponds to the ``page_token`` field
479
+ on the ``request`` instance; if ``request`` is provided, this
480
+ should not be set.
481
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
482
+ should be retried.
483
+ timeout (float): The timeout for this request.
484
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
485
+ sent along with the request as metadata. Normally, each value must be of type `str`,
486
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
487
+ be of type `bytes`.
488
+
489
+ Returns:
490
+ google.ai.generativelanguage_v1alpha.services.model_service.pagers.ListModelsAsyncPager:
491
+ Response from ListModel containing a paginated list of
492
+ Models.
493
+
494
+ Iterating over this object will yield results and
495
+ resolve additional pages automatically.
496
+
497
+ """
498
+ # Create or coerce a protobuf request object.
499
+ # - Quick check: If we got a request object, we should *not* have
500
+ # gotten any keyword arguments that map to the request.
501
+ has_flattened_params = any([page_size, page_token])
502
+ if request is not None and has_flattened_params:
503
+ raise ValueError(
504
+ "If the `request` argument is set, then none of "
505
+ "the individual field arguments should be set."
506
+ )
507
+
508
+ # - Use the request object if provided (there's no risk of modifying the input as
509
+ # there are no flattened fields), or create one.
510
+ if not isinstance(request, model_service.ListModelsRequest):
511
+ request = model_service.ListModelsRequest(request)
512
+
513
+ # If we have keyword arguments corresponding to fields on the
514
+ # request, apply these.
515
+ if page_size is not None:
516
+ request.page_size = page_size
517
+ if page_token is not None:
518
+ request.page_token = page_token
519
+
520
+ # Wrap the RPC method; this adds retry and timeout information,
521
+ # and friendly error handling.
522
+ rpc = self._client._transport._wrapped_methods[
523
+ self._client._transport.list_models
524
+ ]
525
+
526
+ # Validate the universe domain.
527
+ self._client._validate_universe_domain()
528
+
529
+ # Send the request.
530
+ response = await rpc(
531
+ request,
532
+ retry=retry,
533
+ timeout=timeout,
534
+ metadata=metadata,
535
+ )
536
+
537
+ # This method is paged; wrap the response in a pager, which provides
538
+ # an `__aiter__` convenience method.
539
+ response = pagers.ListModelsAsyncPager(
540
+ method=rpc,
541
+ request=request,
542
+ response=response,
543
+ retry=retry,
544
+ timeout=timeout,
545
+ metadata=metadata,
546
+ )
547
+
548
+ # Done; return the response.
549
+ return response
550
+
551
+ async def get_tuned_model(
552
+ self,
553
+ request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None,
554
+ *,
555
+ name: Optional[str] = None,
556
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
557
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
558
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
559
+ ) -> tuned_model.TunedModel:
560
+ r"""Gets information about a specific TunedModel.
561
+
562
+ .. code-block:: python
563
+
564
+ # This snippet has been automatically generated and should be regarded as a
565
+ # code template only.
566
+ # It will require modifications to work:
567
+ # - It may require correct/in-range values for request initialization.
568
+ # - It may require specifying regional endpoints when creating the service
569
+ # client as shown in:
570
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
571
+ from google.ai import generativelanguage_v1alpha
572
+
573
+ async def sample_get_tuned_model():
574
+ # Create a client
575
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
576
+
577
+ # Initialize request argument(s)
578
+ request = generativelanguage_v1alpha.GetTunedModelRequest(
579
+ name="name_value",
580
+ )
581
+
582
+ # Make the request
583
+ response = await client.get_tuned_model(request=request)
584
+
585
+ # Handle the response
586
+ print(response)
587
+
588
+ Args:
589
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.GetTunedModelRequest, dict]]):
590
+ The request object. Request for getting information about
591
+ a specific Model.
592
+ name (:class:`str`):
593
+ Required. The resource name of the model.
594
+
595
+ Format: ``tunedModels/my-model-id``
596
+
597
+ This corresponds to the ``name`` field
598
+ on the ``request`` instance; if ``request`` is provided, this
599
+ should not be set.
600
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
601
+ should be retried.
602
+ timeout (float): The timeout for this request.
603
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
604
+ sent along with the request as metadata. Normally, each value must be of type `str`,
605
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
606
+ be of type `bytes`.
607
+
608
+ Returns:
609
+ google.ai.generativelanguage_v1alpha.types.TunedModel:
610
+ A fine-tuned model created using
611
+ ModelService.CreateTunedModel.
612
+
613
+ """
614
+ # Create or coerce a protobuf request object.
615
+ # - Quick check: If we got a request object, we should *not* have
616
+ # gotten any keyword arguments that map to the request.
617
+ has_flattened_params = any([name])
618
+ if request is not None and has_flattened_params:
619
+ raise ValueError(
620
+ "If the `request` argument is set, then none of "
621
+ "the individual field arguments should be set."
622
+ )
623
+
624
+ # - Use the request object if provided (there's no risk of modifying the input as
625
+ # there are no flattened fields), or create one.
626
+ if not isinstance(request, model_service.GetTunedModelRequest):
627
+ request = model_service.GetTunedModelRequest(request)
628
+
629
+ # If we have keyword arguments corresponding to fields on the
630
+ # request, apply these.
631
+ if name is not None:
632
+ request.name = name
633
+
634
+ # Wrap the RPC method; this adds retry and timeout information,
635
+ # and friendly error handling.
636
+ rpc = self._client._transport._wrapped_methods[
637
+ self._client._transport.get_tuned_model
638
+ ]
639
+
640
+ # Certain fields should be provided within the metadata header;
641
+ # add these here.
642
+ metadata = tuple(metadata) + (
643
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
644
+ )
645
+
646
+ # Validate the universe domain.
647
+ self._client._validate_universe_domain()
648
+
649
+ # Send the request.
650
+ response = await rpc(
651
+ request,
652
+ retry=retry,
653
+ timeout=timeout,
654
+ metadata=metadata,
655
+ )
656
+
657
+ # Done; return the response.
658
+ return response
659
+
660
+ async def list_tuned_models(
661
+ self,
662
+ request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None,
663
+ *,
664
+ page_size: Optional[int] = None,
665
+ page_token: Optional[str] = None,
666
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
667
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
668
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
669
+ ) -> pagers.ListTunedModelsAsyncPager:
670
+ r"""Lists created tuned models.
671
+
672
+ .. code-block:: python
673
+
674
+ # This snippet has been automatically generated and should be regarded as a
675
+ # code template only.
676
+ # It will require modifications to work:
677
+ # - It may require correct/in-range values for request initialization.
678
+ # - It may require specifying regional endpoints when creating the service
679
+ # client as shown in:
680
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
681
+ from google.ai import generativelanguage_v1alpha
682
+
683
+ async def sample_list_tuned_models():
684
+ # Create a client
685
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
686
+
687
+ # Initialize request argument(s)
688
+ request = generativelanguage_v1alpha.ListTunedModelsRequest(
689
+ )
690
+
691
+ # Make the request
692
+ page_result = client.list_tuned_models(request=request)
693
+
694
+ # Handle the response
695
+ async for response in page_result:
696
+ print(response)
697
+
698
+ Args:
699
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.ListTunedModelsRequest, dict]]):
700
+ The request object. Request for listing TunedModels.
701
+ page_size (:class:`int`):
702
+ Optional. The maximum number of ``TunedModels`` to
703
+ return (per page). The service may return fewer tuned
704
+ models.
705
+
706
+ If unspecified, at most 10 tuned models will be
707
+ returned. This method returns at most 1000 models per
708
+ page, even if you pass a larger page_size.
709
+
710
+ This corresponds to the ``page_size`` field
711
+ on the ``request`` instance; if ``request`` is provided, this
712
+ should not be set.
713
+ page_token (:class:`str`):
714
+ Optional. A page token, received from a previous
715
+ ``ListTunedModels`` call.
716
+
717
+ Provide the ``page_token`` returned by one request as an
718
+ argument to the next request to retrieve the next page.
719
+
720
+ When paginating, all other parameters provided to
721
+ ``ListTunedModels`` must match the call that provided
722
+ the page token.
723
+
724
+ This corresponds to the ``page_token`` field
725
+ on the ``request`` instance; if ``request`` is provided, this
726
+ should not be set.
727
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
728
+ should be retried.
729
+ timeout (float): The timeout for this request.
730
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
731
+ sent along with the request as metadata. Normally, each value must be of type `str`,
732
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
733
+ be of type `bytes`.
734
+
735
+ Returns:
736
+ google.ai.generativelanguage_v1alpha.services.model_service.pagers.ListTunedModelsAsyncPager:
737
+ Response from ListTunedModels containing a paginated
738
+ list of Models.
739
+
740
+ Iterating over this object will yield results and
741
+ resolve additional pages automatically.
742
+
743
+ """
744
+ # Create or coerce a protobuf request object.
745
+ # - Quick check: If we got a request object, we should *not* have
746
+ # gotten any keyword arguments that map to the request.
747
+ has_flattened_params = any([page_size, page_token])
748
+ if request is not None and has_flattened_params:
749
+ raise ValueError(
750
+ "If the `request` argument is set, then none of "
751
+ "the individual field arguments should be set."
752
+ )
753
+
754
+ # - Use the request object if provided (there's no risk of modifying the input as
755
+ # there are no flattened fields), or create one.
756
+ if not isinstance(request, model_service.ListTunedModelsRequest):
757
+ request = model_service.ListTunedModelsRequest(request)
758
+
759
+ # If we have keyword arguments corresponding to fields on the
760
+ # request, apply these.
761
+ if page_size is not None:
762
+ request.page_size = page_size
763
+ if page_token is not None:
764
+ request.page_token = page_token
765
+
766
+ # Wrap the RPC method; this adds retry and timeout information,
767
+ # and friendly error handling.
768
+ rpc = self._client._transport._wrapped_methods[
769
+ self._client._transport.list_tuned_models
770
+ ]
771
+
772
+ # Validate the universe domain.
773
+ self._client._validate_universe_domain()
774
+
775
+ # Send the request.
776
+ response = await rpc(
777
+ request,
778
+ retry=retry,
779
+ timeout=timeout,
780
+ metadata=metadata,
781
+ )
782
+
783
+ # This method is paged; wrap the response in a pager, which provides
784
+ # an `__aiter__` convenience method.
785
+ response = pagers.ListTunedModelsAsyncPager(
786
+ method=rpc,
787
+ request=request,
788
+ response=response,
789
+ retry=retry,
790
+ timeout=timeout,
791
+ metadata=metadata,
792
+ )
793
+
794
+ # Done; return the response.
795
+ return response
796
+
797
+ async def create_tuned_model(
798
+ self,
799
+ request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None,
800
+ *,
801
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
802
+ tuned_model_id: Optional[str] = None,
803
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
804
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
805
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
806
+ ) -> operation_async.AsyncOperation:
807
+ r"""Creates a tuned model. Check intermediate tuning progress (if
808
+ any) through the [google.longrunning.Operations] service.
809
+
810
+ Access status and results through the Operations service.
811
+ Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
812
+
813
+ .. code-block:: python
814
+
815
+ # This snippet has been automatically generated and should be regarded as a
816
+ # code template only.
817
+ # It will require modifications to work:
818
+ # - It may require correct/in-range values for request initialization.
819
+ # - It may require specifying regional endpoints when creating the service
820
+ # client as shown in:
821
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
822
+ from google.ai import generativelanguage_v1alpha
823
+
824
+ async def sample_create_tuned_model():
825
+ # Create a client
826
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
827
+
828
+ # Initialize request argument(s)
829
+ request = generativelanguage_v1alpha.CreateTunedModelRequest(
830
+ )
831
+
832
+ # Make the request
833
+ operation = client.create_tuned_model(request=request)
834
+
835
+ print("Waiting for operation to complete...")
836
+
837
+ response = (await operation).result()
838
+
839
+ # Handle the response
840
+ print(response)
841
+
842
+ Args:
843
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.CreateTunedModelRequest, dict]]):
844
+ The request object. Request to create a TunedModel.
845
+ tuned_model (:class:`google.ai.generativelanguage_v1alpha.types.TunedModel`):
846
+ Required. The tuned model to create.
847
+ This corresponds to the ``tuned_model`` field
848
+ on the ``request`` instance; if ``request`` is provided, this
849
+ should not be set.
850
+ tuned_model_id (:class:`str`):
851
+ Optional. The unique id for the tuned model if
852
+ specified. This value should be up to 40 characters, the
853
+ first character must be a letter, the last could be a
854
+ letter or a number. The id must match the regular
855
+ expression: ``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
856
+
857
+ This corresponds to the ``tuned_model_id`` field
858
+ on the ``request`` instance; if ``request`` is provided, this
859
+ should not be set.
860
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
861
+ should be retried.
862
+ timeout (float): The timeout for this request.
863
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
864
+ sent along with the request as metadata. Normally, each value must be of type `str`,
865
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
866
+ be of type `bytes`.
867
+
868
+ Returns:
869
+ google.api_core.operation_async.AsyncOperation:
870
+ An object representing a long-running operation.
871
+
872
+ The result type for the operation will be
873
+ :class:`google.ai.generativelanguage_v1alpha.types.TunedModel`
874
+ A fine-tuned model created using
875
+ ModelService.CreateTunedModel.
876
+
877
+ """
878
+ # Create or coerce a protobuf request object.
879
+ # - Quick check: If we got a request object, we should *not* have
880
+ # gotten any keyword arguments that map to the request.
881
+ has_flattened_params = any([tuned_model, tuned_model_id])
882
+ if request is not None and has_flattened_params:
883
+ raise ValueError(
884
+ "If the `request` argument is set, then none of "
885
+ "the individual field arguments should be set."
886
+ )
887
+
888
+ # - Use the request object if provided (there's no risk of modifying the input as
889
+ # there are no flattened fields), or create one.
890
+ if not isinstance(request, model_service.CreateTunedModelRequest):
891
+ request = model_service.CreateTunedModelRequest(request)
892
+
893
+ # If we have keyword arguments corresponding to fields on the
894
+ # request, apply these.
895
+ if tuned_model is not None:
896
+ request.tuned_model = tuned_model
897
+ if tuned_model_id is not None:
898
+ request.tuned_model_id = tuned_model_id
899
+
900
+ # Wrap the RPC method; this adds retry and timeout information,
901
+ # and friendly error handling.
902
+ rpc = self._client._transport._wrapped_methods[
903
+ self._client._transport.create_tuned_model
904
+ ]
905
+
906
+ # Validate the universe domain.
907
+ self._client._validate_universe_domain()
908
+
909
+ # Send the request.
910
+ response = await rpc(
911
+ request,
912
+ retry=retry,
913
+ timeout=timeout,
914
+ metadata=metadata,
915
+ )
916
+
917
+ # Wrap the response in an operation future.
918
+ response = operation_async.from_gapic(
919
+ response,
920
+ self._client._transport.operations_client,
921
+ gag_tuned_model.TunedModel,
922
+ metadata_type=model_service.CreateTunedModelMetadata,
923
+ )
924
+
925
+ # Done; return the response.
926
+ return response
927
+
928
+ async def update_tuned_model(
929
+ self,
930
+ request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None,
931
+ *,
932
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
933
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
934
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
935
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
936
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
937
+ ) -> gag_tuned_model.TunedModel:
938
+ r"""Updates a tuned model.
939
+
940
+ .. code-block:: python
941
+
942
+ # This snippet has been automatically generated and should be regarded as a
943
+ # code template only.
944
+ # It will require modifications to work:
945
+ # - It may require correct/in-range values for request initialization.
946
+ # - It may require specifying regional endpoints when creating the service
947
+ # client as shown in:
948
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
949
+ from google.ai import generativelanguage_v1alpha
950
+
951
+ async def sample_update_tuned_model():
952
+ # Create a client
953
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
954
+
955
+ # Initialize request argument(s)
956
+ request = generativelanguage_v1alpha.UpdateTunedModelRequest(
957
+ )
958
+
959
+ # Make the request
960
+ response = await client.update_tuned_model(request=request)
961
+
962
+ # Handle the response
963
+ print(response)
964
+
965
+ Args:
966
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.UpdateTunedModelRequest, dict]]):
967
+ The request object. Request to update a TunedModel.
968
+ tuned_model (:class:`google.ai.generativelanguage_v1alpha.types.TunedModel`):
969
+ Required. The tuned model to update.
970
+ This corresponds to the ``tuned_model`` field
971
+ on the ``request`` instance; if ``request`` is provided, this
972
+ should not be set.
973
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
974
+ Optional. The list of fields to
975
+ update.
976
+
977
+ This corresponds to the ``update_mask`` field
978
+ on the ``request`` instance; if ``request`` is provided, this
979
+ should not be set.
980
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
981
+ should be retried.
982
+ timeout (float): The timeout for this request.
983
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
984
+ sent along with the request as metadata. Normally, each value must be of type `str`,
985
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
986
+ be of type `bytes`.
987
+
988
+ Returns:
989
+ google.ai.generativelanguage_v1alpha.types.TunedModel:
990
+ A fine-tuned model created using
991
+ ModelService.CreateTunedModel.
992
+
993
+ """
994
+ # Create or coerce a protobuf request object.
995
+ # - Quick check: If we got a request object, we should *not* have
996
+ # gotten any keyword arguments that map to the request.
997
+ has_flattened_params = any([tuned_model, update_mask])
998
+ if request is not None and has_flattened_params:
999
+ raise ValueError(
1000
+ "If the `request` argument is set, then none of "
1001
+ "the individual field arguments should be set."
1002
+ )
1003
+
1004
+ # - Use the request object if provided (there's no risk of modifying the input as
1005
+ # there are no flattened fields), or create one.
1006
+ if not isinstance(request, model_service.UpdateTunedModelRequest):
1007
+ request = model_service.UpdateTunedModelRequest(request)
1008
+
1009
+ # If we have keyword arguments corresponding to fields on the
1010
+ # request, apply these.
1011
+ if tuned_model is not None:
1012
+ request.tuned_model = tuned_model
1013
+ if update_mask is not None:
1014
+ request.update_mask = update_mask
1015
+
1016
+ # Wrap the RPC method; this adds retry and timeout information,
1017
+ # and friendly error handling.
1018
+ rpc = self._client._transport._wrapped_methods[
1019
+ self._client._transport.update_tuned_model
1020
+ ]
1021
+
1022
+ # Certain fields should be provided within the metadata header;
1023
+ # add these here.
1024
+ metadata = tuple(metadata) + (
1025
+ gapic_v1.routing_header.to_grpc_metadata(
1026
+ (("tuned_model.name", request.tuned_model.name),)
1027
+ ),
1028
+ )
1029
+
1030
+ # Validate the universe domain.
1031
+ self._client._validate_universe_domain()
1032
+
1033
+ # Send the request.
1034
+ response = await rpc(
1035
+ request,
1036
+ retry=retry,
1037
+ timeout=timeout,
1038
+ metadata=metadata,
1039
+ )
1040
+
1041
+ # Done; return the response.
1042
+ return response
1043
+
1044
+ async def delete_tuned_model(
1045
+ self,
1046
+ request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None,
1047
+ *,
1048
+ name: Optional[str] = None,
1049
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1050
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1051
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1052
+ ) -> None:
1053
+ r"""Deletes a tuned model.
1054
+
1055
+ .. code-block:: python
1056
+
1057
+ # This snippet has been automatically generated and should be regarded as a
1058
+ # code template only.
1059
+ # It will require modifications to work:
1060
+ # - It may require correct/in-range values for request initialization.
1061
+ # - It may require specifying regional endpoints when creating the service
1062
+ # client as shown in:
1063
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1064
+ from google.ai import generativelanguage_v1alpha
1065
+
1066
+ async def sample_delete_tuned_model():
1067
+ # Create a client
1068
+ client = generativelanguage_v1alpha.ModelServiceAsyncClient()
1069
+
1070
+ # Initialize request argument(s)
1071
+ request = generativelanguage_v1alpha.DeleteTunedModelRequest(
1072
+ name="name_value",
1073
+ )
1074
+
1075
+ # Make the request
1076
+ await client.delete_tuned_model(request=request)
1077
+
1078
+ Args:
1079
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.DeleteTunedModelRequest, dict]]):
1080
+ The request object. Request to delete a TunedModel.
1081
+ name (:class:`str`):
1082
+ Required. The resource name of the model. Format:
1083
+ ``tunedModels/my-model-id``
1084
+
1085
+ This corresponds to the ``name`` field
1086
+ on the ``request`` instance; if ``request`` is provided, this
1087
+ should not be set.
1088
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
1089
+ should be retried.
1090
+ timeout (float): The timeout for this request.
1091
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1092
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1093
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1094
+ be of type `bytes`.
1095
+ """
1096
+ # Create or coerce a protobuf request object.
1097
+ # - Quick check: If we got a request object, we should *not* have
1098
+ # gotten any keyword arguments that map to the request.
1099
+ has_flattened_params = any([name])
1100
+ if request is not None and has_flattened_params:
1101
+ raise ValueError(
1102
+ "If the `request` argument is set, then none of "
1103
+ "the individual field arguments should be set."
1104
+ )
1105
+
1106
+ # - Use the request object if provided (there's no risk of modifying the input as
1107
+ # there are no flattened fields), or create one.
1108
+ if not isinstance(request, model_service.DeleteTunedModelRequest):
1109
+ request = model_service.DeleteTunedModelRequest(request)
1110
+
1111
+ # If we have keyword arguments corresponding to fields on the
1112
+ # request, apply these.
1113
+ if name is not None:
1114
+ request.name = name
1115
+
1116
+ # Wrap the RPC method; this adds retry and timeout information,
1117
+ # and friendly error handling.
1118
+ rpc = self._client._transport._wrapped_methods[
1119
+ self._client._transport.delete_tuned_model
1120
+ ]
1121
+
1122
+ # Certain fields should be provided within the metadata header;
1123
+ # add these here.
1124
+ metadata = tuple(metadata) + (
1125
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1126
+ )
1127
+
1128
+ # Validate the universe domain.
1129
+ self._client._validate_universe_domain()
1130
+
1131
+ # Send the request.
1132
+ await rpc(
1133
+ request,
1134
+ retry=retry,
1135
+ timeout=timeout,
1136
+ metadata=metadata,
1137
+ )
1138
+
1139
+ async def list_operations(
1140
+ self,
1141
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
1142
+ *,
1143
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1144
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1145
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1146
+ ) -> operations_pb2.ListOperationsResponse:
1147
+ r"""Lists operations that match the specified filter in the request.
1148
+
1149
+ Args:
1150
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
1151
+ The request object. Request message for
1152
+ `ListOperations` method.
1153
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
1154
+ if any, should be retried.
1155
+ timeout (float): The timeout for this request.
1156
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1157
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1158
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1159
+ be of type `bytes`.
1160
+ Returns:
1161
+ ~.operations_pb2.ListOperationsResponse:
1162
+ Response message for ``ListOperations`` method.
1163
+ """
1164
+ # Create or coerce a protobuf request object.
1165
+ # The request isn't a proto-plus wrapped type,
1166
+ # so it must be constructed via keyword expansion.
1167
+ if isinstance(request, dict):
1168
+ request = operations_pb2.ListOperationsRequest(**request)
1169
+
1170
+ # Wrap the RPC method; this adds retry and timeout information,
1171
+ # and friendly error handling.
1172
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
1173
+
1174
+ # Certain fields should be provided within the metadata header;
1175
+ # add these here.
1176
+ metadata = tuple(metadata) + (
1177
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1178
+ )
1179
+
1180
+ # Validate the universe domain.
1181
+ self._client._validate_universe_domain()
1182
+
1183
+ # Send the request.
1184
+ response = await rpc(
1185
+ request,
1186
+ retry=retry,
1187
+ timeout=timeout,
1188
+ metadata=metadata,
1189
+ )
1190
+
1191
+ # Done; return the response.
1192
+ return response
1193
+
1194
+ async def get_operation(
1195
+ self,
1196
+ request: Optional[operations_pb2.GetOperationRequest] = None,
1197
+ *,
1198
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1199
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1200
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1201
+ ) -> operations_pb2.Operation:
1202
+ r"""Gets the latest state of a long-running operation.
1203
+
1204
+ Args:
1205
+ request (:class:`~.operations_pb2.GetOperationRequest`):
1206
+ The request object. Request message for
1207
+ `GetOperation` method.
1208
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
1209
+ if any, should be retried.
1210
+ timeout (float): The timeout for this request.
1211
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1212
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1213
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1214
+ be of type `bytes`.
1215
+ Returns:
1216
+ ~.operations_pb2.Operation:
1217
+ An ``Operation`` object.
1218
+ """
1219
+ # Create or coerce a protobuf request object.
1220
+ # The request isn't a proto-plus wrapped type,
1221
+ # so it must be constructed via keyword expansion.
1222
+ if isinstance(request, dict):
1223
+ request = operations_pb2.GetOperationRequest(**request)
1224
+
1225
+ # Wrap the RPC method; this adds retry and timeout information,
1226
+ # and friendly error handling.
1227
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
1228
+
1229
+ # Certain fields should be provided within the metadata header;
1230
+ # add these here.
1231
+ metadata = tuple(metadata) + (
1232
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1233
+ )
1234
+
1235
+ # Validate the universe domain.
1236
+ self._client._validate_universe_domain()
1237
+
1238
+ # Send the request.
1239
+ response = await rpc(
1240
+ request,
1241
+ retry=retry,
1242
+ timeout=timeout,
1243
+ metadata=metadata,
1244
+ )
1245
+
1246
+ # Done; return the response.
1247
+ return response
1248
+
1249
    async def __aenter__(self) -> "ModelServiceAsyncClient":
        # Support `async with client:` usage; the client itself is the
        # context object.
        return self
+
1252
    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport (channel/session) on context exit;
        # exceptions are not suppressed (implicit None return).
        await self.transport.close()
+
1255
+
1256
# Default client info reported with API requests, carrying this generated
# package's version.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


# Public surface of this module.
__all__ = ("ModelServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/client.py ADDED
@@ -0,0 +1,1646 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
46
+
47
try:
    # Newer google-api-core exposes a sentinel type for "use the default
    # retry"; prefer it when available.
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError:  # pragma: NO COVER
    # Older google-api-core: fall back to a plain object sentinel.
    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore

try:
    # Structured client logging is only available in newer google-api-core.
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

# Module-level logger for client debug logging.
_LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.api_core import operation # type: ignore
62
+ from google.api_core import operation_async # type: ignore
63
+ from google.longrunning import operations_pb2 # type: ignore
64
+ from google.protobuf import field_mask_pb2 # type: ignore
65
+ from google.protobuf import timestamp_pb2 # type: ignore
66
+
67
+ from google.ai.generativelanguage_v1alpha.services.model_service import pagers
68
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
69
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
70
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
71
+
72
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
73
+ from .transports.grpc import ModelServiceGrpcTransport
74
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
75
+ from .transports.rest import ModelServiceRestTransport
76
+
77
+
78
class ModelServiceClientMeta(type):
    """Metaclass for the ModelService client.

    Exposes class-level helpers for building and retrieving support
    objects (e.g. transport) without polluting client instances.
    """

    # Registry of available transports; insertion order defines the default.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
    _transport_registry["grpc"] = ModelServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
    _transport_registry["rest"] = ModelServiceRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[ModelServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # Without an explicit label, fall back to the first registered
        # transport (the default).
        if not label:
            return next(iter(cls._transport_registry.values()))
        return cls._transport_registry[label]
111
+
112
+
113
+ class ModelServiceClient(metaclass=ModelServiceClientMeta):
114
+ """Provides methods for getting metadata information about
115
+ Generative Models.
116
+ """
117
+
118
+ @staticmethod
119
+ def _get_default_mtls_endpoint(api_endpoint):
120
+ """Converts api endpoint to mTLS endpoint.
121
+
122
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
123
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
124
+ Args:
125
+ api_endpoint (Optional[str]): the api endpoint to convert.
126
+ Returns:
127
+ str: converted mTLS api endpoint.
128
+ """
129
+ if not api_endpoint:
130
+ return api_endpoint
131
+
132
+ mtls_endpoint_re = re.compile(
133
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
134
+ )
135
+
136
+ m = mtls_endpoint_re.match(api_endpoint)
137
+ name, mtls, sandbox, googledomain = m.groups()
138
+ if mtls or not googledomain:
139
+ return api_endpoint
140
+
141
+ if sandbox:
142
+ return api_endpoint.replace(
143
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
144
+ )
145
+
146
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
147
+
148
    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
    # mTLS counterpart of DEFAULT_ENDPOINT, derived at class-definition time
    # (also deprecated in favour of the template below).
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    # Endpoint template parameterized by universe domain.
    _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
    _DEFAULT_UNIVERSE = "googleapis.com"
156
+
157
+ @classmethod
158
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
159
+ """Creates an instance of this client using the provided credentials
160
+ info.
161
+
162
+ Args:
163
+ info (dict): The service account private key info.
164
+ args: Additional arguments to pass to the constructor.
165
+ kwargs: Additional arguments to pass to the constructor.
166
+
167
+ Returns:
168
+ ModelServiceClient: The constructed client.
169
+ """
170
+ credentials = service_account.Credentials.from_service_account_info(info)
171
+ kwargs["credentials"] = credentials
172
+ return cls(*args, **kwargs)
173
+
174
+ @classmethod
175
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
176
+ """Creates an instance of this client using the provided credentials
177
+ file.
178
+
179
+ Args:
180
+ filename (str): The path to the service account private key json
181
+ file.
182
+ args: Additional arguments to pass to the constructor.
183
+ kwargs: Additional arguments to pass to the constructor.
184
+
185
+ Returns:
186
+ ModelServiceClient: The constructed client.
187
+ """
188
+ credentials = service_account.Credentials.from_service_account_file(filename)
189
+ kwargs["credentials"] = credentials
190
+ return cls(*args, **kwargs)
191
+
192
+ from_service_account_json = from_service_account_file
193
+
194
    @property
    def transport(self) -> ModelServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            ModelServiceTransport: The transport used by the client
                instance.
        """
        # Set during __init__; shared by all RPC methods on this client.
        return self._transport
203
+
204
+ @staticmethod
205
+ def model_path(
206
+ model: str,
207
+ ) -> str:
208
+ """Returns a fully-qualified model string."""
209
+ return "models/{model}".format(
210
+ model=model,
211
+ )
212
+
213
+ @staticmethod
214
+ def parse_model_path(path: str) -> Dict[str, str]:
215
+ """Parses a model path into its component segments."""
216
+ m = re.match(r"^models/(?P<model>.+?)$", path)
217
+ return m.groupdict() if m else {}
218
+
219
+ @staticmethod
220
+ def tuned_model_path(
221
+ tuned_model: str,
222
+ ) -> str:
223
+ """Returns a fully-qualified tuned_model string."""
224
+ return "tunedModels/{tuned_model}".format(
225
+ tuned_model=tuned_model,
226
+ )
227
+
228
+ @staticmethod
229
+ def parse_tuned_model_path(path: str) -> Dict[str, str]:
230
+ """Parses a tuned_model path into its component segments."""
231
+ m = re.match(r"^tunedModels/(?P<tuned_model>.+?)$", path)
232
+ return m.groupdict() if m else {}
233
+
234
+ @staticmethod
235
+ def common_billing_account_path(
236
+ billing_account: str,
237
+ ) -> str:
238
+ """Returns a fully-qualified billing_account string."""
239
+ return "billingAccounts/{billing_account}".format(
240
+ billing_account=billing_account,
241
+ )
242
+
243
+ @staticmethod
244
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
245
+ """Parse a billing_account path into its component segments."""
246
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
247
+ return m.groupdict() if m else {}
248
+
249
+ @staticmethod
250
+ def common_folder_path(
251
+ folder: str,
252
+ ) -> str:
253
+ """Returns a fully-qualified folder string."""
254
+ return "folders/{folder}".format(
255
+ folder=folder,
256
+ )
257
+
258
+ @staticmethod
259
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
260
+ """Parse a folder path into its component segments."""
261
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
262
+ return m.groupdict() if m else {}
263
+
264
+ @staticmethod
265
+ def common_organization_path(
266
+ organization: str,
267
+ ) -> str:
268
+ """Returns a fully-qualified organization string."""
269
+ return "organizations/{organization}".format(
270
+ organization=organization,
271
+ )
272
+
273
+ @staticmethod
274
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
275
+ """Parse a organization path into its component segments."""
276
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
277
+ return m.groupdict() if m else {}
278
+
279
+ @staticmethod
280
+ def common_project_path(
281
+ project: str,
282
+ ) -> str:
283
+ """Returns a fully-qualified project string."""
284
+ return "projects/{project}".format(
285
+ project=project,
286
+ )
287
+
288
+ @staticmethod
289
+ def parse_common_project_path(path: str) -> Dict[str, str]:
290
+ """Parse a project path into its component segments."""
291
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
292
+ return m.groupdict() if m else {}
293
+
294
+ @staticmethod
295
+ def common_location_path(
296
+ project: str,
297
+ location: str,
298
+ ) -> str:
299
+ """Returns a fully-qualified location string."""
300
+ return "projects/{project}/locations/{location}".format(
301
+ project=project,
302
+ location=location,
303
+ )
304
+
305
+ @staticmethod
306
+ def parse_common_location_path(path: str) -> Dict[str, str]:
307
+ """Parse a location path into its component segments."""
308
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
309
+ return m.groupdict() if m else {}
310
+
311
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Deprecated. Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` if provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """

        warnings.warn(
            "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
            DeprecationWarning,
        )
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        # NOTE(review): unlike _read_environment_variables, this legacy path
        # does not lower-case the env values before validating them.
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source
382
+
383
+ @staticmethod
384
+ def _read_environment_variables():
385
+ """Returns the environment variables used by the client.
386
+
387
+ Returns:
388
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
389
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
390
+
391
+ Raises:
392
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
393
+ any of ["true", "false"].
394
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
395
+ is not any of ["auto", "never", "always"].
396
+ """
397
+ use_client_cert = os.getenv(
398
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
399
+ ).lower()
400
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
401
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
402
+ if use_client_cert not in ("true", "false"):
403
+ raise ValueError(
404
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
405
+ )
406
+ if use_mtls_endpoint not in ("auto", "never", "always"):
407
+ raise MutualTLSChannelError(
408
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
409
+ )
410
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
411
+
412
+ @staticmethod
413
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
414
+ """Return the client cert source to be used by the client.
415
+
416
+ Args:
417
+ provided_cert_source (bytes): The client certificate source provided.
418
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
419
+
420
+ Returns:
421
+ bytes or None: The client cert source to be used by the client.
422
+ """
423
+ client_cert_source = None
424
+ if use_cert_flag:
425
+ if provided_cert_source:
426
+ client_cert_source = provided_cert_source
427
+ elif mtls.has_default_client_cert_source():
428
+ client_cert_source = mtls.default_client_cert_source()
429
+ return client_cert_source
430
+
431
+ @staticmethod
432
+ def _get_api_endpoint(
433
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
434
+ ):
435
+ """Return the API endpoint used by the client.
436
+
437
+ Args:
438
+ api_override (str): The API endpoint override. If specified, this is always
439
+ the return value of this function and the other arguments are not used.
440
+ client_cert_source (bytes): The client certificate source used by the client.
441
+ universe_domain (str): The universe domain used by the client.
442
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
443
+ Possible values are "always", "auto", or "never".
444
+
445
+ Returns:
446
+ str: The API endpoint to be used by the client.
447
+ """
448
+ if api_override is not None:
449
+ api_endpoint = api_override
450
+ elif use_mtls_endpoint == "always" or (
451
+ use_mtls_endpoint == "auto" and client_cert_source
452
+ ):
453
+ _default_universe = ModelServiceClient._DEFAULT_UNIVERSE
454
+ if universe_domain != _default_universe:
455
+ raise MutualTLSChannelError(
456
+ f"mTLS is not supported in any universe other than {_default_universe}."
457
+ )
458
+ api_endpoint = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
459
+ else:
460
+ api_endpoint = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
461
+ UNIVERSE_DOMAIN=universe_domain
462
+ )
463
+ return api_endpoint
464
+
465
+ @staticmethod
466
+ def _get_universe_domain(
467
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
468
+ ) -> str:
469
+ """Return the universe domain used by the client.
470
+
471
+ Args:
472
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
473
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
474
+
475
+ Returns:
476
+ str: The universe domain to be used by the client.
477
+
478
+ Raises:
479
+ ValueError: If the universe domain is an empty string.
480
+ """
481
+ universe_domain = ModelServiceClient._DEFAULT_UNIVERSE
482
+ if client_universe_domain is not None:
483
+ universe_domain = client_universe_domain
484
+ elif universe_domain_env is not None:
485
+ universe_domain = universe_domain_env
486
+ if len(universe_domain.strip()) == 0:
487
+ raise ValueError("Universe Domain cannot be an empty string.")
488
+ return universe_domain
489
+
490
+ def _validate_universe_domain(self):
491
+ """Validates client's and credentials' universe domains are consistent.
492
+
493
+ Returns:
494
+ bool: True iff the configured universe domain is valid.
495
+
496
+ Raises:
497
+ ValueError: If the configured universe domain is not valid.
498
+ """
499
+
500
+ # NOTE (b/349488459): universe validation is disabled until further notice.
501
+ return True
502
+
503
+ @property
504
+ def api_endpoint(self):
505
+ """Return the API endpoint used by the client instance.
506
+
507
+ Returns:
508
+ str: The API endpoint used by the client instance.
509
+ """
510
+ return self._api_endpoint
511
+
512
+ @property
513
+ def universe_domain(self) -> str:
514
+ """Return the universe domain used by the client instance.
515
+
516
+ Returns:
517
+ str: The universe domain used by the client instance.
518
+ """
519
+ return self._universe_domain
520
+
521
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[
            Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
        ] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the model service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
                The transport to use, or a Callable that constructs and returns a new transport.
                If a Callable is given, it will be called with the same set of initialization
                arguments as used in the ModelServiceTransport constructor.
                If set to None, a transport is chosen automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                Custom options for the client.

                1. The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client when ``transport`` is
                not explicitly provided. Only if this property is not set and
                ``transport`` was not explicitly provided, the endpoint is
                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
                variable, which have one of the following values:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto-switch to the
                default mTLS endpoint if client certificate is present; this is
                the default value).

                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide a client certificate for mTLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

                3. The ``universe_domain`` property can be used to override the
                default "googleapis.com" universe. Note that the ``api_endpoint``
                property still takes precedence; and ``universe_domain`` is
                currently not supported for mTLS.

            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, None, or a ClientOptions
        # instance, and end up with a ClientOptions either way.
        self._client_options = client_options
        if isinstance(self._client_options, dict):
            self._client_options = client_options_lib.from_dict(self._client_options)
        if self._client_options is None:
            self._client_options = client_options_lib.ClientOptions()
        self._client_options = cast(
            client_options_lib.ClientOptions, self._client_options
        )

        universe_domain_opt = getattr(self._client_options, "universe_domain", None)

        # Environment configuration (cert usage, mTLS policy, universe domain
        # env var) feeds the cert-source and universe-domain resolution below.
        (
            self._use_client_cert,
            self._use_mtls_endpoint,
            self._universe_domain_env,
        ) = ModelServiceClient._read_environment_variables()
        self._client_cert_source = ModelServiceClient._get_client_cert_source(
            self._client_options.client_cert_source, self._use_client_cert
        )
        self._universe_domain = ModelServiceClient._get_universe_domain(
            universe_domain_opt, self._universe_domain_env
        )
        self._api_endpoint = None  # updated below, depending on `transport`

        # Initialize the universe domain validation.
        self._is_universe_domain_valid = False

        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
            # Setup logging.
            client_logging.initialize_logging()

        api_key_value = getattr(self._client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        transport_provided = isinstance(transport, ModelServiceTransport)
        if transport_provided:
            # transport is a ModelServiceTransport instance.
            # A pre-built transport already carries its own credentials and
            # scopes, so supplying them here again is an error.
            if credentials or self._client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if self._client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = cast(ModelServiceTransport, transport)
            self._api_endpoint = self._transport.host

        # Endpoint precedence: transport host (above) > api_endpoint option >
        # resolution from cert source / universe domain / mTLS policy.
        self._api_endpoint = self._api_endpoint or ModelServiceClient._get_api_endpoint(
            self._client_options.api_endpoint,
            self._client_cert_source,
            self._universe_domain,
            self._use_mtls_endpoint,
        )

        if not transport_provided:
            import google.auth._default  # type: ignore

            # Exchange an API key for credentials when the installed
            # google-auth supports it.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            transport_init: Union[
                Type[ModelServiceTransport], Callable[..., ModelServiceTransport]
            ] = (
                ModelServiceClient.get_transport_class(transport)
                if isinstance(transport, str) or transport is None
                else cast(Callable[..., ModelServiceTransport], transport)
            )
            # initialize with the provided callable or the passed in class
            self._transport = transport_init(
                credentials=credentials,
                credentials_file=self._client_options.credentials_file,
                host=self._api_endpoint,
                scopes=self._client_options.scopes,
                client_cert_source_for_mtls=self._client_cert_source,
                quota_project_id=self._client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=self._client_options.api_audience,
            )

        # Emit a one-time debug record describing the created client; only for
        # sync transports and only when DEBUG logging is enabled.
        if "async" not in str(self._transport):
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                std_logging.DEBUG
            ):  # pragma: NO COVER
                _LOGGER.debug(
                    "Created client `google.ai.generativelanguage_v1alpha.ModelServiceClient`.",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
                        "universeDomain": getattr(
                            self._transport._credentials, "universe_domain", ""
                        ),
                        "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
                        "credentialsInfo": getattr(
                            self.transport._credentials, "get_cred_info", lambda: None
                        )(),
                    }
                    if hasattr(self._transport, "_credentials")
                    else {
                        "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
                        "credentialsType": None,
                    },
                )
695
+
696
+ def get_model(
697
+ self,
698
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
699
+ *,
700
+ name: Optional[str] = None,
701
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
702
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
703
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
704
+ ) -> model.Model:
705
+ r"""Gets information about a specific ``Model`` such as its version
706
+ number, token limits,
707
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
708
+ and other metadata. Refer to the `Gemini models
709
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
710
+ for detailed model information.
711
+
712
+ .. code-block:: python
713
+
714
+ # This snippet has been automatically generated and should be regarded as a
715
+ # code template only.
716
+ # It will require modifications to work:
717
+ # - It may require correct/in-range values for request initialization.
718
+ # - It may require specifying regional endpoints when creating the service
719
+ # client as shown in:
720
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
721
+ from google.ai import generativelanguage_v1alpha
722
+
723
+ def sample_get_model():
724
+ # Create a client
725
+ client = generativelanguage_v1alpha.ModelServiceClient()
726
+
727
+ # Initialize request argument(s)
728
+ request = generativelanguage_v1alpha.GetModelRequest(
729
+ name="name_value",
730
+ )
731
+
732
+ # Make the request
733
+ response = client.get_model(request=request)
734
+
735
+ # Handle the response
736
+ print(response)
737
+
738
+ Args:
739
+ request (Union[google.ai.generativelanguage_v1alpha.types.GetModelRequest, dict]):
740
+ The request object. Request for getting information about
741
+ a specific Model.
742
+ name (str):
743
+ Required. The resource name of the model.
744
+
745
+ This name should match a model name returned by the
746
+ ``ListModels`` method.
747
+
748
+ Format: ``models/{model}``
749
+
750
+ This corresponds to the ``name`` field
751
+ on the ``request`` instance; if ``request`` is provided, this
752
+ should not be set.
753
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
754
+ should be retried.
755
+ timeout (float): The timeout for this request.
756
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
757
+ sent along with the request as metadata. Normally, each value must be of type `str`,
758
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
759
+ be of type `bytes`.
760
+
761
+ Returns:
762
+ google.ai.generativelanguage_v1alpha.types.Model:
763
+ Information about a Generative
764
+ Language Model.
765
+
766
+ """
767
+ # Create or coerce a protobuf request object.
768
+ # - Quick check: If we got a request object, we should *not* have
769
+ # gotten any keyword arguments that map to the request.
770
+ has_flattened_params = any([name])
771
+ if request is not None and has_flattened_params:
772
+ raise ValueError(
773
+ "If the `request` argument is set, then none of "
774
+ "the individual field arguments should be set."
775
+ )
776
+
777
+ # - Use the request object if provided (there's no risk of modifying the input as
778
+ # there are no flattened fields), or create one.
779
+ if not isinstance(request, model_service.GetModelRequest):
780
+ request = model_service.GetModelRequest(request)
781
+ # If we have keyword arguments corresponding to fields on the
782
+ # request, apply these.
783
+ if name is not None:
784
+ request.name = name
785
+
786
+ # Wrap the RPC method; this adds retry and timeout information,
787
+ # and friendly error handling.
788
+ rpc = self._transport._wrapped_methods[self._transport.get_model]
789
+
790
+ # Certain fields should be provided within the metadata header;
791
+ # add these here.
792
+ metadata = tuple(metadata) + (
793
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
794
+ )
795
+
796
+ # Validate the universe domain.
797
+ self._validate_universe_domain()
798
+
799
+ # Send the request.
800
+ response = rpc(
801
+ request,
802
+ retry=retry,
803
+ timeout=timeout,
804
+ metadata=metadata,
805
+ )
806
+
807
+ # Done; return the response.
808
+ return response
809
+
810
+ def list_models(
811
+ self,
812
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
813
+ *,
814
+ page_size: Optional[int] = None,
815
+ page_token: Optional[str] = None,
816
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
817
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
818
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
819
+ ) -> pagers.ListModelsPager:
820
+ r"""Lists the
821
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
822
+ available through the Gemini API.
823
+
824
+ .. code-block:: python
825
+
826
+ # This snippet has been automatically generated and should be regarded as a
827
+ # code template only.
828
+ # It will require modifications to work:
829
+ # - It may require correct/in-range values for request initialization.
830
+ # - It may require specifying regional endpoints when creating the service
831
+ # client as shown in:
832
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
833
+ from google.ai import generativelanguage_v1alpha
834
+
835
+ def sample_list_models():
836
+ # Create a client
837
+ client = generativelanguage_v1alpha.ModelServiceClient()
838
+
839
+ # Initialize request argument(s)
840
+ request = generativelanguage_v1alpha.ListModelsRequest(
841
+ )
842
+
843
+ # Make the request
844
+ page_result = client.list_models(request=request)
845
+
846
+ # Handle the response
847
+ for response in page_result:
848
+ print(response)
849
+
850
+ Args:
851
+ request (Union[google.ai.generativelanguage_v1alpha.types.ListModelsRequest, dict]):
852
+ The request object. Request for listing all Models.
853
+ page_size (int):
854
+ The maximum number of ``Models`` to return (per page).
855
+
856
+ If unspecified, 50 models will be returned per page.
857
+ This method returns at most 1000 models per page, even
858
+ if you pass a larger page_size.
859
+
860
+ This corresponds to the ``page_size`` field
861
+ on the ``request`` instance; if ``request`` is provided, this
862
+ should not be set.
863
+ page_token (str):
864
+ A page token, received from a previous ``ListModels``
865
+ call.
866
+
867
+ Provide the ``page_token`` returned by one request as an
868
+ argument to the next request to retrieve the next page.
869
+
870
+ When paginating, all other parameters provided to
871
+ ``ListModels`` must match the call that provided the
872
+ page token.
873
+
874
+ This corresponds to the ``page_token`` field
875
+ on the ``request`` instance; if ``request`` is provided, this
876
+ should not be set.
877
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
878
+ should be retried.
879
+ timeout (float): The timeout for this request.
880
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
881
+ sent along with the request as metadata. Normally, each value must be of type `str`,
882
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
883
+ be of type `bytes`.
884
+
885
+ Returns:
886
+ google.ai.generativelanguage_v1alpha.services.model_service.pagers.ListModelsPager:
887
+ Response from ListModel containing a paginated list of
888
+ Models.
889
+
890
+ Iterating over this object will yield results and
891
+ resolve additional pages automatically.
892
+
893
+ """
894
+ # Create or coerce a protobuf request object.
895
+ # - Quick check: If we got a request object, we should *not* have
896
+ # gotten any keyword arguments that map to the request.
897
+ has_flattened_params = any([page_size, page_token])
898
+ if request is not None and has_flattened_params:
899
+ raise ValueError(
900
+ "If the `request` argument is set, then none of "
901
+ "the individual field arguments should be set."
902
+ )
903
+
904
+ # - Use the request object if provided (there's no risk of modifying the input as
905
+ # there are no flattened fields), or create one.
906
+ if not isinstance(request, model_service.ListModelsRequest):
907
+ request = model_service.ListModelsRequest(request)
908
+ # If we have keyword arguments corresponding to fields on the
909
+ # request, apply these.
910
+ if page_size is not None:
911
+ request.page_size = page_size
912
+ if page_token is not None:
913
+ request.page_token = page_token
914
+
915
+ # Wrap the RPC method; this adds retry and timeout information,
916
+ # and friendly error handling.
917
+ rpc = self._transport._wrapped_methods[self._transport.list_models]
918
+
919
+ # Validate the universe domain.
920
+ self._validate_universe_domain()
921
+
922
+ # Send the request.
923
+ response = rpc(
924
+ request,
925
+ retry=retry,
926
+ timeout=timeout,
927
+ metadata=metadata,
928
+ )
929
+
930
+ # This method is paged; wrap the response in a pager, which provides
931
+ # an `__iter__` convenience method.
932
+ response = pagers.ListModelsPager(
933
+ method=rpc,
934
+ request=request,
935
+ response=response,
936
+ retry=retry,
937
+ timeout=timeout,
938
+ metadata=metadata,
939
+ )
940
+
941
+ # Done; return the response.
942
+ return response
943
+
944
+ def get_tuned_model(
945
+ self,
946
+ request: Optional[Union[model_service.GetTunedModelRequest, dict]] = None,
947
+ *,
948
+ name: Optional[str] = None,
949
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
950
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
951
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
952
+ ) -> tuned_model.TunedModel:
953
+ r"""Gets information about a specific TunedModel.
954
+
955
+ .. code-block:: python
956
+
957
+ # This snippet has been automatically generated and should be regarded as a
958
+ # code template only.
959
+ # It will require modifications to work:
960
+ # - It may require correct/in-range values for request initialization.
961
+ # - It may require specifying regional endpoints when creating the service
962
+ # client as shown in:
963
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
964
+ from google.ai import generativelanguage_v1alpha
965
+
966
+ def sample_get_tuned_model():
967
+ # Create a client
968
+ client = generativelanguage_v1alpha.ModelServiceClient()
969
+
970
+ # Initialize request argument(s)
971
+ request = generativelanguage_v1alpha.GetTunedModelRequest(
972
+ name="name_value",
973
+ )
974
+
975
+ # Make the request
976
+ response = client.get_tuned_model(request=request)
977
+
978
+ # Handle the response
979
+ print(response)
980
+
981
+ Args:
982
+ request (Union[google.ai.generativelanguage_v1alpha.types.GetTunedModelRequest, dict]):
983
+ The request object. Request for getting information about
984
+ a specific Model.
985
+ name (str):
986
+ Required. The resource name of the model.
987
+
988
+ Format: ``tunedModels/my-model-id``
989
+
990
+ This corresponds to the ``name`` field
991
+ on the ``request`` instance; if ``request`` is provided, this
992
+ should not be set.
993
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
994
+ should be retried.
995
+ timeout (float): The timeout for this request.
996
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
997
+ sent along with the request as metadata. Normally, each value must be of type `str`,
998
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
999
+ be of type `bytes`.
1000
+
1001
+ Returns:
1002
+ google.ai.generativelanguage_v1alpha.types.TunedModel:
1003
+ A fine-tuned model created using
1004
+ ModelService.CreateTunedModel.
1005
+
1006
+ """
1007
+ # Create or coerce a protobuf request object.
1008
+ # - Quick check: If we got a request object, we should *not* have
1009
+ # gotten any keyword arguments that map to the request.
1010
+ has_flattened_params = any([name])
1011
+ if request is not None and has_flattened_params:
1012
+ raise ValueError(
1013
+ "If the `request` argument is set, then none of "
1014
+ "the individual field arguments should be set."
1015
+ )
1016
+
1017
+ # - Use the request object if provided (there's no risk of modifying the input as
1018
+ # there are no flattened fields), or create one.
1019
+ if not isinstance(request, model_service.GetTunedModelRequest):
1020
+ request = model_service.GetTunedModelRequest(request)
1021
+ # If we have keyword arguments corresponding to fields on the
1022
+ # request, apply these.
1023
+ if name is not None:
1024
+ request.name = name
1025
+
1026
+ # Wrap the RPC method; this adds retry and timeout information,
1027
+ # and friendly error handling.
1028
+ rpc = self._transport._wrapped_methods[self._transport.get_tuned_model]
1029
+
1030
+ # Certain fields should be provided within the metadata header;
1031
+ # add these here.
1032
+ metadata = tuple(metadata) + (
1033
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1034
+ )
1035
+
1036
+ # Validate the universe domain.
1037
+ self._validate_universe_domain()
1038
+
1039
+ # Send the request.
1040
+ response = rpc(
1041
+ request,
1042
+ retry=retry,
1043
+ timeout=timeout,
1044
+ metadata=metadata,
1045
+ )
1046
+
1047
+ # Done; return the response.
1048
+ return response
1049
+
1050
+ def list_tuned_models(
1051
+ self,
1052
+ request: Optional[Union[model_service.ListTunedModelsRequest, dict]] = None,
1053
+ *,
1054
+ page_size: Optional[int] = None,
1055
+ page_token: Optional[str] = None,
1056
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1057
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1058
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1059
+ ) -> pagers.ListTunedModelsPager:
1060
+ r"""Lists created tuned models.
1061
+
1062
+ .. code-block:: python
1063
+
1064
+ # This snippet has been automatically generated and should be regarded as a
1065
+ # code template only.
1066
+ # It will require modifications to work:
1067
+ # - It may require correct/in-range values for request initialization.
1068
+ # - It may require specifying regional endpoints when creating the service
1069
+ # client as shown in:
1070
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1071
+ from google.ai import generativelanguage_v1alpha
1072
+
1073
+ def sample_list_tuned_models():
1074
+ # Create a client
1075
+ client = generativelanguage_v1alpha.ModelServiceClient()
1076
+
1077
+ # Initialize request argument(s)
1078
+ request = generativelanguage_v1alpha.ListTunedModelsRequest(
1079
+ )
1080
+
1081
+ # Make the request
1082
+ page_result = client.list_tuned_models(request=request)
1083
+
1084
+ # Handle the response
1085
+ for response in page_result:
1086
+ print(response)
1087
+
1088
+ Args:
1089
+ request (Union[google.ai.generativelanguage_v1alpha.types.ListTunedModelsRequest, dict]):
1090
+ The request object. Request for listing TunedModels.
1091
+ page_size (int):
1092
+ Optional. The maximum number of ``TunedModels`` to
1093
+ return (per page). The service may return fewer tuned
1094
+ models.
1095
+
1096
+ If unspecified, at most 10 tuned models will be
1097
+ returned. This method returns at most 1000 models per
1098
+ page, even if you pass a larger page_size.
1099
+
1100
+ This corresponds to the ``page_size`` field
1101
+ on the ``request`` instance; if ``request`` is provided, this
1102
+ should not be set.
1103
+ page_token (str):
1104
+ Optional. A page token, received from a previous
1105
+ ``ListTunedModels`` call.
1106
+
1107
+ Provide the ``page_token`` returned by one request as an
1108
+ argument to the next request to retrieve the next page.
1109
+
1110
+ When paginating, all other parameters provided to
1111
+ ``ListTunedModels`` must match the call that provided
1112
+ the page token.
1113
+
1114
+ This corresponds to the ``page_token`` field
1115
+ on the ``request`` instance; if ``request`` is provided, this
1116
+ should not be set.
1117
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1118
+ should be retried.
1119
+ timeout (float): The timeout for this request.
1120
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1121
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1122
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1123
+ be of type `bytes`.
1124
+
1125
+ Returns:
1126
+ google.ai.generativelanguage_v1alpha.services.model_service.pagers.ListTunedModelsPager:
1127
+ Response from ListTunedModels containing a paginated
1128
+ list of Models.
1129
+
1130
+ Iterating over this object will yield results and
1131
+ resolve additional pages automatically.
1132
+
1133
+ """
1134
+ # Create or coerce a protobuf request object.
1135
+ # - Quick check: If we got a request object, we should *not* have
1136
+ # gotten any keyword arguments that map to the request.
1137
+ has_flattened_params = any([page_size, page_token])
1138
+ if request is not None and has_flattened_params:
1139
+ raise ValueError(
1140
+ "If the `request` argument is set, then none of "
1141
+ "the individual field arguments should be set."
1142
+ )
1143
+
1144
+ # - Use the request object if provided (there's no risk of modifying the input as
1145
+ # there are no flattened fields), or create one.
1146
+ if not isinstance(request, model_service.ListTunedModelsRequest):
1147
+ request = model_service.ListTunedModelsRequest(request)
1148
+ # If we have keyword arguments corresponding to fields on the
1149
+ # request, apply these.
1150
+ if page_size is not None:
1151
+ request.page_size = page_size
1152
+ if page_token is not None:
1153
+ request.page_token = page_token
1154
+
1155
+ # Wrap the RPC method; this adds retry and timeout information,
1156
+ # and friendly error handling.
1157
+ rpc = self._transport._wrapped_methods[self._transport.list_tuned_models]
1158
+
1159
+ # Validate the universe domain.
1160
+ self._validate_universe_domain()
1161
+
1162
+ # Send the request.
1163
+ response = rpc(
1164
+ request,
1165
+ retry=retry,
1166
+ timeout=timeout,
1167
+ metadata=metadata,
1168
+ )
1169
+
1170
+ # This method is paged; wrap the response in a pager, which provides
1171
+ # an `__iter__` convenience method.
1172
+ response = pagers.ListTunedModelsPager(
1173
+ method=rpc,
1174
+ request=request,
1175
+ response=response,
1176
+ retry=retry,
1177
+ timeout=timeout,
1178
+ metadata=metadata,
1179
+ )
1180
+
1181
+ # Done; return the response.
1182
+ return response
1183
+
1184
+ def create_tuned_model(
1185
+ self,
1186
+ request: Optional[Union[model_service.CreateTunedModelRequest, dict]] = None,
1187
+ *,
1188
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
1189
+ tuned_model_id: Optional[str] = None,
1190
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1191
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1192
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1193
+ ) -> operation.Operation:
1194
+ r"""Creates a tuned model. Check intermediate tuning progress (if
1195
+ any) through the [google.longrunning.Operations] service.
1196
+
1197
+ Access status and results through the Operations service.
1198
+ Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
1199
+
1200
+ .. code-block:: python
1201
+
1202
+ # This snippet has been automatically generated and should be regarded as a
1203
+ # code template only.
1204
+ # It will require modifications to work:
1205
+ # - It may require correct/in-range values for request initialization.
1206
+ # - It may require specifying regional endpoints when creating the service
1207
+ # client as shown in:
1208
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1209
+ from google.ai import generativelanguage_v1alpha
1210
+
1211
+ def sample_create_tuned_model():
1212
+ # Create a client
1213
+ client = generativelanguage_v1alpha.ModelServiceClient()
1214
+
1215
+ # Initialize request argument(s)
1216
+ request = generativelanguage_v1alpha.CreateTunedModelRequest(
1217
+ )
1218
+
1219
+ # Make the request
1220
+ operation = client.create_tuned_model(request=request)
1221
+
1222
+ print("Waiting for operation to complete...")
1223
+
1224
+ response = operation.result()
1225
+
1226
+ # Handle the response
1227
+ print(response)
1228
+
1229
+ Args:
1230
+ request (Union[google.ai.generativelanguage_v1alpha.types.CreateTunedModelRequest, dict]):
1231
+ The request object. Request to create a TunedModel.
1232
+ tuned_model (google.ai.generativelanguage_v1alpha.types.TunedModel):
1233
+ Required. The tuned model to create.
1234
+ This corresponds to the ``tuned_model`` field
1235
+ on the ``request`` instance; if ``request`` is provided, this
1236
+ should not be set.
1237
+ tuned_model_id (str):
1238
+ Optional. The unique id for the tuned model if
1239
+ specified. This value should be up to 40 characters, the
1240
+ first character must be a letter, the last could be a
1241
+ letter or a number. The id must match the regular
1242
+ expression: ``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
1243
+
1244
+ This corresponds to the ``tuned_model_id`` field
1245
+ on the ``request`` instance; if ``request`` is provided, this
1246
+ should not be set.
1247
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1248
+ should be retried.
1249
+ timeout (float): The timeout for this request.
1250
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1251
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1252
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1253
+ be of type `bytes`.
1254
+
1255
+ Returns:
1256
+ google.api_core.operation.Operation:
1257
+ An object representing a long-running operation.
1258
+
1259
+ The result type for the operation will be
1260
+ :class:`google.ai.generativelanguage_v1alpha.types.TunedModel`
1261
+ A fine-tuned model created using
1262
+ ModelService.CreateTunedModel.
1263
+
1264
+ """
1265
+ # Create or coerce a protobuf request object.
1266
+ # - Quick check: If we got a request object, we should *not* have
1267
+ # gotten any keyword arguments that map to the request.
1268
+ has_flattened_params = any([tuned_model, tuned_model_id])
1269
+ if request is not None and has_flattened_params:
1270
+ raise ValueError(
1271
+ "If the `request` argument is set, then none of "
1272
+ "the individual field arguments should be set."
1273
+ )
1274
+
1275
+ # - Use the request object if provided (there's no risk of modifying the input as
1276
+ # there are no flattened fields), or create one.
1277
+ if not isinstance(request, model_service.CreateTunedModelRequest):
1278
+ request = model_service.CreateTunedModelRequest(request)
1279
+ # If we have keyword arguments corresponding to fields on the
1280
+ # request, apply these.
1281
+ if tuned_model is not None:
1282
+ request.tuned_model = tuned_model
1283
+ if tuned_model_id is not None:
1284
+ request.tuned_model_id = tuned_model_id
1285
+
1286
+ # Wrap the RPC method; this adds retry and timeout information,
1287
+ # and friendly error handling.
1288
+ rpc = self._transport._wrapped_methods[self._transport.create_tuned_model]
1289
+
1290
+ # Validate the universe domain.
1291
+ self._validate_universe_domain()
1292
+
1293
+ # Send the request.
1294
+ response = rpc(
1295
+ request,
1296
+ retry=retry,
1297
+ timeout=timeout,
1298
+ metadata=metadata,
1299
+ )
1300
+
1301
+ # Wrap the response in an operation future.
1302
+ response = operation.from_gapic(
1303
+ response,
1304
+ self._transport.operations_client,
1305
+ gag_tuned_model.TunedModel,
1306
+ metadata_type=model_service.CreateTunedModelMetadata,
1307
+ )
1308
+
1309
+ # Done; return the response.
1310
+ return response
1311
+
1312
+ def update_tuned_model(
1313
+ self,
1314
+ request: Optional[Union[model_service.UpdateTunedModelRequest, dict]] = None,
1315
+ *,
1316
+ tuned_model: Optional[gag_tuned_model.TunedModel] = None,
1317
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
1318
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1319
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1320
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1321
+ ) -> gag_tuned_model.TunedModel:
1322
+ r"""Updates a tuned model.
1323
+
1324
+ .. code-block:: python
1325
+
1326
+ # This snippet has been automatically generated and should be regarded as a
1327
+ # code template only.
1328
+ # It will require modifications to work:
1329
+ # - It may require correct/in-range values for request initialization.
1330
+ # - It may require specifying regional endpoints when creating the service
1331
+ # client as shown in:
1332
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1333
+ from google.ai import generativelanguage_v1alpha
1334
+
1335
+ def sample_update_tuned_model():
1336
+ # Create a client
1337
+ client = generativelanguage_v1alpha.ModelServiceClient()
1338
+
1339
+ # Initialize request argument(s)
1340
+ request = generativelanguage_v1alpha.UpdateTunedModelRequest(
1341
+ )
1342
+
1343
+ # Make the request
1344
+ response = client.update_tuned_model(request=request)
1345
+
1346
+ # Handle the response
1347
+ print(response)
1348
+
1349
+ Args:
1350
+ request (Union[google.ai.generativelanguage_v1alpha.types.UpdateTunedModelRequest, dict]):
1351
+ The request object. Request to update a TunedModel.
1352
+ tuned_model (google.ai.generativelanguage_v1alpha.types.TunedModel):
1353
+ Required. The tuned model to update.
1354
+ This corresponds to the ``tuned_model`` field
1355
+ on the ``request`` instance; if ``request`` is provided, this
1356
+ should not be set.
1357
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
1358
+ Optional. The list of fields to
1359
+ update.
1360
+
1361
+ This corresponds to the ``update_mask`` field
1362
+ on the ``request`` instance; if ``request`` is provided, this
1363
+ should not be set.
1364
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1365
+ should be retried.
1366
+ timeout (float): The timeout for this request.
1367
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1368
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1369
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1370
+ be of type `bytes`.
1371
+
1372
+ Returns:
1373
+ google.ai.generativelanguage_v1alpha.types.TunedModel:
1374
+ A fine-tuned model created using
1375
+ ModelService.CreateTunedModel.
1376
+
1377
+ """
1378
+ # Create or coerce a protobuf request object.
1379
+ # - Quick check: If we got a request object, we should *not* have
1380
+ # gotten any keyword arguments that map to the request.
1381
+ has_flattened_params = any([tuned_model, update_mask])
1382
+ if request is not None and has_flattened_params:
1383
+ raise ValueError(
1384
+ "If the `request` argument is set, then none of "
1385
+ "the individual field arguments should be set."
1386
+ )
1387
+
1388
+ # - Use the request object if provided (there's no risk of modifying the input as
1389
+ # there are no flattened fields), or create one.
1390
+ if not isinstance(request, model_service.UpdateTunedModelRequest):
1391
+ request = model_service.UpdateTunedModelRequest(request)
1392
+ # If we have keyword arguments corresponding to fields on the
1393
+ # request, apply these.
1394
+ if tuned_model is not None:
1395
+ request.tuned_model = tuned_model
1396
+ if update_mask is not None:
1397
+ request.update_mask = update_mask
1398
+
1399
+ # Wrap the RPC method; this adds retry and timeout information,
1400
+ # and friendly error handling.
1401
+ rpc = self._transport._wrapped_methods[self._transport.update_tuned_model]
1402
+
1403
+ # Certain fields should be provided within the metadata header;
1404
+ # add these here.
1405
+ metadata = tuple(metadata) + (
1406
+ gapic_v1.routing_header.to_grpc_metadata(
1407
+ (("tuned_model.name", request.tuned_model.name),)
1408
+ ),
1409
+ )
1410
+
1411
+ # Validate the universe domain.
1412
+ self._validate_universe_domain()
1413
+
1414
+ # Send the request.
1415
+ response = rpc(
1416
+ request,
1417
+ retry=retry,
1418
+ timeout=timeout,
1419
+ metadata=metadata,
1420
+ )
1421
+
1422
+ # Done; return the response.
1423
+ return response
1424
+
1425
+ def delete_tuned_model(
1426
+ self,
1427
+ request: Optional[Union[model_service.DeleteTunedModelRequest, dict]] = None,
1428
+ *,
1429
+ name: Optional[str] = None,
1430
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1431
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1432
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1433
+ ) -> None:
1434
+ r"""Deletes a tuned model.
1435
+
1436
+ .. code-block:: python
1437
+
1438
+ # This snippet has been automatically generated and should be regarded as a
1439
+ # code template only.
1440
+ # It will require modifications to work:
1441
+ # - It may require correct/in-range values for request initialization.
1442
+ # - It may require specifying regional endpoints when creating the service
1443
+ # client as shown in:
1444
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1445
+ from google.ai import generativelanguage_v1alpha
1446
+
1447
+ def sample_delete_tuned_model():
1448
+ # Create a client
1449
+ client = generativelanguage_v1alpha.ModelServiceClient()
1450
+
1451
+ # Initialize request argument(s)
1452
+ request = generativelanguage_v1alpha.DeleteTunedModelRequest(
1453
+ name="name_value",
1454
+ )
1455
+
1456
+ # Make the request
1457
+ client.delete_tuned_model(request=request)
1458
+
1459
+ Args:
1460
+ request (Union[google.ai.generativelanguage_v1alpha.types.DeleteTunedModelRequest, dict]):
1461
+ The request object. Request to delete a TunedModel.
1462
+ name (str):
1463
+ Required. The resource name of the model. Format:
1464
+ ``tunedModels/my-model-id``
1465
+
1466
+ This corresponds to the ``name`` field
1467
+ on the ``request`` instance; if ``request`` is provided, this
1468
+ should not be set.
1469
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1470
+ should be retried.
1471
+ timeout (float): The timeout for this request.
1472
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1473
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1474
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1475
+ be of type `bytes`.
1476
+ """
1477
+ # Create or coerce a protobuf request object.
1478
+ # - Quick check: If we got a request object, we should *not* have
1479
+ # gotten any keyword arguments that map to the request.
1480
+ has_flattened_params = any([name])
1481
+ if request is not None and has_flattened_params:
1482
+ raise ValueError(
1483
+ "If the `request` argument is set, then none of "
1484
+ "the individual field arguments should be set."
1485
+ )
1486
+
1487
+ # - Use the request object if provided (there's no risk of modifying the input as
1488
+ # there are no flattened fields), or create one.
1489
+ if not isinstance(request, model_service.DeleteTunedModelRequest):
1490
+ request = model_service.DeleteTunedModelRequest(request)
1491
+ # If we have keyword arguments corresponding to fields on the
1492
+ # request, apply these.
1493
+ if name is not None:
1494
+ request.name = name
1495
+
1496
+ # Wrap the RPC method; this adds retry and timeout information,
1497
+ # and friendly error handling.
1498
+ rpc = self._transport._wrapped_methods[self._transport.delete_tuned_model]
1499
+
1500
+ # Certain fields should be provided within the metadata header;
1501
+ # add these here.
1502
+ metadata = tuple(metadata) + (
1503
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1504
+ )
1505
+
1506
+ # Validate the universe domain.
1507
+ self._validate_universe_domain()
1508
+
1509
+ # Send the request.
1510
+ rpc(
1511
+ request,
1512
+ retry=retry,
1513
+ timeout=timeout,
1514
+ metadata=metadata,
1515
+ )
1516
+
1517
+ def __enter__(self) -> "ModelServiceClient":
1518
+ return self
1519
+
1520
+ def __exit__(self, type, value, traceback):
1521
+ """Releases underlying transport's resources.
1522
+
1523
+ .. warning::
1524
+ ONLY use as a context manager if the transport is NOT shared
1525
+ with other clients! Exiting the with block will CLOSE the transport
1526
+ and may cause errors in other clients!
1527
+ """
1528
+ self.transport.close()
1529
+
1530
+ def list_operations(
1531
+ self,
1532
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
1533
+ *,
1534
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1535
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1536
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1537
+ ) -> operations_pb2.ListOperationsResponse:
1538
+ r"""Lists operations that match the specified filter in the request.
1539
+
1540
+ Args:
1541
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
1542
+ The request object. Request message for
1543
+ `ListOperations` method.
1544
+ retry (google.api_core.retry.Retry): Designation of what errors,
1545
+ if any, should be retried.
1546
+ timeout (float): The timeout for this request.
1547
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1548
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1549
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1550
+ be of type `bytes`.
1551
+ Returns:
1552
+ ~.operations_pb2.ListOperationsResponse:
1553
+ Response message for ``ListOperations`` method.
1554
+ """
1555
+ # Create or coerce a protobuf request object.
1556
+ # The request isn't a proto-plus wrapped type,
1557
+ # so it must be constructed via keyword expansion.
1558
+ if isinstance(request, dict):
1559
+ request = operations_pb2.ListOperationsRequest(**request)
1560
+
1561
+ # Wrap the RPC method; this adds retry and timeout information,
1562
+ # and friendly error handling.
1563
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
1564
+
1565
+ # Certain fields should be provided within the metadata header;
1566
+ # add these here.
1567
+ metadata = tuple(metadata) + (
1568
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1569
+ )
1570
+
1571
+ # Validate the universe domain.
1572
+ self._validate_universe_domain()
1573
+
1574
+ # Send the request.
1575
+ response = rpc(
1576
+ request,
1577
+ retry=retry,
1578
+ timeout=timeout,
1579
+ metadata=metadata,
1580
+ )
1581
+
1582
+ # Done; return the response.
1583
+ return response
1584
+
1585
+ def get_operation(
1586
+ self,
1587
+ request: Optional[operations_pb2.GetOperationRequest] = None,
1588
+ *,
1589
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1590
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1591
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1592
+ ) -> operations_pb2.Operation:
1593
+ r"""Gets the latest state of a long-running operation.
1594
+
1595
+ Args:
1596
+ request (:class:`~.operations_pb2.GetOperationRequest`):
1597
+ The request object. Request message for
1598
+ `GetOperation` method.
1599
+ retry (google.api_core.retry.Retry): Designation of what errors,
1600
+ if any, should be retried.
1601
+ timeout (float): The timeout for this request.
1602
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1603
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1604
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1605
+ be of type `bytes`.
1606
+ Returns:
1607
+ ~.operations_pb2.Operation:
1608
+ An ``Operation`` object.
1609
+ """
1610
+ # Create or coerce a protobuf request object.
1611
+ # The request isn't a proto-plus wrapped type,
1612
+ # so it must be constructed via keyword expansion.
1613
+ if isinstance(request, dict):
1614
+ request = operations_pb2.GetOperationRequest(**request)
1615
+
1616
+ # Wrap the RPC method; this adds retry and timeout information,
1617
+ # and friendly error handling.
1618
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
1619
+
1620
+ # Certain fields should be provided within the metadata header;
1621
+ # add these here.
1622
+ metadata = tuple(metadata) + (
1623
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1624
+ )
1625
+
1626
+ # Validate the universe domain.
1627
+ self._validate_universe_domain()
1628
+
1629
+ # Send the request.
1630
+ response = rpc(
1631
+ request,
1632
+ retry=retry,
1633
+ timeout=timeout,
1634
+ metadata=metadata,
1635
+ )
1636
+
1637
+ # Done; return the response.
1638
+ return response
1639
+
1640
+
1641
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
1642
+ gapic_version=package_version.__version__
1643
+ )
1644
+
1645
+
1646
+ __all__ = ("ModelServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/pagers.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from typing import (
17
+ Any,
18
+ AsyncIterator,
19
+ Awaitable,
20
+ Callable,
21
+ Iterator,
22
+ Optional,
23
+ Sequence,
24
+ Tuple,
25
+ Union,
26
+ )
27
+
28
+ from google.api_core import gapic_v1
29
+ from google.api_core import retry as retries
30
+ from google.api_core import retry_async as retries_async
31
+
32
+ try:
33
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
34
+ OptionalAsyncRetry = Union[
35
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
36
+ ]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
40
+
41
+ from google.ai.generativelanguage_v1alpha.types import model, model_service, tuned_model
42
+
43
+
44
+ class ListModelsPager:
45
+ """A pager for iterating through ``list_models`` requests.
46
+
47
+ This class thinly wraps an initial
48
+ :class:`google.ai.generativelanguage_v1alpha.types.ListModelsResponse` object, and
49
+ provides an ``__iter__`` method to iterate through its
50
+ ``models`` field.
51
+
52
+ If there are more pages, the ``__iter__`` method will make additional
53
+ ``ListModels`` requests and continue to iterate
54
+ through the ``models`` field on the
55
+ corresponding responses.
56
+
57
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListModelsResponse`
58
+ attributes are available on the pager. If multiple requests are made, only
59
+ the most recent response is retained, and thus used for attribute lookup.
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ method: Callable[..., model_service.ListModelsResponse],
65
+ request: model_service.ListModelsRequest,
66
+ response: model_service.ListModelsResponse,
67
+ *,
68
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
69
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
70
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
71
+ ):
72
+ """Instantiate the pager.
73
+
74
+ Args:
75
+ method (Callable): The method that was originally called, and
76
+ which instantiated this pager.
77
+ request (google.ai.generativelanguage_v1alpha.types.ListModelsRequest):
78
+ The initial request object.
79
+ response (google.ai.generativelanguage_v1alpha.types.ListModelsResponse):
80
+ The initial response object.
81
+ retry (google.api_core.retry.Retry): Designation of what errors,
82
+ if any, should be retried.
83
+ timeout (float): The timeout for this request.
84
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
85
+ sent along with the request as metadata. Normally, each value must be of type `str`,
86
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
87
+ be of type `bytes`.
88
+ """
89
+ self._method = method
90
+ self._request = model_service.ListModelsRequest(request)
91
+ self._response = response
92
+ self._retry = retry
93
+ self._timeout = timeout
94
+ self._metadata = metadata
95
+
96
+ def __getattr__(self, name: str) -> Any:
97
+ return getattr(self._response, name)
98
+
99
+ @property
100
+ def pages(self) -> Iterator[model_service.ListModelsResponse]:
101
+ yield self._response
102
+ while self._response.next_page_token:
103
+ self._request.page_token = self._response.next_page_token
104
+ self._response = self._method(
105
+ self._request,
106
+ retry=self._retry,
107
+ timeout=self._timeout,
108
+ metadata=self._metadata,
109
+ )
110
+ yield self._response
111
+
112
+ def __iter__(self) -> Iterator[model.Model]:
113
+ for page in self.pages:
114
+ yield from page.models
115
+
116
+ def __repr__(self) -> str:
117
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
118
+
119
+
120
+ class ListModelsAsyncPager:
121
+ """A pager for iterating through ``list_models`` requests.
122
+
123
+ This class thinly wraps an initial
124
+ :class:`google.ai.generativelanguage_v1alpha.types.ListModelsResponse` object, and
125
+ provides an ``__aiter__`` method to iterate through its
126
+ ``models`` field.
127
+
128
+ If there are more pages, the ``__aiter__`` method will make additional
129
+ ``ListModels`` requests and continue to iterate
130
+ through the ``models`` field on the
131
+ corresponding responses.
132
+
133
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListModelsResponse`
134
+ attributes are available on the pager. If multiple requests are made, only
135
+ the most recent response is retained, and thus used for attribute lookup.
136
+ """
137
+
138
+ def __init__(
139
+ self,
140
+ method: Callable[..., Awaitable[model_service.ListModelsResponse]],
141
+ request: model_service.ListModelsRequest,
142
+ response: model_service.ListModelsResponse,
143
+ *,
144
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
145
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
146
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
147
+ ):
148
+ """Instantiates the pager.
149
+
150
+ Args:
151
+ method (Callable): The method that was originally called, and
152
+ which instantiated this pager.
153
+ request (google.ai.generativelanguage_v1alpha.types.ListModelsRequest):
154
+ The initial request object.
155
+ response (google.ai.generativelanguage_v1alpha.types.ListModelsResponse):
156
+ The initial response object.
157
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
158
+ if any, should be retried.
159
+ timeout (float): The timeout for this request.
160
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
161
+ sent along with the request as metadata. Normally, each value must be of type `str`,
162
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
163
+ be of type `bytes`.
164
+ """
165
+ self._method = method
166
+ self._request = model_service.ListModelsRequest(request)
167
+ self._response = response
168
+ self._retry = retry
169
+ self._timeout = timeout
170
+ self._metadata = metadata
171
+
172
+ def __getattr__(self, name: str) -> Any:
173
+ return getattr(self._response, name)
174
+
175
+ @property
176
+ async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]:
177
+ yield self._response
178
+ while self._response.next_page_token:
179
+ self._request.page_token = self._response.next_page_token
180
+ self._response = await self._method(
181
+ self._request,
182
+ retry=self._retry,
183
+ timeout=self._timeout,
184
+ metadata=self._metadata,
185
+ )
186
+ yield self._response
187
+
188
+ def __aiter__(self) -> AsyncIterator[model.Model]:
189
+ async def async_generator():
190
+ async for page in self.pages:
191
+ for response in page.models:
192
+ yield response
193
+
194
+ return async_generator()
195
+
196
+ def __repr__(self) -> str:
197
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
198
+
199
+
200
+ class ListTunedModelsPager:
201
+ """A pager for iterating through ``list_tuned_models`` requests.
202
+
203
+ This class thinly wraps an initial
204
+ :class:`google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse` object, and
205
+ provides an ``__iter__`` method to iterate through its
206
+ ``tuned_models`` field.
207
+
208
+ If there are more pages, the ``__iter__`` method will make additional
209
+ ``ListTunedModels`` requests and continue to iterate
210
+ through the ``tuned_models`` field on the
211
+ corresponding responses.
212
+
213
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse`
214
+ attributes are available on the pager. If multiple requests are made, only
215
+ the most recent response is retained, and thus used for attribute lookup.
216
+ """
217
+
218
+ def __init__(
219
+ self,
220
+ method: Callable[..., model_service.ListTunedModelsResponse],
221
+ request: model_service.ListTunedModelsRequest,
222
+ response: model_service.ListTunedModelsResponse,
223
+ *,
224
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
225
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
226
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
227
+ ):
228
+ """Instantiate the pager.
229
+
230
+ Args:
231
+ method (Callable): The method that was originally called, and
232
+ which instantiated this pager.
233
+ request (google.ai.generativelanguage_v1alpha.types.ListTunedModelsRequest):
234
+ The initial request object.
235
+ response (google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse):
236
+ The initial response object.
237
+ retry (google.api_core.retry.Retry): Designation of what errors,
238
+ if any, should be retried.
239
+ timeout (float): The timeout for this request.
240
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
241
+ sent along with the request as metadata. Normally, each value must be of type `str`,
242
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
243
+ be of type `bytes`.
244
+ """
245
+ self._method = method
246
+ self._request = model_service.ListTunedModelsRequest(request)
247
+ self._response = response
248
+ self._retry = retry
249
+ self._timeout = timeout
250
+ self._metadata = metadata
251
+
252
+ def __getattr__(self, name: str) -> Any:
253
+ return getattr(self._response, name)
254
+
255
+ @property
256
+ def pages(self) -> Iterator[model_service.ListTunedModelsResponse]:
257
+ yield self._response
258
+ while self._response.next_page_token:
259
+ self._request.page_token = self._response.next_page_token
260
+ self._response = self._method(
261
+ self._request,
262
+ retry=self._retry,
263
+ timeout=self._timeout,
264
+ metadata=self._metadata,
265
+ )
266
+ yield self._response
267
+
268
+ def __iter__(self) -> Iterator[tuned_model.TunedModel]:
269
+ for page in self.pages:
270
+ yield from page.tuned_models
271
+
272
+ def __repr__(self) -> str:
273
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
274
+
275
+
276
+ class ListTunedModelsAsyncPager:
277
+ """A pager for iterating through ``list_tuned_models`` requests.
278
+
279
+ This class thinly wraps an initial
280
+ :class:`google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse` object, and
281
+ provides an ``__aiter__`` method to iterate through its
282
+ ``tuned_models`` field.
283
+
284
+ If there are more pages, the ``__aiter__`` method will make additional
285
+ ``ListTunedModels`` requests and continue to iterate
286
+ through the ``tuned_models`` field on the
287
+ corresponding responses.
288
+
289
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse`
290
+ attributes are available on the pager. If multiple requests are made, only
291
+ the most recent response is retained, and thus used for attribute lookup.
292
+ """
293
+
294
+ def __init__(
295
+ self,
296
+ method: Callable[..., Awaitable[model_service.ListTunedModelsResponse]],
297
+ request: model_service.ListTunedModelsRequest,
298
+ response: model_service.ListTunedModelsResponse,
299
+ *,
300
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
301
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
302
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
303
+ ):
304
+ """Instantiates the pager.
305
+
306
+ Args:
307
+ method (Callable): The method that was originally called, and
308
+ which instantiated this pager.
309
+ request (google.ai.generativelanguage_v1alpha.types.ListTunedModelsRequest):
310
+ The initial request object.
311
+ response (google.ai.generativelanguage_v1alpha.types.ListTunedModelsResponse):
312
+ The initial response object.
313
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
314
+ if any, should be retried.
315
+ timeout (float): The timeout for this request.
316
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
317
+ sent along with the request as metadata. Normally, each value must be of type `str`,
318
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
319
+ be of type `bytes`.
320
+ """
321
+ self._method = method
322
+ self._request = model_service.ListTunedModelsRequest(request)
323
+ self._response = response
324
+ self._retry = retry
325
+ self._timeout = timeout
326
+ self._metadata = metadata
327
+
328
+ def __getattr__(self, name: str) -> Any:
329
+ return getattr(self._response, name)
330
+
331
+ @property
332
+ async def pages(self) -> AsyncIterator[model_service.ListTunedModelsResponse]:
333
+ yield self._response
334
+ while self._response.next_page_token:
335
+ self._request.page_token = self._response.next_page_token
336
+ self._response = await self._method(
337
+ self._request,
338
+ retry=self._retry,
339
+ timeout=self._timeout,
340
+ metadata=self._metadata,
341
+ )
342
+ yield self._response
343
+
344
+ def __aiter__(self) -> AsyncIterator[tuned_model.TunedModel]:
345
+ async def async_generator():
346
+ async for page in self.pages:
347
+ for response in page.tuned_models:
348
+ yield response
349
+
350
+ return async_generator()
351
+
352
+ def __repr__(self) -> str:
353
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ from typing import Dict, Type
18
+
19
+ from .base import ModelServiceTransport
20
+ from .grpc import ModelServiceGrpcTransport
21
+ from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport
22
+ from .rest import ModelServiceRestInterceptor, ModelServiceRestTransport
23
+
24
+ # Compile a registry of transports.
25
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
26
+ _transport_registry["grpc"] = ModelServiceGrpcTransport
27
+ _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
28
+ _transport_registry["rest"] = ModelServiceRestTransport
29
+
30
+ __all__ = (
31
+ "ModelServiceTransport",
32
+ "ModelServiceGrpcTransport",
33
+ "ModelServiceGrpcAsyncIOTransport",
34
+ "ModelServiceRestTransport",
35
+ "ModelServiceRestInterceptor",
36
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (896 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (11.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (25.5 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (28.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (68.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (22 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/base.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1, operations_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+
29
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
30
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
31
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
32
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
33
+
34
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
35
+ gapic_version=package_version.__version__
36
+ )
37
+
38
+
39
+ class ModelServiceTransport(abc.ABC):
40
+ """Abstract transport class for ModelService."""
41
+
42
+ AUTH_SCOPES = ()
43
+
44
+ DEFAULT_HOST: str = "generativelanguage.googleapis.com"
45
+
46
+ def __init__(
47
+ self,
48
+ *,
49
+ host: str = DEFAULT_HOST,
50
+ credentials: Optional[ga_credentials.Credentials] = None,
51
+ credentials_file: Optional[str] = None,
52
+ scopes: Optional[Sequence[str]] = None,
53
+ quota_project_id: Optional[str] = None,
54
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
55
+ always_use_jwt_access: Optional[bool] = False,
56
+ api_audience: Optional[str] = None,
57
+ **kwargs,
58
+ ) -> None:
59
+ """Instantiate the transport.
60
+
61
+ Args:
62
+ host (Optional[str]):
63
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
64
+ credentials (Optional[google.auth.credentials.Credentials]): The
65
+ authorization credentials to attach to requests. These
66
+ credentials identify the application to the service; if none
67
+ are specified, the client will attempt to ascertain the
68
+ credentials from the environment.
69
+ credentials_file (Optional[str]): A file with credentials that can
70
+ be loaded with :func:`google.auth.load_credentials_from_file`.
71
+ This argument is mutually exclusive with credentials.
72
+ scopes (Optional[Sequence[str]]): A list of scopes.
73
+ quota_project_id (Optional[str]): An optional project to use for billing
74
+ and quota.
75
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
76
+ The client info used to send a user-agent string along with
77
+ API requests. If ``None``, then default info will be used.
78
+ Generally, you only need to set this if you're developing
79
+ your own client library.
80
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
81
+ be used for service account credentials.
82
+ """
83
+
84
+ scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
85
+
86
+ # Save the scopes.
87
+ self._scopes = scopes
88
+ if not hasattr(self, "_ignore_credentials"):
89
+ self._ignore_credentials: bool = False
90
+
91
+ # If no credentials are provided, then determine the appropriate
92
+ # defaults.
93
+ if credentials and credentials_file:
94
+ raise core_exceptions.DuplicateCredentialArgs(
95
+ "'credentials_file' and 'credentials' are mutually exclusive"
96
+ )
97
+
98
+ if credentials_file is not None:
99
+ credentials, _ = google.auth.load_credentials_from_file(
100
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
101
+ )
102
+ elif credentials is None and not self._ignore_credentials:
103
+ credentials, _ = google.auth.default(
104
+ **scopes_kwargs, quota_project_id=quota_project_id
105
+ )
106
+ # Don't apply audience if the credentials file passed from user.
107
+ if hasattr(credentials, "with_gdch_audience"):
108
+ credentials = credentials.with_gdch_audience(
109
+ api_audience if api_audience else host
110
+ )
111
+
112
+ # If the credentials are service account credentials, then always try to use self signed JWT.
113
+ if (
114
+ always_use_jwt_access
115
+ and isinstance(credentials, service_account.Credentials)
116
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
117
+ ):
118
+ credentials = credentials.with_always_use_jwt_access(True)
119
+
120
+ # Save the credentials.
121
+ self._credentials = credentials
122
+
123
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
124
+ if ":" not in host:
125
+ host += ":443"
126
+ self._host = host
127
+
128
+ @property
129
+ def host(self):
130
+ return self._host
131
+
132
+ def _prep_wrapped_messages(self, client_info):
133
+ # Precompute the wrapped methods.
134
+ self._wrapped_methods = {
135
+ self.get_model: gapic_v1.method.wrap_method(
136
+ self.get_model,
137
+ default_timeout=None,
138
+ client_info=client_info,
139
+ ),
140
+ self.list_models: gapic_v1.method.wrap_method(
141
+ self.list_models,
142
+ default_timeout=None,
143
+ client_info=client_info,
144
+ ),
145
+ self.get_tuned_model: gapic_v1.method.wrap_method(
146
+ self.get_tuned_model,
147
+ default_timeout=None,
148
+ client_info=client_info,
149
+ ),
150
+ self.list_tuned_models: gapic_v1.method.wrap_method(
151
+ self.list_tuned_models,
152
+ default_timeout=None,
153
+ client_info=client_info,
154
+ ),
155
+ self.create_tuned_model: gapic_v1.method.wrap_method(
156
+ self.create_tuned_model,
157
+ default_timeout=None,
158
+ client_info=client_info,
159
+ ),
160
+ self.update_tuned_model: gapic_v1.method.wrap_method(
161
+ self.update_tuned_model,
162
+ default_timeout=None,
163
+ client_info=client_info,
164
+ ),
165
+ self.delete_tuned_model: gapic_v1.method.wrap_method(
166
+ self.delete_tuned_model,
167
+ default_timeout=None,
168
+ client_info=client_info,
169
+ ),
170
+ self.get_operation: gapic_v1.method.wrap_method(
171
+ self.get_operation,
172
+ default_timeout=None,
173
+ client_info=client_info,
174
+ ),
175
+ self.list_operations: gapic_v1.method.wrap_method(
176
+ self.list_operations,
177
+ default_timeout=None,
178
+ client_info=client_info,
179
+ ),
180
+ }
181
+
182
+ def close(self):
183
+ """Closes resources associated with the transport.
184
+
185
+ .. warning::
186
+ Only call this method if the transport is NOT shared
187
+ with other clients - this may cause errors in other clients!
188
+ """
189
+ raise NotImplementedError()
190
+
191
+ @property
192
+ def operations_client(self):
193
+ """Return the client designed to process long-running operations."""
194
+ raise NotImplementedError()
195
+
196
+ @property
197
+ def get_model(
198
+ self,
199
+ ) -> Callable[
200
+ [model_service.GetModelRequest], Union[model.Model, Awaitable[model.Model]]
201
+ ]:
202
+ raise NotImplementedError()
203
+
204
+ @property
205
+ def list_models(
206
+ self,
207
+ ) -> Callable[
208
+ [model_service.ListModelsRequest],
209
+ Union[
210
+ model_service.ListModelsResponse,
211
+ Awaitable[model_service.ListModelsResponse],
212
+ ],
213
+ ]:
214
+ raise NotImplementedError()
215
+
216
+ @property
217
+ def get_tuned_model(
218
+ self,
219
+ ) -> Callable[
220
+ [model_service.GetTunedModelRequest],
221
+ Union[tuned_model.TunedModel, Awaitable[tuned_model.TunedModel]],
222
+ ]:
223
+ raise NotImplementedError()
224
+
225
+ @property
226
+ def list_tuned_models(
227
+ self,
228
+ ) -> Callable[
229
+ [model_service.ListTunedModelsRequest],
230
+ Union[
231
+ model_service.ListTunedModelsResponse,
232
+ Awaitable[model_service.ListTunedModelsResponse],
233
+ ],
234
+ ]:
235
+ raise NotImplementedError()
236
+
237
+ @property
238
+ def create_tuned_model(
239
+ self,
240
+ ) -> Callable[
241
+ [model_service.CreateTunedModelRequest],
242
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
243
+ ]:
244
+ raise NotImplementedError()
245
+
246
+ @property
247
+ def update_tuned_model(
248
+ self,
249
+ ) -> Callable[
250
+ [model_service.UpdateTunedModelRequest],
251
+ Union[gag_tuned_model.TunedModel, Awaitable[gag_tuned_model.TunedModel]],
252
+ ]:
253
+ raise NotImplementedError()
254
+
255
+ @property
256
+ def delete_tuned_model(
257
+ self,
258
+ ) -> Callable[
259
+ [model_service.DeleteTunedModelRequest],
260
+ Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
261
+ ]:
262
+ raise NotImplementedError()
263
+
264
+ @property
265
+ def list_operations(
266
+ self,
267
+ ) -> Callable[
268
+ [operations_pb2.ListOperationsRequest],
269
+ Union[
270
+ operations_pb2.ListOperationsResponse,
271
+ Awaitable[operations_pb2.ListOperationsResponse],
272
+ ],
273
+ ]:
274
+ raise NotImplementedError()
275
+
276
+ @property
277
+ def get_operation(
278
+ self,
279
+ ) -> Callable[
280
+ [operations_pb2.GetOperationRequest],
281
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
282
+ ]:
283
+ raise NotImplementedError()
284
+
285
+ @property
286
+ def kind(self) -> str:
287
+ raise NotImplementedError()
288
+
289
+
290
+ __all__ = ("ModelServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/grpc.py ADDED
@@ -0,0 +1,582 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers, operations_v1
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf import empty_pb2 # type: ignore
28
+ from google.protobuf.json_format import MessageToJson
29
+ import google.protobuf.message
30
+ import grpc # type: ignore
31
+ import proto # type: ignore
32
+
33
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
34
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
35
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
38
+
39
+ try:
40
+ from google.api_core import client_logging # type: ignore
41
+
42
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
43
+ except ImportError: # pragma: NO COVER
44
+ CLIENT_LOGGING_SUPPORTED = False
45
+
46
+ _LOGGER = std_logging.getLogger(__name__)
47
+
48
+
49
+ class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER
50
+ def intercept_unary_unary(self, continuation, client_call_details, request):
51
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
52
+ std_logging.DEBUG
53
+ )
54
+ if logging_enabled: # pragma: NO COVER
55
+ request_metadata = client_call_details.metadata
56
+ if isinstance(request, proto.Message):
57
+ request_payload = type(request).to_json(request)
58
+ elif isinstance(request, google.protobuf.message.Message):
59
+ request_payload = MessageToJson(request)
60
+ else:
61
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
62
+
63
+ request_metadata = {
64
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
65
+ for key, value in request_metadata
66
+ }
67
+ grpc_request = {
68
+ "payload": request_payload,
69
+ "requestMethod": "grpc",
70
+ "metadata": dict(request_metadata),
71
+ }
72
+ _LOGGER.debug(
73
+ f"Sending request for {client_call_details.method}",
74
+ extra={
75
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
76
+ "rpcName": client_call_details.method,
77
+ "request": grpc_request,
78
+ "metadata": grpc_request["metadata"],
79
+ },
80
+ )
81
+
82
+ response = continuation(client_call_details, request)
83
+ if logging_enabled: # pragma: NO COVER
84
+ response_metadata = response.trailing_metadata()
85
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
86
+ metadata = (
87
+ dict([(k, str(v)) for k, v in response_metadata])
88
+ if response_metadata
89
+ else None
90
+ )
91
+ result = response.result()
92
+ if isinstance(result, proto.Message):
93
+ response_payload = type(result).to_json(result)
94
+ elif isinstance(result, google.protobuf.message.Message):
95
+ response_payload = MessageToJson(result)
96
+ else:
97
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
98
+ grpc_response = {
99
+ "payload": response_payload,
100
+ "metadata": metadata,
101
+ "status": "OK",
102
+ }
103
+ _LOGGER.debug(
104
+ f"Received response for {client_call_details.method}.",
105
+ extra={
106
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
107
+ "rpcName": client_call_details.method,
108
+ "response": grpc_response,
109
+ "metadata": grpc_response["metadata"],
110
+ },
111
+ )
112
+ return response
113
+
114
+
115
+ class ModelServiceGrpcTransport(ModelServiceTransport):
116
+ """gRPC backend transport for ModelService.
117
+
118
+ Provides methods for getting metadata information about
119
+ Generative Models.
120
+
121
+ This class defines the same methods as the primary client, so the
122
+ primary client can load the underlying transport implementation
123
+ and call it.
124
+
125
+ It sends protocol buffers over the wire using gRPC (which is built on
126
+ top of HTTP/2); the ``grpcio`` package must be installed.
127
+ """
128
+
129
+ _stubs: Dict[str, Callable]
130
+
131
+ def __init__(
132
+ self,
133
+ *,
134
+ host: str = "generativelanguage.googleapis.com",
135
+ credentials: Optional[ga_credentials.Credentials] = None,
136
+ credentials_file: Optional[str] = None,
137
+ scopes: Optional[Sequence[str]] = None,
138
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
139
+ api_mtls_endpoint: Optional[str] = None,
140
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
141
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
142
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
143
+ quota_project_id: Optional[str] = None,
144
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
145
+ always_use_jwt_access: Optional[bool] = False,
146
+ api_audience: Optional[str] = None,
147
+ ) -> None:
148
+ """Instantiate the transport.
149
+
150
+ Args:
151
+ host (Optional[str]):
152
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
153
+ credentials (Optional[google.auth.credentials.Credentials]): The
154
+ authorization credentials to attach to requests. These
155
+ credentials identify the application to the service; if none
156
+ are specified, the client will attempt to ascertain the
157
+ credentials from the environment.
158
+ This argument is ignored if a ``channel`` instance is provided.
159
+ credentials_file (Optional[str]): A file with credentials that can
160
+ be loaded with :func:`google.auth.load_credentials_from_file`.
161
+ This argument is ignored if a ``channel`` instance is provided.
162
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
163
+ ignored if a ``channel`` instance is provided.
164
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
165
+ A ``Channel`` instance through which to make calls, or a Callable
166
+ that constructs and returns one. If set to None, ``self.create_channel``
167
+ is used to create the channel. If a Callable is given, it will be called
168
+ with the same arguments as used in ``self.create_channel``.
169
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
170
+ If provided, it overrides the ``host`` argument and tries to create
171
+ a mutual TLS channel with client SSL credentials from
172
+ ``client_cert_source`` or application default SSL credentials.
173
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
174
+ Deprecated. A callback to provide client SSL certificate bytes and
175
+ private key bytes, both in PEM format. It is ignored if
176
+ ``api_mtls_endpoint`` is None.
177
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
178
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
179
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
180
+ A callback to provide client certificate bytes and private key bytes,
181
+ both in PEM format. It is used to configure a mutual TLS channel. It is
182
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
183
+ quota_project_id (Optional[str]): An optional project to use for billing
184
+ and quota.
185
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
186
+ The client info used to send a user-agent string along with
187
+ API requests. If ``None``, then default info will be used.
188
+ Generally, you only need to set this if you're developing
189
+ your own client library.
190
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
191
+ be used for service account credentials.
192
+
193
+ Raises:
194
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
195
+ creation failed for any reason.
196
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
197
+ and ``credentials_file`` are passed.
198
+ """
199
+ self._grpc_channel = None
200
+ self._ssl_channel_credentials = ssl_channel_credentials
201
+ self._stubs: Dict[str, Callable] = {}
202
+ self._operations_client: Optional[operations_v1.OperationsClient] = None
203
+
204
+ if api_mtls_endpoint:
205
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
206
+ if client_cert_source:
207
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
208
+
209
+ if isinstance(channel, grpc.Channel):
210
+ # Ignore credentials if a channel was passed.
211
+ credentials = None
212
+ self._ignore_credentials = True
213
+ # If a channel was explicitly provided, set it.
214
+ self._grpc_channel = channel
215
+ self._ssl_channel_credentials = None
216
+
217
+ else:
218
+ if api_mtls_endpoint:
219
+ host = api_mtls_endpoint
220
+
221
+ # Create SSL credentials with client_cert_source or application
222
+ # default SSL credentials.
223
+ if client_cert_source:
224
+ cert, key = client_cert_source()
225
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
226
+ certificate_chain=cert, private_key=key
227
+ )
228
+ else:
229
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
230
+
231
+ else:
232
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
233
+ cert, key = client_cert_source_for_mtls()
234
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
235
+ certificate_chain=cert, private_key=key
236
+ )
237
+
238
+ # The base transport sets the host, credentials and scopes
239
+ super().__init__(
240
+ host=host,
241
+ credentials=credentials,
242
+ credentials_file=credentials_file,
243
+ scopes=scopes,
244
+ quota_project_id=quota_project_id,
245
+ client_info=client_info,
246
+ always_use_jwt_access=always_use_jwt_access,
247
+ api_audience=api_audience,
248
+ )
249
+
250
+ if not self._grpc_channel:
251
+ # initialize with the provided callable or the default channel
252
+ channel_init = channel or type(self).create_channel
253
+ self._grpc_channel = channel_init(
254
+ self._host,
255
+ # use the credentials which are saved
256
+ credentials=self._credentials,
257
+ # Set ``credentials_file`` to ``None`` here as
258
+ # the credentials that we saved earlier should be used.
259
+ credentials_file=None,
260
+ scopes=self._scopes,
261
+ ssl_credentials=self._ssl_channel_credentials,
262
+ quota_project_id=quota_project_id,
263
+ options=[
264
+ ("grpc.max_send_message_length", -1),
265
+ ("grpc.max_receive_message_length", -1),
266
+ ],
267
+ )
268
+
269
+ self._interceptor = _LoggingClientInterceptor()
270
+ self._logged_channel = grpc.intercept_channel(
271
+ self._grpc_channel, self._interceptor
272
+ )
273
+
274
+ # Wrap messages. This must be done after self._logged_channel exists
275
+ self._prep_wrapped_messages(client_info)
276
+
277
+ @classmethod
278
+ def create_channel(
279
+ cls,
280
+ host: str = "generativelanguage.googleapis.com",
281
+ credentials: Optional[ga_credentials.Credentials] = None,
282
+ credentials_file: Optional[str] = None,
283
+ scopes: Optional[Sequence[str]] = None,
284
+ quota_project_id: Optional[str] = None,
285
+ **kwargs,
286
+ ) -> grpc.Channel:
287
+ """Create and return a gRPC channel object.
288
+ Args:
289
+ host (Optional[str]): The host for the channel to use.
290
+ credentials (Optional[~.Credentials]): The
291
+ authorization credentials to attach to requests. These
292
+ credentials identify this application to the service. If
293
+ none are specified, the client will attempt to ascertain
294
+ the credentials from the environment.
295
+ credentials_file (Optional[str]): A file with credentials that can
296
+ be loaded with :func:`google.auth.load_credentials_from_file`.
297
+ This argument is mutually exclusive with credentials.
298
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
299
+ service. These are only used when credentials are not specified and
300
+ are passed to :func:`google.auth.default`.
301
+ quota_project_id (Optional[str]): An optional project to use for billing
302
+ and quota.
303
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
304
+ channel creation.
305
+ Returns:
306
+ grpc.Channel: A gRPC channel object.
307
+
308
+ Raises:
309
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
310
+ and ``credentials_file`` are passed.
311
+ """
312
+
313
+ return grpc_helpers.create_channel(
314
+ host,
315
+ credentials=credentials,
316
+ credentials_file=credentials_file,
317
+ quota_project_id=quota_project_id,
318
+ default_scopes=cls.AUTH_SCOPES,
319
+ scopes=scopes,
320
+ default_host=cls.DEFAULT_HOST,
321
+ **kwargs,
322
+ )
323
+
324
+ @property
325
+ def grpc_channel(self) -> grpc.Channel:
326
+ """Return the channel designed to connect to this service."""
327
+ return self._grpc_channel
328
+
329
+ @property
330
+ def operations_client(self) -> operations_v1.OperationsClient:
331
+ """Create the client designed to process long-running operations.
332
+
333
+ This property caches on the instance; repeated calls return the same
334
+ client.
335
+ """
336
+ # Quick check: Only create a new client if we do not already have one.
337
+ if self._operations_client is None:
338
+ self._operations_client = operations_v1.OperationsClient(
339
+ self._logged_channel
340
+ )
341
+
342
+ # Return the client from cache.
343
+ return self._operations_client
344
+
345
+ @property
346
+ def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
347
+ r"""Return a callable for the get model method over gRPC.
348
+
349
+ Gets information about a specific ``Model`` such as its version
350
+ number, token limits,
351
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
352
+ and other metadata. Refer to the `Gemini models
353
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
354
+ for detailed model information.
355
+
356
+ Returns:
357
+ Callable[[~.GetModelRequest],
358
+ ~.Model]:
359
+ A function that, when called, will call the underlying RPC
360
+ on the server.
361
+ """
362
+ # Generate a "stub function" on-the-fly which will actually make
363
+ # the request.
364
+ # gRPC handles serialization and deserialization, so we just need
365
+ # to pass in the functions for each.
366
+ if "get_model" not in self._stubs:
367
+ self._stubs["get_model"] = self._logged_channel.unary_unary(
368
+ "/google.ai.generativelanguage.v1alpha.ModelService/GetModel",
369
+ request_serializer=model_service.GetModelRequest.serialize,
370
+ response_deserializer=model.Model.deserialize,
371
+ )
372
+ return self._stubs["get_model"]
373
+
374
+ @property
375
+ def list_models(
376
+ self,
377
+ ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
378
+ r"""Return a callable for the list models method over gRPC.
379
+
380
+ Lists the
381
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
382
+ available through the Gemini API.
383
+
384
+ Returns:
385
+ Callable[[~.ListModelsRequest],
386
+ ~.ListModelsResponse]:
387
+ A function that, when called, will call the underlying RPC
388
+ on the server.
389
+ """
390
+ # Generate a "stub function" on-the-fly which will actually make
391
+ # the request.
392
+ # gRPC handles serialization and deserialization, so we just need
393
+ # to pass in the functions for each.
394
+ if "list_models" not in self._stubs:
395
+ self._stubs["list_models"] = self._logged_channel.unary_unary(
396
+ "/google.ai.generativelanguage.v1alpha.ModelService/ListModels",
397
+ request_serializer=model_service.ListModelsRequest.serialize,
398
+ response_deserializer=model_service.ListModelsResponse.deserialize,
399
+ )
400
+ return self._stubs["list_models"]
401
+
402
+ @property
403
+ def get_tuned_model(
404
+ self,
405
+ ) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]:
406
+ r"""Return a callable for the get tuned model method over gRPC.
407
+
408
+ Gets information about a specific TunedModel.
409
+
410
+ Returns:
411
+ Callable[[~.GetTunedModelRequest],
412
+ ~.TunedModel]:
413
+ A function that, when called, will call the underlying RPC
414
+ on the server.
415
+ """
416
+ # Generate a "stub function" on-the-fly which will actually make
417
+ # the request.
418
+ # gRPC handles serialization and deserialization, so we just need
419
+ # to pass in the functions for each.
420
+ if "get_tuned_model" not in self._stubs:
421
+ self._stubs["get_tuned_model"] = self._logged_channel.unary_unary(
422
+ "/google.ai.generativelanguage.v1alpha.ModelService/GetTunedModel",
423
+ request_serializer=model_service.GetTunedModelRequest.serialize,
424
+ response_deserializer=tuned_model.TunedModel.deserialize,
425
+ )
426
+ return self._stubs["get_tuned_model"]
427
+
428
+ @property
429
+ def list_tuned_models(
430
+ self,
431
+ ) -> Callable[
432
+ [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse
433
+ ]:
434
+ r"""Return a callable for the list tuned models method over gRPC.
435
+
436
+ Lists created tuned models.
437
+
438
+ Returns:
439
+ Callable[[~.ListTunedModelsRequest],
440
+ ~.ListTunedModelsResponse]:
441
+ A function that, when called, will call the underlying RPC
442
+ on the server.
443
+ """
444
+ # Generate a "stub function" on-the-fly which will actually make
445
+ # the request.
446
+ # gRPC handles serialization and deserialization, so we just need
447
+ # to pass in the functions for each.
448
+ if "list_tuned_models" not in self._stubs:
449
+ self._stubs["list_tuned_models"] = self._logged_channel.unary_unary(
450
+ "/google.ai.generativelanguage.v1alpha.ModelService/ListTunedModels",
451
+ request_serializer=model_service.ListTunedModelsRequest.serialize,
452
+ response_deserializer=model_service.ListTunedModelsResponse.deserialize,
453
+ )
454
+ return self._stubs["list_tuned_models"]
455
+
456
+ @property
457
+ def create_tuned_model(
458
+ self,
459
+ ) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]:
460
+ r"""Return a callable for the create tuned model method over gRPC.
461
+
462
+ Creates a tuned model. Check intermediate tuning progress (if
463
+ any) through the [google.longrunning.Operations] service.
464
+
465
+ Access status and results through the Operations service.
466
+ Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
467
+
468
+ Returns:
469
+ Callable[[~.CreateTunedModelRequest],
470
+ ~.Operation]:
471
+ A function that, when called, will call the underlying RPC
472
+ on the server.
473
+ """
474
+ # Generate a "stub function" on-the-fly which will actually make
475
+ # the request.
476
+ # gRPC handles serialization and deserialization, so we just need
477
+ # to pass in the functions for each.
478
+ if "create_tuned_model" not in self._stubs:
479
+ self._stubs["create_tuned_model"] = self._logged_channel.unary_unary(
480
+ "/google.ai.generativelanguage.v1alpha.ModelService/CreateTunedModel",
481
+ request_serializer=model_service.CreateTunedModelRequest.serialize,
482
+ response_deserializer=operations_pb2.Operation.FromString,
483
+ )
484
+ return self._stubs["create_tuned_model"]
485
+
486
+ @property
487
+ def update_tuned_model(
488
+ self,
489
+ ) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]:
490
+ r"""Return a callable for the update tuned model method over gRPC.
491
+
492
+ Updates a tuned model.
493
+
494
+ Returns:
495
+ Callable[[~.UpdateTunedModelRequest],
496
+ ~.TunedModel]:
497
+ A function that, when called, will call the underlying RPC
498
+ on the server.
499
+ """
500
+ # Generate a "stub function" on-the-fly which will actually make
501
+ # the request.
502
+ # gRPC handles serialization and deserialization, so we just need
503
+ # to pass in the functions for each.
504
+ if "update_tuned_model" not in self._stubs:
505
+ self._stubs["update_tuned_model"] = self._logged_channel.unary_unary(
506
+ "/google.ai.generativelanguage.v1alpha.ModelService/UpdateTunedModel",
507
+ request_serializer=model_service.UpdateTunedModelRequest.serialize,
508
+ response_deserializer=gag_tuned_model.TunedModel.deserialize,
509
+ )
510
+ return self._stubs["update_tuned_model"]
511
+
512
+ @property
513
+ def delete_tuned_model(
514
+ self,
515
+ ) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]:
516
+ r"""Return a callable for the delete tuned model method over gRPC.
517
+
518
+ Deletes a tuned model.
519
+
520
+ Returns:
521
+ Callable[[~.DeleteTunedModelRequest],
522
+ ~.Empty]:
523
+ A function that, when called, will call the underlying RPC
524
+ on the server.
525
+ """
526
+ # Generate a "stub function" on-the-fly which will actually make
527
+ # the request.
528
+ # gRPC handles serialization and deserialization, so we just need
529
+ # to pass in the functions for each.
530
+ if "delete_tuned_model" not in self._stubs:
531
+ self._stubs["delete_tuned_model"] = self._logged_channel.unary_unary(
532
+ "/google.ai.generativelanguage.v1alpha.ModelService/DeleteTunedModel",
533
+ request_serializer=model_service.DeleteTunedModelRequest.serialize,
534
+ response_deserializer=empty_pb2.Empty.FromString,
535
+ )
536
+ return self._stubs["delete_tuned_model"]
537
+
538
+ def close(self):
539
+ self._logged_channel.close()
540
+
541
+ @property
542
+ def get_operation(
543
+ self,
544
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
545
+ r"""Return a callable for the get_operation method over gRPC."""
546
+ # Generate a "stub function" on-the-fly which will actually make
547
+ # the request.
548
+ # gRPC handles serialization and deserialization, so we just need
549
+ # to pass in the functions for each.
550
+ if "get_operation" not in self._stubs:
551
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
552
+ "/google.longrunning.Operations/GetOperation",
553
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
554
+ response_deserializer=operations_pb2.Operation.FromString,
555
+ )
556
+ return self._stubs["get_operation"]
557
+
558
+ @property
559
+ def list_operations(
560
+ self,
561
+ ) -> Callable[
562
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
563
+ ]:
564
+ r"""Return a callable for the list_operations method over gRPC."""
565
+ # Generate a "stub function" on-the-fly which will actually make
566
+ # the request.
567
+ # gRPC handles serialization and deserialization, so we just need
568
+ # to pass in the functions for each.
569
+ if "list_operations" not in self._stubs:
570
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
571
+ "/google.longrunning.Operations/ListOperations",
572
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
573
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
574
+ )
575
+ return self._stubs["list_operations"]
576
+
577
+ @property
578
+ def kind(self) -> str:
579
+ return "grpc"
580
+
581
+
582
+ __all__ = ("ModelServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,655 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf import empty_pb2 # type: ignore
30
+ from google.protobuf.json_format import MessageToJson
31
+ import google.protobuf.message
32
+ import grpc # type: ignore
33
+ from grpc.experimental import aio # type: ignore
34
+ import proto # type: ignore
35
+
36
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
37
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
38
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
39
+
40
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
41
+ from .grpc import ModelServiceGrpcTransport
42
+
43
+ try:
44
+ from google.api_core import client_logging # type: ignore
45
+
46
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
47
+ except ImportError: # pragma: NO COVER
48
+ CLIENT_LOGGING_SUPPORTED = False
49
+
50
+ _LOGGER = std_logging.getLogger(__name__)
51
+
52
+
53
+ class _LoggingClientAIOInterceptor(
54
+ grpc.aio.UnaryUnaryClientInterceptor
55
+ ): # pragma: NO COVER
56
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
57
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
58
+ std_logging.DEBUG
59
+ )
60
+ if logging_enabled: # pragma: NO COVER
61
+ request_metadata = client_call_details.metadata
62
+ if isinstance(request, proto.Message):
63
+ request_payload = type(request).to_json(request)
64
+ elif isinstance(request, google.protobuf.message.Message):
65
+ request_payload = MessageToJson(request)
66
+ else:
67
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
68
+
69
+ request_metadata = {
70
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
71
+ for key, value in request_metadata
72
+ }
73
+ grpc_request = {
74
+ "payload": request_payload,
75
+ "requestMethod": "grpc",
76
+ "metadata": dict(request_metadata),
77
+ }
78
+ _LOGGER.debug(
79
+ f"Sending request for {client_call_details.method}",
80
+ extra={
81
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
82
+ "rpcName": str(client_call_details.method),
83
+ "request": grpc_request,
84
+ "metadata": grpc_request["metadata"],
85
+ },
86
+ )
87
+ response = await continuation(client_call_details, request)
88
+ if logging_enabled: # pragma: NO COVER
89
+ response_metadata = await response.trailing_metadata()
90
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
91
+ metadata = (
92
+ dict([(k, str(v)) for k, v in response_metadata])
93
+ if response_metadata
94
+ else None
95
+ )
96
+ result = await response
97
+ if isinstance(result, proto.Message):
98
+ response_payload = type(result).to_json(result)
99
+ elif isinstance(result, google.protobuf.message.Message):
100
+ response_payload = MessageToJson(result)
101
+ else:
102
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
103
+ grpc_response = {
104
+ "payload": response_payload,
105
+ "metadata": metadata,
106
+ "status": "OK",
107
+ }
108
+ _LOGGER.debug(
109
+ f"Received response to rpc {client_call_details.method}.",
110
+ extra={
111
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
112
+ "rpcName": str(client_call_details.method),
113
+ "response": grpc_response,
114
+ "metadata": grpc_response["metadata"],
115
+ },
116
+ )
117
+ return response
118
+
119
+
120
+ class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport):
121
+ """gRPC AsyncIO backend transport for ModelService.
122
+
123
+ Provides methods for getting metadata information about
124
+ Generative Models.
125
+
126
+ This class defines the same methods as the primary client, so the
127
+ primary client can load the underlying transport implementation
128
+ and call it.
129
+
130
+ It sends protocol buffers over the wire using gRPC (which is built on
131
+ top of HTTP/2); the ``grpcio`` package must be installed.
132
+ """
133
+
134
+ _grpc_channel: aio.Channel
135
+ _stubs: Dict[str, Callable] = {}
136
+
137
+ @classmethod
138
+ def create_channel(
139
+ cls,
140
+ host: str = "generativelanguage.googleapis.com",
141
+ credentials: Optional[ga_credentials.Credentials] = None,
142
+ credentials_file: Optional[str] = None,
143
+ scopes: Optional[Sequence[str]] = None,
144
+ quota_project_id: Optional[str] = None,
145
+ **kwargs,
146
+ ) -> aio.Channel:
147
+ """Create and return a gRPC AsyncIO channel object.
148
+ Args:
149
+ host (Optional[str]): The host for the channel to use.
150
+ credentials (Optional[~.Credentials]): The
151
+ authorization credentials to attach to requests. These
152
+ credentials identify this application to the service. If
153
+ none are specified, the client will attempt to ascertain
154
+ the credentials from the environment.
155
+ credentials_file (Optional[str]): A file with credentials that can
156
+ be loaded with :func:`google.auth.load_credentials_from_file`.
157
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
158
+ service. These are only used when credentials are not specified and
159
+ are passed to :func:`google.auth.default`.
160
+ quota_project_id (Optional[str]): An optional project to use for billing
161
+ and quota.
162
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
163
+ channel creation.
164
+ Returns:
165
+ aio.Channel: A gRPC AsyncIO channel object.
166
+ """
167
+
168
+ return grpc_helpers_async.create_channel(
169
+ host,
170
+ credentials=credentials,
171
+ credentials_file=credentials_file,
172
+ quota_project_id=quota_project_id,
173
+ default_scopes=cls.AUTH_SCOPES,
174
+ scopes=scopes,
175
+ default_host=cls.DEFAULT_HOST,
176
+ **kwargs,
177
+ )
178
+
179
+ def __init__(
180
+ self,
181
+ *,
182
+ host: str = "generativelanguage.googleapis.com",
183
+ credentials: Optional[ga_credentials.Credentials] = None,
184
+ credentials_file: Optional[str] = None,
185
+ scopes: Optional[Sequence[str]] = None,
186
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
187
+ api_mtls_endpoint: Optional[str] = None,
188
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
189
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
190
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
191
+ quota_project_id: Optional[str] = None,
192
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
193
+ always_use_jwt_access: Optional[bool] = False,
194
+ api_audience: Optional[str] = None,
195
+ ) -> None:
196
+ """Instantiate the transport.
197
+
198
+ Args:
199
+ host (Optional[str]):
200
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
201
+ credentials (Optional[google.auth.credentials.Credentials]): The
202
+ authorization credentials to attach to requests. These
203
+ credentials identify the application to the service; if none
204
+ are specified, the client will attempt to ascertain the
205
+ credentials from the environment.
206
+ This argument is ignored if a ``channel`` instance is provided.
207
+ credentials_file (Optional[str]): A file with credentials that can
208
+ be loaded with :func:`google.auth.load_credentials_from_file`.
209
+ This argument is ignored if a ``channel`` instance is provided.
210
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
211
+ service. These are only used when credentials are not specified and
212
+ are passed to :func:`google.auth.default`.
213
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
214
+ A ``Channel`` instance through which to make calls, or a Callable
215
+ that constructs and returns one. If set to None, ``self.create_channel``
216
+ is used to create the channel. If a Callable is given, it will be called
217
+ with the same arguments as used in ``self.create_channel``.
218
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
219
+ If provided, it overrides the ``host`` argument and tries to create
220
+ a mutual TLS channel with client SSL credentials from
221
+ ``client_cert_source`` or application default SSL credentials.
222
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
223
+ Deprecated. A callback to provide client SSL certificate bytes and
224
+ private key bytes, both in PEM format. It is ignored if
225
+ ``api_mtls_endpoint`` is None.
226
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
227
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
228
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
229
+ A callback to provide client certificate bytes and private key bytes,
230
+ both in PEM format. It is used to configure a mutual TLS channel. It is
231
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
232
+ quota_project_id (Optional[str]): An optional project to use for billing
233
+ and quota.
234
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
235
+ The client info used to send a user-agent string along with
236
+ API requests. If ``None``, then default info will be used.
237
+ Generally, you only need to set this if you're developing
238
+ your own client library.
239
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
240
+ be used for service account credentials.
241
+
242
+ Raises:
243
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
244
+ creation failed for any reason.
245
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
246
+ and ``credentials_file`` are passed.
247
+ """
248
+ self._grpc_channel = None
249
+ self._ssl_channel_credentials = ssl_channel_credentials
250
+ self._stubs: Dict[str, Callable] = {}
251
+ self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
252
+
253
+ if api_mtls_endpoint:
254
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
255
+ if client_cert_source:
256
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
257
+
258
+ if isinstance(channel, aio.Channel):
259
+ # Ignore credentials if a channel was passed.
260
+ credentials = None
261
+ self._ignore_credentials = True
262
+ # If a channel was explicitly provided, set it.
263
+ self._grpc_channel = channel
264
+ self._ssl_channel_credentials = None
265
+ else:
266
+ if api_mtls_endpoint:
267
+ host = api_mtls_endpoint
268
+
269
+ # Create SSL credentials with client_cert_source or application
270
+ # default SSL credentials.
271
+ if client_cert_source:
272
+ cert, key = client_cert_source()
273
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
274
+ certificate_chain=cert, private_key=key
275
+ )
276
+ else:
277
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
278
+
279
+ else:
280
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
281
+ cert, key = client_cert_source_for_mtls()
282
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
283
+ certificate_chain=cert, private_key=key
284
+ )
285
+
286
+ # The base transport sets the host, credentials and scopes
287
+ super().__init__(
288
+ host=host,
289
+ credentials=credentials,
290
+ credentials_file=credentials_file,
291
+ scopes=scopes,
292
+ quota_project_id=quota_project_id,
293
+ client_info=client_info,
294
+ always_use_jwt_access=always_use_jwt_access,
295
+ api_audience=api_audience,
296
+ )
297
+
298
+ if not self._grpc_channel:
299
+ # initialize with the provided callable or the default channel
300
+ channel_init = channel or type(self).create_channel
301
+ self._grpc_channel = channel_init(
302
+ self._host,
303
+ # use the credentials which are saved
304
+ credentials=self._credentials,
305
+ # Set ``credentials_file`` to ``None`` here as
306
+ # the credentials that we saved earlier should be used.
307
+ credentials_file=None,
308
+ scopes=self._scopes,
309
+ ssl_credentials=self._ssl_channel_credentials,
310
+ quota_project_id=quota_project_id,
311
+ options=[
312
+ ("grpc.max_send_message_length", -1),
313
+ ("grpc.max_receive_message_length", -1),
314
+ ],
315
+ )
316
+
317
+ self._interceptor = _LoggingClientAIOInterceptor()
318
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
319
+ self._logged_channel = self._grpc_channel
320
+ self._wrap_with_kind = (
321
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
322
+ )
323
+ # Wrap messages. This must be done after self._logged_channel exists
324
+ self._prep_wrapped_messages(client_info)
325
+
326
+ @property
327
+ def grpc_channel(self) -> aio.Channel:
328
+ """Create the channel designed to connect to this service.
329
+
330
+ This property caches on the instance; repeated calls return
331
+ the same channel.
332
+ """
333
+ # Return the channel from cache.
334
+ return self._grpc_channel
335
+
336
+ @property
337
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
338
+ """Create the client designed to process long-running operations.
339
+
340
+ This property caches on the instance; repeated calls return the same
341
+ client.
342
+ """
343
+ # Quick check: Only create a new client if we do not already have one.
344
+ if self._operations_client is None:
345
+ self._operations_client = operations_v1.OperationsAsyncClient(
346
+ self._logged_channel
347
+ )
348
+
349
+ # Return the client from cache.
350
+ return self._operations_client
351
+
352
+ @property
353
+ def get_model(
354
+ self,
355
+ ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]:
356
+ r"""Return a callable for the get model method over gRPC.
357
+
358
+ Gets information about a specific ``Model`` such as its version
359
+ number, token limits,
360
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
361
+ and other metadata. Refer to the `Gemini models
362
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
363
+ for detailed model information.
364
+
365
+ Returns:
366
+ Callable[[~.GetModelRequest],
367
+ Awaitable[~.Model]]:
368
+ A function that, when called, will call the underlying RPC
369
+ on the server.
370
+ """
371
+ # Generate a "stub function" on-the-fly which will actually make
372
+ # the request.
373
+ # gRPC handles serialization and deserialization, so we just need
374
+ # to pass in the functions for each.
375
+ if "get_model" not in self._stubs:
376
+ self._stubs["get_model"] = self._logged_channel.unary_unary(
377
+ "/google.ai.generativelanguage.v1alpha.ModelService/GetModel",
378
+ request_serializer=model_service.GetModelRequest.serialize,
379
+ response_deserializer=model.Model.deserialize,
380
+ )
381
+ return self._stubs["get_model"]
382
+
383
+ @property
384
+ def list_models(
385
+ self,
386
+ ) -> Callable[
387
+ [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse]
388
+ ]:
389
+ r"""Return a callable for the list models method over gRPC.
390
+
391
+ Lists the
392
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
393
+ available through the Gemini API.
394
+
395
+ Returns:
396
+ Callable[[~.ListModelsRequest],
397
+ Awaitable[~.ListModelsResponse]]:
398
+ A function that, when called, will call the underlying RPC
399
+ on the server.
400
+ """
401
+ # Generate a "stub function" on-the-fly which will actually make
402
+ # the request.
403
+ # gRPC handles serialization and deserialization, so we just need
404
+ # to pass in the functions for each.
405
+ if "list_models" not in self._stubs:
406
+ self._stubs["list_models"] = self._logged_channel.unary_unary(
407
+ "/google.ai.generativelanguage.v1alpha.ModelService/ListModels",
408
+ request_serializer=model_service.ListModelsRequest.serialize,
409
+ response_deserializer=model_service.ListModelsResponse.deserialize,
410
+ )
411
+ return self._stubs["list_models"]
412
+
413
+ @property
414
+ def get_tuned_model(
415
+ self,
416
+ ) -> Callable[
417
+ [model_service.GetTunedModelRequest], Awaitable[tuned_model.TunedModel]
418
+ ]:
419
+ r"""Return a callable for the get tuned model method over gRPC.
420
+
421
+ Gets information about a specific TunedModel.
422
+
423
+ Returns:
424
+ Callable[[~.GetTunedModelRequest],
425
+ Awaitable[~.TunedModel]]:
426
+ A function that, when called, will call the underlying RPC
427
+ on the server.
428
+ """
429
+ # Generate a "stub function" on-the-fly which will actually make
430
+ # the request.
431
+ # gRPC handles serialization and deserialization, so we just need
432
+ # to pass in the functions for each.
433
+ if "get_tuned_model" not in self._stubs:
434
+ self._stubs["get_tuned_model"] = self._logged_channel.unary_unary(
435
+ "/google.ai.generativelanguage.v1alpha.ModelService/GetTunedModel",
436
+ request_serializer=model_service.GetTunedModelRequest.serialize,
437
+ response_deserializer=tuned_model.TunedModel.deserialize,
438
+ )
439
+ return self._stubs["get_tuned_model"]
440
+
441
+ @property
442
+ def list_tuned_models(
443
+ self,
444
+ ) -> Callable[
445
+ [model_service.ListTunedModelsRequest],
446
+ Awaitable[model_service.ListTunedModelsResponse],
447
+ ]:
448
+ r"""Return a callable for the list tuned models method over gRPC.
449
+
450
+ Lists created tuned models.
451
+
452
+ Returns:
453
+ Callable[[~.ListTunedModelsRequest],
454
+ Awaitable[~.ListTunedModelsResponse]]:
455
+ A function that, when called, will call the underlying RPC
456
+ on the server.
457
+ """
458
+ # Generate a "stub function" on-the-fly which will actually make
459
+ # the request.
460
+ # gRPC handles serialization and deserialization, so we just need
461
+ # to pass in the functions for each.
462
+ if "list_tuned_models" not in self._stubs:
463
+ self._stubs["list_tuned_models"] = self._logged_channel.unary_unary(
464
+ "/google.ai.generativelanguage.v1alpha.ModelService/ListTunedModels",
465
+ request_serializer=model_service.ListTunedModelsRequest.serialize,
466
+ response_deserializer=model_service.ListTunedModelsResponse.deserialize,
467
+ )
468
+ return self._stubs["list_tuned_models"]
469
+
470
+ @property
471
+ def create_tuned_model(
472
+ self,
473
+ ) -> Callable[
474
+ [model_service.CreateTunedModelRequest], Awaitable[operations_pb2.Operation]
475
+ ]:
476
+ r"""Return a callable for the create tuned model method over gRPC.
477
+
478
+ Creates a tuned model. Check intermediate tuning progress (if
479
+ any) through the [google.longrunning.Operations] service.
480
+
481
+ Access status and results through the Operations service.
482
+ Example: GET /v1/tunedModels/az2mb0bpw6i/operations/000-111-222
483
+
484
+ Returns:
485
+ Callable[[~.CreateTunedModelRequest],
486
+ Awaitable[~.Operation]]:
487
+ A function that, when called, will call the underlying RPC
488
+ on the server.
489
+ """
490
+ # Generate a "stub function" on-the-fly which will actually make
491
+ # the request.
492
+ # gRPC handles serialization and deserialization, so we just need
493
+ # to pass in the functions for each.
494
+ if "create_tuned_model" not in self._stubs:
495
+ self._stubs["create_tuned_model"] = self._logged_channel.unary_unary(
496
+ "/google.ai.generativelanguage.v1alpha.ModelService/CreateTunedModel",
497
+ request_serializer=model_service.CreateTunedModelRequest.serialize,
498
+ response_deserializer=operations_pb2.Operation.FromString,
499
+ )
500
+ return self._stubs["create_tuned_model"]
501
+
502
+ @property
503
+ def update_tuned_model(
504
+ self,
505
+ ) -> Callable[
506
+ [model_service.UpdateTunedModelRequest], Awaitable[gag_tuned_model.TunedModel]
507
+ ]:
508
+ r"""Return a callable for the update tuned model method over gRPC.
509
+
510
+ Updates a tuned model.
511
+
512
+ Returns:
513
+ Callable[[~.UpdateTunedModelRequest],
514
+ Awaitable[~.TunedModel]]:
515
+ A function that, when called, will call the underlying RPC
516
+ on the server.
517
+ """
518
+ # Generate a "stub function" on-the-fly which will actually make
519
+ # the request.
520
+ # gRPC handles serialization and deserialization, so we just need
521
+ # to pass in the functions for each.
522
+ if "update_tuned_model" not in self._stubs:
523
+ self._stubs["update_tuned_model"] = self._logged_channel.unary_unary(
524
+ "/google.ai.generativelanguage.v1alpha.ModelService/UpdateTunedModel",
525
+ request_serializer=model_service.UpdateTunedModelRequest.serialize,
526
+ response_deserializer=gag_tuned_model.TunedModel.deserialize,
527
+ )
528
+ return self._stubs["update_tuned_model"]
529
+
530
+ @property
531
+ def delete_tuned_model(
532
+ self,
533
+ ) -> Callable[[model_service.DeleteTunedModelRequest], Awaitable[empty_pb2.Empty]]:
534
+ r"""Return a callable for the delete tuned model method over gRPC.
535
+
536
+ Deletes a tuned model.
537
+
538
+ Returns:
539
+ Callable[[~.DeleteTunedModelRequest],
540
+ Awaitable[~.Empty]]:
541
+ A function that, when called, will call the underlying RPC
542
+ on the server.
543
+ """
544
+ # Generate a "stub function" on-the-fly which will actually make
545
+ # the request.
546
+ # gRPC handles serialization and deserialization, so we just need
547
+ # to pass in the functions for each.
548
+ if "delete_tuned_model" not in self._stubs:
549
+ self._stubs["delete_tuned_model"] = self._logged_channel.unary_unary(
550
+ "/google.ai.generativelanguage.v1alpha.ModelService/DeleteTunedModel",
551
+ request_serializer=model_service.DeleteTunedModelRequest.serialize,
552
+ response_deserializer=empty_pb2.Empty.FromString,
553
+ )
554
+ return self._stubs["delete_tuned_model"]
555
+
556
+ def _prep_wrapped_messages(self, client_info):
557
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
558
+ self._wrapped_methods = {
559
+ self.get_model: self._wrap_method(
560
+ self.get_model,
561
+ default_timeout=None,
562
+ client_info=client_info,
563
+ ),
564
+ self.list_models: self._wrap_method(
565
+ self.list_models,
566
+ default_timeout=None,
567
+ client_info=client_info,
568
+ ),
569
+ self.get_tuned_model: self._wrap_method(
570
+ self.get_tuned_model,
571
+ default_timeout=None,
572
+ client_info=client_info,
573
+ ),
574
+ self.list_tuned_models: self._wrap_method(
575
+ self.list_tuned_models,
576
+ default_timeout=None,
577
+ client_info=client_info,
578
+ ),
579
+ self.create_tuned_model: self._wrap_method(
580
+ self.create_tuned_model,
581
+ default_timeout=None,
582
+ client_info=client_info,
583
+ ),
584
+ self.update_tuned_model: self._wrap_method(
585
+ self.update_tuned_model,
586
+ default_timeout=None,
587
+ client_info=client_info,
588
+ ),
589
+ self.delete_tuned_model: self._wrap_method(
590
+ self.delete_tuned_model,
591
+ default_timeout=None,
592
+ client_info=client_info,
593
+ ),
594
+ self.get_operation: self._wrap_method(
595
+ self.get_operation,
596
+ default_timeout=None,
597
+ client_info=client_info,
598
+ ),
599
+ self.list_operations: self._wrap_method(
600
+ self.list_operations,
601
+ default_timeout=None,
602
+ client_info=client_info,
603
+ ),
604
+ }
605
+
606
+ def _wrap_method(self, func, *args, **kwargs):
607
+ if self._wrap_with_kind: # pragma: NO COVER
608
+ kwargs["kind"] = self.kind
609
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
610
+
611
+ def close(self):
612
+ return self._logged_channel.close()
613
+
614
+ @property
615
+ def kind(self) -> str:
616
+ return "grpc_asyncio"
617
+
618
+ @property
619
+ def get_operation(
620
+ self,
621
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
622
+ r"""Return a callable for the get_operation method over gRPC."""
623
+ # Generate a "stub function" on-the-fly which will actually make
624
+ # the request.
625
+ # gRPC handles serialization and deserialization, so we just need
626
+ # to pass in the functions for each.
627
+ if "get_operation" not in self._stubs:
628
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
629
+ "/google.longrunning.Operations/GetOperation",
630
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
631
+ response_deserializer=operations_pb2.Operation.FromString,
632
+ )
633
+ return self._stubs["get_operation"]
634
+
635
+ @property
636
+ def list_operations(
637
+ self,
638
+ ) -> Callable[
639
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
640
+ ]:
641
+ r"""Return a callable for the list_operations method over gRPC."""
642
+ # Generate a "stub function" on-the-fly which will actually make
643
+ # the request.
644
+ # gRPC handles serialization and deserialization, so we just need
645
+ # to pass in the functions for each.
646
+ if "list_operations" not in self._stubs:
647
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
648
+ "/google.longrunning.Operations/ListOperations",
649
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
650
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
651
+ )
652
+ return self._stubs["list_operations"]
653
+
654
+
655
+ __all__ = ("ModelServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/rest.py ADDED
@@ -0,0 +1,1819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, operations_v1, rest_helpers, rest_streaming
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.longrunning import operations_pb2 # type: ignore
28
+ from google.protobuf import empty_pb2 # type: ignore
29
+ from google.protobuf import json_format
30
+ from requests import __version__ as requests_version
31
+
32
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
33
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
34
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
35
+
36
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
37
+ from .rest_base import _BaseModelServiceRestTransport
38
+
39
+ try:
40
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
41
+ except AttributeError: # pragma: NO COVER
42
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
43
+
44
+ try:
45
+ from google.api_core import client_logging # type: ignore
46
+
47
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
48
+ except ImportError: # pragma: NO COVER
49
+ CLIENT_LOGGING_SUPPORTED = False
50
+
51
+ _LOGGER = logging.getLogger(__name__)
52
+
53
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
54
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
55
+ grpc_version=None,
56
+ rest_version=f"requests@{requests_version}",
57
+ )
58
+
59
+
60
+ class ModelServiceRestInterceptor:
61
+ """Interceptor for ModelService.
62
+
63
+ Interceptors are used to manipulate requests, request metadata, and responses
64
+ in arbitrary ways.
65
+ Example use cases include:
66
+ * Logging
67
+ * Verifying requests according to service or custom semantics
68
+ * Stripping extraneous information from responses
69
+
70
+ These use cases and more can be enabled by injecting an
71
+ instance of a custom subclass when constructing the ModelServiceRestTransport.
72
+
73
+ .. code-block:: python
74
+ class MyCustomModelServiceInterceptor(ModelServiceRestInterceptor):
75
+ def pre_create_tuned_model(self, request, metadata):
76
+ logging.log(f"Received request: {request}")
77
+ return request, metadata
78
+
79
+ def post_create_tuned_model(self, response):
80
+ logging.log(f"Received response: {response}")
81
+ return response
82
+
83
+ def pre_delete_tuned_model(self, request, metadata):
84
+ logging.log(f"Received request: {request}")
85
+ return request, metadata
86
+
87
+ def pre_get_model(self, request, metadata):
88
+ logging.log(f"Received request: {request}")
89
+ return request, metadata
90
+
91
+ def post_get_model(self, response):
92
+ logging.log(f"Received response: {response}")
93
+ return response
94
+
95
+ def pre_get_tuned_model(self, request, metadata):
96
+ logging.log(f"Received request: {request}")
97
+ return request, metadata
98
+
99
+ def post_get_tuned_model(self, response):
100
+ logging.log(f"Received response: {response}")
101
+ return response
102
+
103
+ def pre_list_models(self, request, metadata):
104
+ logging.log(f"Received request: {request}")
105
+ return request, metadata
106
+
107
+ def post_list_models(self, response):
108
+ logging.log(f"Received response: {response}")
109
+ return response
110
+
111
+ def pre_list_tuned_models(self, request, metadata):
112
+ logging.log(f"Received request: {request}")
113
+ return request, metadata
114
+
115
+ def post_list_tuned_models(self, response):
116
+ logging.log(f"Received response: {response}")
117
+ return response
118
+
119
+ def pre_update_tuned_model(self, request, metadata):
120
+ logging.log(f"Received request: {request}")
121
+ return request, metadata
122
+
123
+ def post_update_tuned_model(self, response):
124
+ logging.log(f"Received response: {response}")
125
+ return response
126
+
127
+ transport = ModelServiceRestTransport(interceptor=MyCustomModelServiceInterceptor())
128
+ client = ModelServiceClient(transport=transport)
129
+
130
+
131
+ """
132
+
133
+ def pre_create_tuned_model(
134
+ self,
135
+ request: model_service.CreateTunedModelRequest,
136
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
137
+ ) -> Tuple[
138
+ model_service.CreateTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
139
+ ]:
140
+ """Pre-rpc interceptor for create_tuned_model
141
+
142
+ Override in a subclass to manipulate the request or metadata
143
+ before they are sent to the ModelService server.
144
+ """
145
+ return request, metadata
146
+
147
+ def post_create_tuned_model(
148
+ self, response: operations_pb2.Operation
149
+ ) -> operations_pb2.Operation:
150
+ """Post-rpc interceptor for create_tuned_model
151
+
152
+ Override in a subclass to manipulate the response
153
+ after it is returned by the ModelService server but before
154
+ it is returned to user code.
155
+ """
156
+ return response
157
+
158
+ def pre_delete_tuned_model(
159
+ self,
160
+ request: model_service.DeleteTunedModelRequest,
161
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
162
+ ) -> Tuple[
163
+ model_service.DeleteTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
164
+ ]:
165
+ """Pre-rpc interceptor for delete_tuned_model
166
+
167
+ Override in a subclass to manipulate the request or metadata
168
+ before they are sent to the ModelService server.
169
+ """
170
+ return request, metadata
171
+
172
+ def pre_get_model(
173
+ self,
174
+ request: model_service.GetModelRequest,
175
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
176
+ ) -> Tuple[model_service.GetModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
177
+ """Pre-rpc interceptor for get_model
178
+
179
+ Override in a subclass to manipulate the request or metadata
180
+ before they are sent to the ModelService server.
181
+ """
182
+ return request, metadata
183
+
184
+ def post_get_model(self, response: model.Model) -> model.Model:
185
+ """Post-rpc interceptor for get_model
186
+
187
+ Override in a subclass to manipulate the response
188
+ after it is returned by the ModelService server but before
189
+ it is returned to user code.
190
+ """
191
+ return response
192
+
193
+ def pre_get_tuned_model(
194
+ self,
195
+ request: model_service.GetTunedModelRequest,
196
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
197
+ ) -> Tuple[
198
+ model_service.GetTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
199
+ ]:
200
+ """Pre-rpc interceptor for get_tuned_model
201
+
202
+ Override in a subclass to manipulate the request or metadata
203
+ before they are sent to the ModelService server.
204
+ """
205
+ return request, metadata
206
+
207
+ def post_get_tuned_model(
208
+ self, response: tuned_model.TunedModel
209
+ ) -> tuned_model.TunedModel:
210
+ """Post-rpc interceptor for get_tuned_model
211
+
212
+ Override in a subclass to manipulate the response
213
+ after it is returned by the ModelService server but before
214
+ it is returned to user code.
215
+ """
216
+ return response
217
+
218
+ def pre_list_models(
219
+ self,
220
+ request: model_service.ListModelsRequest,
221
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
222
+ ) -> Tuple[
223
+ model_service.ListModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]
224
+ ]:
225
+ """Pre-rpc interceptor for list_models
226
+
227
+ Override in a subclass to manipulate the request or metadata
228
+ before they are sent to the ModelService server.
229
+ """
230
+ return request, metadata
231
+
232
+ def post_list_models(
233
+ self, response: model_service.ListModelsResponse
234
+ ) -> model_service.ListModelsResponse:
235
+ """Post-rpc interceptor for list_models
236
+
237
+ Override in a subclass to manipulate the response
238
+ after it is returned by the ModelService server but before
239
+ it is returned to user code.
240
+ """
241
+ return response
242
+
243
+ def pre_list_tuned_models(
244
+ self,
245
+ request: model_service.ListTunedModelsRequest,
246
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
247
+ ) -> Tuple[
248
+ model_service.ListTunedModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]
249
+ ]:
250
+ """Pre-rpc interceptor for list_tuned_models
251
+
252
+ Override in a subclass to manipulate the request or metadata
253
+ before they are sent to the ModelService server.
254
+ """
255
+ return request, metadata
256
+
257
+ def post_list_tuned_models(
258
+ self, response: model_service.ListTunedModelsResponse
259
+ ) -> model_service.ListTunedModelsResponse:
260
+ """Post-rpc interceptor for list_tuned_models
261
+
262
+ Override in a subclass to manipulate the response
263
+ after it is returned by the ModelService server but before
264
+ it is returned to user code.
265
+ """
266
+ return response
267
+
268
+ def pre_update_tuned_model(
269
+ self,
270
+ request: model_service.UpdateTunedModelRequest,
271
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
272
+ ) -> Tuple[
273
+ model_service.UpdateTunedModelRequest, Sequence[Tuple[str, Union[str, bytes]]]
274
+ ]:
275
+ """Pre-rpc interceptor for update_tuned_model
276
+
277
+ Override in a subclass to manipulate the request or metadata
278
+ before they are sent to the ModelService server.
279
+ """
280
+ return request, metadata
281
+
282
+ def post_update_tuned_model(
283
+ self, response: gag_tuned_model.TunedModel
284
+ ) -> gag_tuned_model.TunedModel:
285
+ """Post-rpc interceptor for update_tuned_model
286
+
287
+ Override in a subclass to manipulate the response
288
+ after it is returned by the ModelService server but before
289
+ it is returned to user code.
290
+ """
291
+ return response
292
+
293
+ def pre_get_operation(
294
+ self,
295
+ request: operations_pb2.GetOperationRequest,
296
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
297
+ ) -> Tuple[
298
+ operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
299
+ ]:
300
+ """Pre-rpc interceptor for get_operation
301
+
302
+ Override in a subclass to manipulate the request or metadata
303
+ before they are sent to the ModelService server.
304
+ """
305
+ return request, metadata
306
+
307
+ def post_get_operation(
308
+ self, response: operations_pb2.Operation
309
+ ) -> operations_pb2.Operation:
310
+ """Post-rpc interceptor for get_operation
311
+
312
+ Override in a subclass to manipulate the response
313
+ after it is returned by the ModelService server but before
314
+ it is returned to user code.
315
+ """
316
+ return response
317
+
318
+ def pre_list_operations(
319
+ self,
320
+ request: operations_pb2.ListOperationsRequest,
321
+ metadata: Sequence[Tuple[str, Union[str, bytes]]],
322
+ ) -> Tuple[
323
+ operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
324
+ ]:
325
+ """Pre-rpc interceptor for list_operations
326
+
327
+ Override in a subclass to manipulate the request or metadata
328
+ before they are sent to the ModelService server.
329
+ """
330
+ return request, metadata
331
+
332
+ def post_list_operations(
333
+ self, response: operations_pb2.ListOperationsResponse
334
+ ) -> operations_pb2.ListOperationsResponse:
335
+ """Post-rpc interceptor for list_operations
336
+
337
+ Override in a subclass to manipulate the response
338
+ after it is returned by the ModelService server but before
339
+ it is returned to user code.
340
+ """
341
+ return response
342
+
343
+
344
@dataclasses.dataclass
class ModelServiceRestStub:
    """State shared with each per-RPC callable of the REST transport."""

    # Authorized HTTP session used to issue requests.
    _session: AuthorizedSession
    # Endpoint host requests are sent to.
    _host: str
    # Interceptor consulted before and after every RPC.
    _interceptor: ModelServiceRestInterceptor
349
+
350
+
351
+ class ModelServiceRestTransport(_BaseModelServiceRestTransport):
352
+ """REST backend synchronous transport for ModelService.
353
+
354
+ Provides methods for getting metadata information about
355
+ Generative Models.
356
+
357
+ This class defines the same methods as the primary client, so the
358
+ primary client can load the underlying transport implementation
359
+ and call it.
360
+
361
+ It sends JSON representations of protocol buffers over HTTP/1.1
362
+ """
363
+
364
    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[ModelServiceRestInterceptor] = None,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.

            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
            interceptor (Optional[ModelServiceRestInterceptor]): Hooks run
                before each request is sent and after each response is
                received. Defaults to a pass-through interceptor.
            api_audience (Optional[str]): Forwarded to the base transport
                constructor as the intended audience for the credentials.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            url_scheme=url_scheme,
            api_audience=api_audience,
        )
        # HTTP session carrying the credentials resolved by the base
        # constructor; all RPC stubs issue requests through it.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        # Long-running-operations client is built lazily by the
        # `operations_client` property.
        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
        # Mutual TLS is configured on the HTTP session, not on the base
        # transport, when a client certificate source is supplied.
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._interceptor = interceptor or ModelServiceRestInterceptor()
        self._prep_wrapped_messages(client_info)
431
+
432
    @property
    def operations_client(self) -> operations_v1.AbstractOperationsClient:
        """Create the client designed to process long-running operations.

        This property caches on the instance; repeated calls return the same
        client.

        Returns:
            operations_v1.AbstractOperationsClient: a REST-backed operations
            client that shares this transport's host, credentials, and
            scopes.
        """
        # Only create a new client if we do not already have one.
        if self._operations_client is None:
            # v1alpha routes for the google.longrunning.Operations service:
            # operations may live under tunedModels, generatedFiles, or models.
            http_options: Dict[str, List[Dict[str, str]]] = {
                "google.longrunning.Operations.GetOperation": [
                    {
                        "method": "get",
                        "uri": "/v1alpha/{name=tunedModels/*/operations/*}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1alpha/{name=generatedFiles/*/operations/*}",
                    },
                    {
                        "method": "get",
                        "uri": "/v1alpha/{name=models/*/operations/*}",
                    },
                ],
                "google.longrunning.Operations.ListOperations": [
                    {
                        "method": "get",
                        "uri": "/v1alpha/{name=tunedModels/*}/operations",
                    },
                    {
                        "method": "get",
                        "uri": "/v1alpha/{name=models/*}/operations",
                    },
                ],
            }

            rest_transport = operations_v1.OperationsRestTransport(
                host=self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                scopes=self._scopes,
                http_options=http_options,
                path_prefix="v1alpha",
            )

            self._operations_client = operations_v1.AbstractOperationsClient(
                transport=rest_transport
            )

        # Return the client from cache.
        return self._operations_client
483
+
484
+ class _CreateTunedModel(
485
+ _BaseModelServiceRestTransport._BaseCreateTunedModel, ModelServiceRestStub
486
+ ):
487
+ def __hash__(self):
488
+ return hash("ModelServiceRestTransport.CreateTunedModel")
489
+
490
+ @staticmethod
491
+ def _get_response(
492
+ host,
493
+ metadata,
494
+ query_params,
495
+ session,
496
+ timeout,
497
+ transcoded_request,
498
+ body=None,
499
+ ):
500
+ uri = transcoded_request["uri"]
501
+ method = transcoded_request["method"]
502
+ headers = dict(metadata)
503
+ headers["Content-Type"] = "application/json"
504
+ response = getattr(session, method)(
505
+ "{host}{uri}".format(host=host, uri=uri),
506
+ timeout=timeout,
507
+ headers=headers,
508
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
509
+ data=body,
510
+ )
511
+ return response
512
+
513
+ def __call__(
514
+ self,
515
+ request: model_service.CreateTunedModelRequest,
516
+ *,
517
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
518
+ timeout: Optional[float] = None,
519
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
520
+ ) -> operations_pb2.Operation:
521
+ r"""Call the create tuned model method over HTTP.
522
+
523
+ Args:
524
+ request (~.model_service.CreateTunedModelRequest):
525
+ The request object. Request to create a TunedModel.
526
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
527
+ should be retried.
528
+ timeout (float): The timeout for this request.
529
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
530
+ sent along with the request as metadata. Normally, each value must be of type `str`,
531
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
532
+ be of type `bytes`.
533
+
534
+ Returns:
535
+ ~.operations_pb2.Operation:
536
+ This resource represents a
537
+ long-running operation that is the
538
+ result of a network API call.
539
+
540
+ """
541
+
542
+ http_options = (
543
+ _BaseModelServiceRestTransport._BaseCreateTunedModel._get_http_options()
544
+ )
545
+
546
+ request, metadata = self._interceptor.pre_create_tuned_model(
547
+ request, metadata
548
+ )
549
+ transcoded_request = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_transcoded_request(
550
+ http_options, request
551
+ )
552
+
553
+ body = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_request_body_json(
554
+ transcoded_request
555
+ )
556
+
557
+ # Jsonify the query params
558
+ query_params = _BaseModelServiceRestTransport._BaseCreateTunedModel._get_query_params_json(
559
+ transcoded_request
560
+ )
561
+
562
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
563
+ logging.DEBUG
564
+ ): # pragma: NO COVER
565
+ request_url = "{host}{uri}".format(
566
+ host=self._host, uri=transcoded_request["uri"]
567
+ )
568
+ method = transcoded_request["method"]
569
+ try:
570
+ request_payload = json_format.MessageToJson(request)
571
+ except:
572
+ request_payload = None
573
+ http_request = {
574
+ "payload": request_payload,
575
+ "requestMethod": method,
576
+ "requestUrl": request_url,
577
+ "headers": dict(metadata),
578
+ }
579
+ _LOGGER.debug(
580
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.CreateTunedModel",
581
+ extra={
582
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
583
+ "rpcName": "CreateTunedModel",
584
+ "httpRequest": http_request,
585
+ "metadata": http_request["headers"],
586
+ },
587
+ )
588
+
589
+ # Send the request
590
+ response = ModelServiceRestTransport._CreateTunedModel._get_response(
591
+ self._host,
592
+ metadata,
593
+ query_params,
594
+ self._session,
595
+ timeout,
596
+ transcoded_request,
597
+ body,
598
+ )
599
+
600
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
601
+ # subclass.
602
+ if response.status_code >= 400:
603
+ raise core_exceptions.from_http_response(response)
604
+
605
+ # Return the response
606
+ resp = operations_pb2.Operation()
607
+ json_format.Parse(response.content, resp, ignore_unknown_fields=True)
608
+
609
+ resp = self._interceptor.post_create_tuned_model(resp)
610
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
611
+ logging.DEBUG
612
+ ): # pragma: NO COVER
613
+ try:
614
+ response_payload = json_format.MessageToJson(resp)
615
+ except:
616
+ response_payload = None
617
+ http_response = {
618
+ "payload": response_payload,
619
+ "headers": dict(response.headers),
620
+ "status": response.status_code,
621
+ }
622
+ _LOGGER.debug(
623
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.create_tuned_model",
624
+ extra={
625
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
626
+ "rpcName": "CreateTunedModel",
627
+ "metadata": http_response["headers"],
628
+ "httpResponse": http_response,
629
+ },
630
+ )
631
+ return resp
632
+
633
+ class _DeleteTunedModel(
634
+ _BaseModelServiceRestTransport._BaseDeleteTunedModel, ModelServiceRestStub
635
+ ):
636
+ def __hash__(self):
637
+ return hash("ModelServiceRestTransport.DeleteTunedModel")
638
+
639
+ @staticmethod
640
+ def _get_response(
641
+ host,
642
+ metadata,
643
+ query_params,
644
+ session,
645
+ timeout,
646
+ transcoded_request,
647
+ body=None,
648
+ ):
649
+ uri = transcoded_request["uri"]
650
+ method = transcoded_request["method"]
651
+ headers = dict(metadata)
652
+ headers["Content-Type"] = "application/json"
653
+ response = getattr(session, method)(
654
+ "{host}{uri}".format(host=host, uri=uri),
655
+ timeout=timeout,
656
+ headers=headers,
657
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
658
+ )
659
+ return response
660
+
661
+ def __call__(
662
+ self,
663
+ request: model_service.DeleteTunedModelRequest,
664
+ *,
665
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
666
+ timeout: Optional[float] = None,
667
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
668
+ ):
669
+ r"""Call the delete tuned model method over HTTP.
670
+
671
+ Args:
672
+ request (~.model_service.DeleteTunedModelRequest):
673
+ The request object. Request to delete a TunedModel.
674
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
675
+ should be retried.
676
+ timeout (float): The timeout for this request.
677
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
678
+ sent along with the request as metadata. Normally, each value must be of type `str`,
679
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
680
+ be of type `bytes`.
681
+ """
682
+
683
+ http_options = (
684
+ _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_http_options()
685
+ )
686
+
687
+ request, metadata = self._interceptor.pre_delete_tuned_model(
688
+ request, metadata
689
+ )
690
+ transcoded_request = _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_transcoded_request(
691
+ http_options, request
692
+ )
693
+
694
+ # Jsonify the query params
695
+ query_params = _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_query_params_json(
696
+ transcoded_request
697
+ )
698
+
699
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
700
+ logging.DEBUG
701
+ ): # pragma: NO COVER
702
+ request_url = "{host}{uri}".format(
703
+ host=self._host, uri=transcoded_request["uri"]
704
+ )
705
+ method = transcoded_request["method"]
706
+ try:
707
+ request_payload = json_format.MessageToJson(request)
708
+ except:
709
+ request_payload = None
710
+ http_request = {
711
+ "payload": request_payload,
712
+ "requestMethod": method,
713
+ "requestUrl": request_url,
714
+ "headers": dict(metadata),
715
+ }
716
+ _LOGGER.debug(
717
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.DeleteTunedModel",
718
+ extra={
719
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
720
+ "rpcName": "DeleteTunedModel",
721
+ "httpRequest": http_request,
722
+ "metadata": http_request["headers"],
723
+ },
724
+ )
725
+
726
+ # Send the request
727
+ response = ModelServiceRestTransport._DeleteTunedModel._get_response(
728
+ self._host,
729
+ metadata,
730
+ query_params,
731
+ self._session,
732
+ timeout,
733
+ transcoded_request,
734
+ )
735
+
736
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
737
+ # subclass.
738
+ if response.status_code >= 400:
739
+ raise core_exceptions.from_http_response(response)
740
+
741
+ class _GetModel(_BaseModelServiceRestTransport._BaseGetModel, ModelServiceRestStub):
742
+ def __hash__(self):
743
+ return hash("ModelServiceRestTransport.GetModel")
744
+
745
+ @staticmethod
746
+ def _get_response(
747
+ host,
748
+ metadata,
749
+ query_params,
750
+ session,
751
+ timeout,
752
+ transcoded_request,
753
+ body=None,
754
+ ):
755
+ uri = transcoded_request["uri"]
756
+ method = transcoded_request["method"]
757
+ headers = dict(metadata)
758
+ headers["Content-Type"] = "application/json"
759
+ response = getattr(session, method)(
760
+ "{host}{uri}".format(host=host, uri=uri),
761
+ timeout=timeout,
762
+ headers=headers,
763
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
764
+ )
765
+ return response
766
+
767
+ def __call__(
768
+ self,
769
+ request: model_service.GetModelRequest,
770
+ *,
771
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
772
+ timeout: Optional[float] = None,
773
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
774
+ ) -> model.Model:
775
+ r"""Call the get model method over HTTP.
776
+
777
+ Args:
778
+ request (~.model_service.GetModelRequest):
779
+ The request object. Request for getting information about
780
+ a specific Model.
781
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
782
+ should be retried.
783
+ timeout (float): The timeout for this request.
784
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
785
+ sent along with the request as metadata. Normally, each value must be of type `str`,
786
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
787
+ be of type `bytes`.
788
+
789
+ Returns:
790
+ ~.model.Model:
791
+ Information about a Generative
792
+ Language Model.
793
+
794
+ """
795
+
796
+ http_options = (
797
+ _BaseModelServiceRestTransport._BaseGetModel._get_http_options()
798
+ )
799
+
800
+ request, metadata = self._interceptor.pre_get_model(request, metadata)
801
+ transcoded_request = (
802
+ _BaseModelServiceRestTransport._BaseGetModel._get_transcoded_request(
803
+ http_options, request
804
+ )
805
+ )
806
+
807
+ # Jsonify the query params
808
+ query_params = (
809
+ _BaseModelServiceRestTransport._BaseGetModel._get_query_params_json(
810
+ transcoded_request
811
+ )
812
+ )
813
+
814
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
815
+ logging.DEBUG
816
+ ): # pragma: NO COVER
817
+ request_url = "{host}{uri}".format(
818
+ host=self._host, uri=transcoded_request["uri"]
819
+ )
820
+ method = transcoded_request["method"]
821
+ try:
822
+ request_payload = type(request).to_json(request)
823
+ except:
824
+ request_payload = None
825
+ http_request = {
826
+ "payload": request_payload,
827
+ "requestMethod": method,
828
+ "requestUrl": request_url,
829
+ "headers": dict(metadata),
830
+ }
831
+ _LOGGER.debug(
832
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.GetModel",
833
+ extra={
834
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
835
+ "rpcName": "GetModel",
836
+ "httpRequest": http_request,
837
+ "metadata": http_request["headers"],
838
+ },
839
+ )
840
+
841
+ # Send the request
842
+ response = ModelServiceRestTransport._GetModel._get_response(
843
+ self._host,
844
+ metadata,
845
+ query_params,
846
+ self._session,
847
+ timeout,
848
+ transcoded_request,
849
+ )
850
+
851
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
852
+ # subclass.
853
+ if response.status_code >= 400:
854
+ raise core_exceptions.from_http_response(response)
855
+
856
+ # Return the response
857
+ resp = model.Model()
858
+ pb_resp = model.Model.pb(resp)
859
+
860
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
861
+
862
+ resp = self._interceptor.post_get_model(resp)
863
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
864
+ logging.DEBUG
865
+ ): # pragma: NO COVER
866
+ try:
867
+ response_payload = model.Model.to_json(response)
868
+ except:
869
+ response_payload = None
870
+ http_response = {
871
+ "payload": response_payload,
872
+ "headers": dict(response.headers),
873
+ "status": response.status_code,
874
+ }
875
+ _LOGGER.debug(
876
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.get_model",
877
+ extra={
878
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
879
+ "rpcName": "GetModel",
880
+ "metadata": http_response["headers"],
881
+ "httpResponse": http_response,
882
+ },
883
+ )
884
+ return resp
885
+
886
+ class _GetTunedModel(
887
+ _BaseModelServiceRestTransport._BaseGetTunedModel, ModelServiceRestStub
888
+ ):
889
+ def __hash__(self):
890
+ return hash("ModelServiceRestTransport.GetTunedModel")
891
+
892
+ @staticmethod
893
+ def _get_response(
894
+ host,
895
+ metadata,
896
+ query_params,
897
+ session,
898
+ timeout,
899
+ transcoded_request,
900
+ body=None,
901
+ ):
902
+ uri = transcoded_request["uri"]
903
+ method = transcoded_request["method"]
904
+ headers = dict(metadata)
905
+ headers["Content-Type"] = "application/json"
906
+ response = getattr(session, method)(
907
+ "{host}{uri}".format(host=host, uri=uri),
908
+ timeout=timeout,
909
+ headers=headers,
910
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
911
+ )
912
+ return response
913
+
914
+ def __call__(
915
+ self,
916
+ request: model_service.GetTunedModelRequest,
917
+ *,
918
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
919
+ timeout: Optional[float] = None,
920
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
921
+ ) -> tuned_model.TunedModel:
922
+ r"""Call the get tuned model method over HTTP.
923
+
924
+ Args:
925
+ request (~.model_service.GetTunedModelRequest):
926
+ The request object. Request for getting information about
927
+ a specific Model.
928
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
929
+ should be retried.
930
+ timeout (float): The timeout for this request.
931
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
932
+ sent along with the request as metadata. Normally, each value must be of type `str`,
933
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
934
+ be of type `bytes`.
935
+
936
+ Returns:
937
+ ~.tuned_model.TunedModel:
938
+ A fine-tuned model created using
939
+ ModelService.CreateTunedModel.
940
+
941
+ """
942
+
943
+ http_options = (
944
+ _BaseModelServiceRestTransport._BaseGetTunedModel._get_http_options()
945
+ )
946
+
947
+ request, metadata = self._interceptor.pre_get_tuned_model(request, metadata)
948
+ transcoded_request = _BaseModelServiceRestTransport._BaseGetTunedModel._get_transcoded_request(
949
+ http_options, request
950
+ )
951
+
952
+ # Jsonify the query params
953
+ query_params = _BaseModelServiceRestTransport._BaseGetTunedModel._get_query_params_json(
954
+ transcoded_request
955
+ )
956
+
957
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
958
+ logging.DEBUG
959
+ ): # pragma: NO COVER
960
+ request_url = "{host}{uri}".format(
961
+ host=self._host, uri=transcoded_request["uri"]
962
+ )
963
+ method = transcoded_request["method"]
964
+ try:
965
+ request_payload = type(request).to_json(request)
966
+ except:
967
+ request_payload = None
968
+ http_request = {
969
+ "payload": request_payload,
970
+ "requestMethod": method,
971
+ "requestUrl": request_url,
972
+ "headers": dict(metadata),
973
+ }
974
+ _LOGGER.debug(
975
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.GetTunedModel",
976
+ extra={
977
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
978
+ "rpcName": "GetTunedModel",
979
+ "httpRequest": http_request,
980
+ "metadata": http_request["headers"],
981
+ },
982
+ )
983
+
984
+ # Send the request
985
+ response = ModelServiceRestTransport._GetTunedModel._get_response(
986
+ self._host,
987
+ metadata,
988
+ query_params,
989
+ self._session,
990
+ timeout,
991
+ transcoded_request,
992
+ )
993
+
994
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
995
+ # subclass.
996
+ if response.status_code >= 400:
997
+ raise core_exceptions.from_http_response(response)
998
+
999
+ # Return the response
1000
+ resp = tuned_model.TunedModel()
1001
+ pb_resp = tuned_model.TunedModel.pb(resp)
1002
+
1003
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
1004
+
1005
+ resp = self._interceptor.post_get_tuned_model(resp)
1006
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1007
+ logging.DEBUG
1008
+ ): # pragma: NO COVER
1009
+ try:
1010
+ response_payload = tuned_model.TunedModel.to_json(response)
1011
+ except:
1012
+ response_payload = None
1013
+ http_response = {
1014
+ "payload": response_payload,
1015
+ "headers": dict(response.headers),
1016
+ "status": response.status_code,
1017
+ }
1018
+ _LOGGER.debug(
1019
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.get_tuned_model",
1020
+ extra={
1021
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1022
+ "rpcName": "GetTunedModel",
1023
+ "metadata": http_response["headers"],
1024
+ "httpResponse": http_response,
1025
+ },
1026
+ )
1027
+ return resp
1028
+
1029
+ class _ListModels(
1030
+ _BaseModelServiceRestTransport._BaseListModels, ModelServiceRestStub
1031
+ ):
1032
+ def __hash__(self):
1033
+ return hash("ModelServiceRestTransport.ListModels")
1034
+
1035
+ @staticmethod
1036
+ def _get_response(
1037
+ host,
1038
+ metadata,
1039
+ query_params,
1040
+ session,
1041
+ timeout,
1042
+ transcoded_request,
1043
+ body=None,
1044
+ ):
1045
+ uri = transcoded_request["uri"]
1046
+ method = transcoded_request["method"]
1047
+ headers = dict(metadata)
1048
+ headers["Content-Type"] = "application/json"
1049
+ response = getattr(session, method)(
1050
+ "{host}{uri}".format(host=host, uri=uri),
1051
+ timeout=timeout,
1052
+ headers=headers,
1053
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
1054
+ )
1055
+ return response
1056
+
1057
+ def __call__(
1058
+ self,
1059
+ request: model_service.ListModelsRequest,
1060
+ *,
1061
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1062
+ timeout: Optional[float] = None,
1063
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1064
+ ) -> model_service.ListModelsResponse:
1065
+ r"""Call the list models method over HTTP.
1066
+
1067
+ Args:
1068
+ request (~.model_service.ListModelsRequest):
1069
+ The request object. Request for listing all Models.
1070
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1071
+ should be retried.
1072
+ timeout (float): The timeout for this request.
1073
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1074
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1075
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1076
+ be of type `bytes`.
1077
+
1078
+ Returns:
1079
+ ~.model_service.ListModelsResponse:
1080
+ Response from ``ListModel`` containing a paginated list
1081
+ of Models.
1082
+
1083
+ """
1084
+
1085
+ http_options = (
1086
+ _BaseModelServiceRestTransport._BaseListModels._get_http_options()
1087
+ )
1088
+
1089
+ request, metadata = self._interceptor.pre_list_models(request, metadata)
1090
+ transcoded_request = (
1091
+ _BaseModelServiceRestTransport._BaseListModels._get_transcoded_request(
1092
+ http_options, request
1093
+ )
1094
+ )
1095
+
1096
+ # Jsonify the query params
1097
+ query_params = (
1098
+ _BaseModelServiceRestTransport._BaseListModels._get_query_params_json(
1099
+ transcoded_request
1100
+ )
1101
+ )
1102
+
1103
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1104
+ logging.DEBUG
1105
+ ): # pragma: NO COVER
1106
+ request_url = "{host}{uri}".format(
1107
+ host=self._host, uri=transcoded_request["uri"]
1108
+ )
1109
+ method = transcoded_request["method"]
1110
+ try:
1111
+ request_payload = type(request).to_json(request)
1112
+ except:
1113
+ request_payload = None
1114
+ http_request = {
1115
+ "payload": request_payload,
1116
+ "requestMethod": method,
1117
+ "requestUrl": request_url,
1118
+ "headers": dict(metadata),
1119
+ }
1120
+ _LOGGER.debug(
1121
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.ListModels",
1122
+ extra={
1123
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1124
+ "rpcName": "ListModels",
1125
+ "httpRequest": http_request,
1126
+ "metadata": http_request["headers"],
1127
+ },
1128
+ )
1129
+
1130
+ # Send the request
1131
+ response = ModelServiceRestTransport._ListModels._get_response(
1132
+ self._host,
1133
+ metadata,
1134
+ query_params,
1135
+ self._session,
1136
+ timeout,
1137
+ transcoded_request,
1138
+ )
1139
+
1140
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
1141
+ # subclass.
1142
+ if response.status_code >= 400:
1143
+ raise core_exceptions.from_http_response(response)
1144
+
1145
+ # Return the response
1146
+ resp = model_service.ListModelsResponse()
1147
+ pb_resp = model_service.ListModelsResponse.pb(resp)
1148
+
1149
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
1150
+
1151
+ resp = self._interceptor.post_list_models(resp)
1152
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1153
+ logging.DEBUG
1154
+ ): # pragma: NO COVER
1155
+ try:
1156
+ response_payload = model_service.ListModelsResponse.to_json(
1157
+ response
1158
+ )
1159
+ except:
1160
+ response_payload = None
1161
+ http_response = {
1162
+ "payload": response_payload,
1163
+ "headers": dict(response.headers),
1164
+ "status": response.status_code,
1165
+ }
1166
+ _LOGGER.debug(
1167
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.list_models",
1168
+ extra={
1169
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1170
+ "rpcName": "ListModels",
1171
+ "metadata": http_response["headers"],
1172
+ "httpResponse": http_response,
1173
+ },
1174
+ )
1175
+ return resp
1176
+
1177
+ class _ListTunedModels(
1178
+ _BaseModelServiceRestTransport._BaseListTunedModels, ModelServiceRestStub
1179
+ ):
1180
+ def __hash__(self):
1181
+ return hash("ModelServiceRestTransport.ListTunedModels")
1182
+
1183
+ @staticmethod
1184
+ def _get_response(
1185
+ host,
1186
+ metadata,
1187
+ query_params,
1188
+ session,
1189
+ timeout,
1190
+ transcoded_request,
1191
+ body=None,
1192
+ ):
1193
+ uri = transcoded_request["uri"]
1194
+ method = transcoded_request["method"]
1195
+ headers = dict(metadata)
1196
+ headers["Content-Type"] = "application/json"
1197
+ response = getattr(session, method)(
1198
+ "{host}{uri}".format(host=host, uri=uri),
1199
+ timeout=timeout,
1200
+ headers=headers,
1201
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
1202
+ )
1203
+ return response
1204
+
1205
+ def __call__(
1206
+ self,
1207
+ request: model_service.ListTunedModelsRequest,
1208
+ *,
1209
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1210
+ timeout: Optional[float] = None,
1211
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1212
+ ) -> model_service.ListTunedModelsResponse:
1213
+ r"""Call the list tuned models method over HTTP.
1214
+
1215
+ Args:
1216
+ request (~.model_service.ListTunedModelsRequest):
1217
+ The request object. Request for listing TunedModels.
1218
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1219
+ should be retried.
1220
+ timeout (float): The timeout for this request.
1221
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1222
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1223
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1224
+ be of type `bytes`.
1225
+
1226
+ Returns:
1227
+ ~.model_service.ListTunedModelsResponse:
1228
+ Response from ``ListTunedModels`` containing a paginated
1229
+ list of Models.
1230
+
1231
+ """
1232
+
1233
+ http_options = (
1234
+ _BaseModelServiceRestTransport._BaseListTunedModels._get_http_options()
1235
+ )
1236
+
1237
+ request, metadata = self._interceptor.pre_list_tuned_models(
1238
+ request, metadata
1239
+ )
1240
+ transcoded_request = _BaseModelServiceRestTransport._BaseListTunedModels._get_transcoded_request(
1241
+ http_options, request
1242
+ )
1243
+
1244
+ # Jsonify the query params
1245
+ query_params = _BaseModelServiceRestTransport._BaseListTunedModels._get_query_params_json(
1246
+ transcoded_request
1247
+ )
1248
+
1249
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1250
+ logging.DEBUG
1251
+ ): # pragma: NO COVER
1252
+ request_url = "{host}{uri}".format(
1253
+ host=self._host, uri=transcoded_request["uri"]
1254
+ )
1255
+ method = transcoded_request["method"]
1256
+ try:
1257
+ request_payload = type(request).to_json(request)
1258
+ except:
1259
+ request_payload = None
1260
+ http_request = {
1261
+ "payload": request_payload,
1262
+ "requestMethod": method,
1263
+ "requestUrl": request_url,
1264
+ "headers": dict(metadata),
1265
+ }
1266
+ _LOGGER.debug(
1267
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.ListTunedModels",
1268
+ extra={
1269
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1270
+ "rpcName": "ListTunedModels",
1271
+ "httpRequest": http_request,
1272
+ "metadata": http_request["headers"],
1273
+ },
1274
+ )
1275
+
1276
+ # Send the request
1277
+ response = ModelServiceRestTransport._ListTunedModels._get_response(
1278
+ self._host,
1279
+ metadata,
1280
+ query_params,
1281
+ self._session,
1282
+ timeout,
1283
+ transcoded_request,
1284
+ )
1285
+
1286
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
1287
+ # subclass.
1288
+ if response.status_code >= 400:
1289
+ raise core_exceptions.from_http_response(response)
1290
+
1291
+ # Return the response
1292
+ resp = model_service.ListTunedModelsResponse()
1293
+ pb_resp = model_service.ListTunedModelsResponse.pb(resp)
1294
+
1295
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
1296
+
1297
+ resp = self._interceptor.post_list_tuned_models(resp)
1298
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1299
+ logging.DEBUG
1300
+ ): # pragma: NO COVER
1301
+ try:
1302
+ response_payload = model_service.ListTunedModelsResponse.to_json(
1303
+ response
1304
+ )
1305
+ except:
1306
+ response_payload = None
1307
+ http_response = {
1308
+ "payload": response_payload,
1309
+ "headers": dict(response.headers),
1310
+ "status": response.status_code,
1311
+ }
1312
+ _LOGGER.debug(
1313
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.list_tuned_models",
1314
+ extra={
1315
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1316
+ "rpcName": "ListTunedModels",
1317
+ "metadata": http_response["headers"],
1318
+ "httpResponse": http_response,
1319
+ },
1320
+ )
1321
+ return resp
1322
+
1323
+ class _UpdateTunedModel(
1324
+ _BaseModelServiceRestTransport._BaseUpdateTunedModel, ModelServiceRestStub
1325
+ ):
1326
+ def __hash__(self):
1327
+ return hash("ModelServiceRestTransport.UpdateTunedModel")
1328
+
1329
+ @staticmethod
1330
+ def _get_response(
1331
+ host,
1332
+ metadata,
1333
+ query_params,
1334
+ session,
1335
+ timeout,
1336
+ transcoded_request,
1337
+ body=None,
1338
+ ):
1339
+ uri = transcoded_request["uri"]
1340
+ method = transcoded_request["method"]
1341
+ headers = dict(metadata)
1342
+ headers["Content-Type"] = "application/json"
1343
+ response = getattr(session, method)(
1344
+ "{host}{uri}".format(host=host, uri=uri),
1345
+ timeout=timeout,
1346
+ headers=headers,
1347
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
1348
+ data=body,
1349
+ )
1350
+ return response
1351
+
1352
+ def __call__(
1353
+ self,
1354
+ request: model_service.UpdateTunedModelRequest,
1355
+ *,
1356
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1357
+ timeout: Optional[float] = None,
1358
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1359
+ ) -> gag_tuned_model.TunedModel:
1360
+ r"""Call the update tuned model method over HTTP.
1361
+
1362
+ Args:
1363
+ request (~.model_service.UpdateTunedModelRequest):
1364
+ The request object. Request to update a TunedModel.
1365
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1366
+ should be retried.
1367
+ timeout (float): The timeout for this request.
1368
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1369
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1370
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1371
+ be of type `bytes`.
1372
+
1373
+ Returns:
1374
+ ~.gag_tuned_model.TunedModel:
1375
+ A fine-tuned model created using
1376
+ ModelService.CreateTunedModel.
1377
+
1378
+ """
1379
+
1380
+ http_options = (
1381
+ _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_http_options()
1382
+ )
1383
+
1384
+ request, metadata = self._interceptor.pre_update_tuned_model(
1385
+ request, metadata
1386
+ )
1387
+ transcoded_request = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_transcoded_request(
1388
+ http_options, request
1389
+ )
1390
+
1391
+ body = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_request_body_json(
1392
+ transcoded_request
1393
+ )
1394
+
1395
+ # Jsonify the query params
1396
+ query_params = _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_query_params_json(
1397
+ transcoded_request
1398
+ )
1399
+
1400
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1401
+ logging.DEBUG
1402
+ ): # pragma: NO COVER
1403
+ request_url = "{host}{uri}".format(
1404
+ host=self._host, uri=transcoded_request["uri"]
1405
+ )
1406
+ method = transcoded_request["method"]
1407
+ try:
1408
+ request_payload = type(request).to_json(request)
1409
+ except:
1410
+ request_payload = None
1411
+ http_request = {
1412
+ "payload": request_payload,
1413
+ "requestMethod": method,
1414
+ "requestUrl": request_url,
1415
+ "headers": dict(metadata),
1416
+ }
1417
+ _LOGGER.debug(
1418
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.UpdateTunedModel",
1419
+ extra={
1420
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1421
+ "rpcName": "UpdateTunedModel",
1422
+ "httpRequest": http_request,
1423
+ "metadata": http_request["headers"],
1424
+ },
1425
+ )
1426
+
1427
+ # Send the request
1428
+ response = ModelServiceRestTransport._UpdateTunedModel._get_response(
1429
+ self._host,
1430
+ metadata,
1431
+ query_params,
1432
+ self._session,
1433
+ timeout,
1434
+ transcoded_request,
1435
+ body,
1436
+ )
1437
+
1438
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
1439
+ # subclass.
1440
+ if response.status_code >= 400:
1441
+ raise core_exceptions.from_http_response(response)
1442
+
1443
+ # Return the response
1444
+ resp = gag_tuned_model.TunedModel()
1445
+ pb_resp = gag_tuned_model.TunedModel.pb(resp)
1446
+
1447
+ json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
1448
+
1449
+ resp = self._interceptor.post_update_tuned_model(resp)
1450
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1451
+ logging.DEBUG
1452
+ ): # pragma: NO COVER
1453
+ try:
1454
+ response_payload = gag_tuned_model.TunedModel.to_json(response)
1455
+ except:
1456
+ response_payload = None
1457
+ http_response = {
1458
+ "payload": response_payload,
1459
+ "headers": dict(response.headers),
1460
+ "status": response.status_code,
1461
+ }
1462
+ _LOGGER.debug(
1463
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceClient.update_tuned_model",
1464
+ extra={
1465
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1466
+ "rpcName": "UpdateTunedModel",
1467
+ "metadata": http_response["headers"],
1468
+ "httpResponse": http_response,
1469
+ },
1470
+ )
1471
+ return resp
1472
+
1473
+ @property
1474
+ def create_tuned_model(
1475
+ self,
1476
+ ) -> Callable[[model_service.CreateTunedModelRequest], operations_pb2.Operation]:
1477
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1478
+ # In C++ this would require a dynamic_cast
1479
+ return self._CreateTunedModel(self._session, self._host, self._interceptor) # type: ignore
1480
+
1481
+ @property
1482
+ def delete_tuned_model(
1483
+ self,
1484
+ ) -> Callable[[model_service.DeleteTunedModelRequest], empty_pb2.Empty]:
1485
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1486
+ # In C++ this would require a dynamic_cast
1487
+ return self._DeleteTunedModel(self._session, self._host, self._interceptor) # type: ignore
1488
+
1489
+ @property
1490
+ def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
1491
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1492
+ # In C++ this would require a dynamic_cast
1493
+ return self._GetModel(self._session, self._host, self._interceptor) # type: ignore
1494
+
1495
+ @property
1496
+ def get_tuned_model(
1497
+ self,
1498
+ ) -> Callable[[model_service.GetTunedModelRequest], tuned_model.TunedModel]:
1499
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1500
+ # In C++ this would require a dynamic_cast
1501
+ return self._GetTunedModel(self._session, self._host, self._interceptor) # type: ignore
1502
+
1503
+ @property
1504
+ def list_models(
1505
+ self,
1506
+ ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
1507
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1508
+ # In C++ this would require a dynamic_cast
1509
+ return self._ListModels(self._session, self._host, self._interceptor) # type: ignore
1510
+
1511
+ @property
1512
+ def list_tuned_models(
1513
+ self,
1514
+ ) -> Callable[
1515
+ [model_service.ListTunedModelsRequest], model_service.ListTunedModelsResponse
1516
+ ]:
1517
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1518
+ # In C++ this would require a dynamic_cast
1519
+ return self._ListTunedModels(self._session, self._host, self._interceptor) # type: ignore
1520
+
1521
+ @property
1522
+ def update_tuned_model(
1523
+ self,
1524
+ ) -> Callable[[model_service.UpdateTunedModelRequest], gag_tuned_model.TunedModel]:
1525
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
1526
+ # In C++ this would require a dynamic_cast
1527
+ return self._UpdateTunedModel(self._session, self._host, self._interceptor) # type: ignore
1528
+
1529
+ @property
1530
+ def get_operation(self):
1531
+ return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
1532
+
1533
+ class _GetOperation(
1534
+ _BaseModelServiceRestTransport._BaseGetOperation, ModelServiceRestStub
1535
+ ):
1536
+ def __hash__(self):
1537
+ return hash("ModelServiceRestTransport.GetOperation")
1538
+
1539
+ @staticmethod
1540
+ def _get_response(
1541
+ host,
1542
+ metadata,
1543
+ query_params,
1544
+ session,
1545
+ timeout,
1546
+ transcoded_request,
1547
+ body=None,
1548
+ ):
1549
+ uri = transcoded_request["uri"]
1550
+ method = transcoded_request["method"]
1551
+ headers = dict(metadata)
1552
+ headers["Content-Type"] = "application/json"
1553
+ response = getattr(session, method)(
1554
+ "{host}{uri}".format(host=host, uri=uri),
1555
+ timeout=timeout,
1556
+ headers=headers,
1557
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
1558
+ )
1559
+ return response
1560
+
1561
+ def __call__(
1562
+ self,
1563
+ request: operations_pb2.GetOperationRequest,
1564
+ *,
1565
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1566
+ timeout: Optional[float] = None,
1567
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1568
+ ) -> operations_pb2.Operation:
1569
+ r"""Call the get operation method over HTTP.
1570
+
1571
+ Args:
1572
+ request (operations_pb2.GetOperationRequest):
1573
+ The request object for GetOperation method.
1574
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1575
+ should be retried.
1576
+ timeout (float): The timeout for this request.
1577
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1578
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1579
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1580
+ be of type `bytes`.
1581
+
1582
+ Returns:
1583
+ operations_pb2.Operation: Response from GetOperation method.
1584
+ """
1585
+
1586
+ http_options = (
1587
+ _BaseModelServiceRestTransport._BaseGetOperation._get_http_options()
1588
+ )
1589
+
1590
+ request, metadata = self._interceptor.pre_get_operation(request, metadata)
1591
+ transcoded_request = _BaseModelServiceRestTransport._BaseGetOperation._get_transcoded_request(
1592
+ http_options, request
1593
+ )
1594
+
1595
+ # Jsonify the query params
1596
+ query_params = (
1597
+ _BaseModelServiceRestTransport._BaseGetOperation._get_query_params_json(
1598
+ transcoded_request
1599
+ )
1600
+ )
1601
+
1602
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1603
+ logging.DEBUG
1604
+ ): # pragma: NO COVER
1605
+ request_url = "{host}{uri}".format(
1606
+ host=self._host, uri=transcoded_request["uri"]
1607
+ )
1608
+ method = transcoded_request["method"]
1609
+ try:
1610
+ request_payload = json_format.MessageToJson(request)
1611
+ except:
1612
+ request_payload = None
1613
+ http_request = {
1614
+ "payload": request_payload,
1615
+ "requestMethod": method,
1616
+ "requestUrl": request_url,
1617
+ "headers": dict(metadata),
1618
+ }
1619
+ _LOGGER.debug(
1620
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.GetOperation",
1621
+ extra={
1622
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1623
+ "rpcName": "GetOperation",
1624
+ "httpRequest": http_request,
1625
+ "metadata": http_request["headers"],
1626
+ },
1627
+ )
1628
+
1629
+ # Send the request
1630
+ response = ModelServiceRestTransport._GetOperation._get_response(
1631
+ self._host,
1632
+ metadata,
1633
+ query_params,
1634
+ self._session,
1635
+ timeout,
1636
+ transcoded_request,
1637
+ )
1638
+
1639
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
1640
+ # subclass.
1641
+ if response.status_code >= 400:
1642
+ raise core_exceptions.from_http_response(response)
1643
+
1644
+ content = response.content.decode("utf-8")
1645
+ resp = operations_pb2.Operation()
1646
+ resp = json_format.Parse(content, resp)
1647
+ resp = self._interceptor.post_get_operation(resp)
1648
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1649
+ logging.DEBUG
1650
+ ): # pragma: NO COVER
1651
+ try:
1652
+ response_payload = json_format.MessageToJson(resp)
1653
+ except:
1654
+ response_payload = None
1655
+ http_response = {
1656
+ "payload": response_payload,
1657
+ "headers": dict(response.headers),
1658
+ "status": response.status_code,
1659
+ }
1660
+ _LOGGER.debug(
1661
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceAsyncClient.GetOperation",
1662
+ extra={
1663
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1664
+ "rpcName": "GetOperation",
1665
+ "httpResponse": http_response,
1666
+ "metadata": http_response["headers"],
1667
+ },
1668
+ )
1669
+ return resp
1670
+
1671
+ @property
1672
+ def list_operations(self):
1673
+ return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
1674
+
1675
+ class _ListOperations(
1676
+ _BaseModelServiceRestTransport._BaseListOperations, ModelServiceRestStub
1677
+ ):
1678
+ def __hash__(self):
1679
+ return hash("ModelServiceRestTransport.ListOperations")
1680
+
1681
+ @staticmethod
1682
+ def _get_response(
1683
+ host,
1684
+ metadata,
1685
+ query_params,
1686
+ session,
1687
+ timeout,
1688
+ transcoded_request,
1689
+ body=None,
1690
+ ):
1691
+ uri = transcoded_request["uri"]
1692
+ method = transcoded_request["method"]
1693
+ headers = dict(metadata)
1694
+ headers["Content-Type"] = "application/json"
1695
+ response = getattr(session, method)(
1696
+ "{host}{uri}".format(host=host, uri=uri),
1697
+ timeout=timeout,
1698
+ headers=headers,
1699
+ params=rest_helpers.flatten_query_params(query_params, strict=True),
1700
+ )
1701
+ return response
1702
+
1703
+ def __call__(
1704
+ self,
1705
+ request: operations_pb2.ListOperationsRequest,
1706
+ *,
1707
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1708
+ timeout: Optional[float] = None,
1709
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1710
+ ) -> operations_pb2.ListOperationsResponse:
1711
+ r"""Call the list operations method over HTTP.
1712
+
1713
+ Args:
1714
+ request (operations_pb2.ListOperationsRequest):
1715
+ The request object for ListOperations method.
1716
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1717
+ should be retried.
1718
+ timeout (float): The timeout for this request.
1719
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1720
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1721
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1722
+ be of type `bytes`.
1723
+
1724
+ Returns:
1725
+ operations_pb2.ListOperationsResponse: Response from ListOperations method.
1726
+ """
1727
+
1728
+ http_options = (
1729
+ _BaseModelServiceRestTransport._BaseListOperations._get_http_options()
1730
+ )
1731
+
1732
+ request, metadata = self._interceptor.pre_list_operations(request, metadata)
1733
+ transcoded_request = _BaseModelServiceRestTransport._BaseListOperations._get_transcoded_request(
1734
+ http_options, request
1735
+ )
1736
+
1737
+ # Jsonify the query params
1738
+ query_params = _BaseModelServiceRestTransport._BaseListOperations._get_query_params_json(
1739
+ transcoded_request
1740
+ )
1741
+
1742
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1743
+ logging.DEBUG
1744
+ ): # pragma: NO COVER
1745
+ request_url = "{host}{uri}".format(
1746
+ host=self._host, uri=transcoded_request["uri"]
1747
+ )
1748
+ method = transcoded_request["method"]
1749
+ try:
1750
+ request_payload = json_format.MessageToJson(request)
1751
+ except:
1752
+ request_payload = None
1753
+ http_request = {
1754
+ "payload": request_payload,
1755
+ "requestMethod": method,
1756
+ "requestUrl": request_url,
1757
+ "headers": dict(metadata),
1758
+ }
1759
+ _LOGGER.debug(
1760
+ f"Sending request for google.ai.generativelanguage_v1alpha.ModelServiceClient.ListOperations",
1761
+ extra={
1762
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1763
+ "rpcName": "ListOperations",
1764
+ "httpRequest": http_request,
1765
+ "metadata": http_request["headers"],
1766
+ },
1767
+ )
1768
+
1769
+ # Send the request
1770
+ response = ModelServiceRestTransport._ListOperations._get_response(
1771
+ self._host,
1772
+ metadata,
1773
+ query_params,
1774
+ self._session,
1775
+ timeout,
1776
+ transcoded_request,
1777
+ )
1778
+
1779
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
1780
+ # subclass.
1781
+ if response.status_code >= 400:
1782
+ raise core_exceptions.from_http_response(response)
1783
+
1784
+ content = response.content.decode("utf-8")
1785
+ resp = operations_pb2.ListOperationsResponse()
1786
+ resp = json_format.Parse(content, resp)
1787
+ resp = self._interceptor.post_list_operations(resp)
1788
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
1789
+ logging.DEBUG
1790
+ ): # pragma: NO COVER
1791
+ try:
1792
+ response_payload = json_format.MessageToJson(resp)
1793
+ except:
1794
+ response_payload = None
1795
+ http_response = {
1796
+ "payload": response_payload,
1797
+ "headers": dict(response.headers),
1798
+ "status": response.status_code,
1799
+ }
1800
+ _LOGGER.debug(
1801
+ "Received response for google.ai.generativelanguage_v1alpha.ModelServiceAsyncClient.ListOperations",
1802
+ extra={
1803
+ "serviceName": "google.ai.generativelanguage.v1alpha.ModelService",
1804
+ "rpcName": "ListOperations",
1805
+ "httpResponse": http_response,
1806
+ "metadata": http_response["headers"],
1807
+ },
1808
+ )
1809
+ return resp
1810
+
1811
+ @property
1812
+ def kind(self) -> str:
1813
+ return "rest"
1814
+
1815
+ def close(self):
1816
+ self._session.close()
1817
+
1818
+
1819
+ __all__ = ("ModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/model_service/transports/rest_base.py ADDED
@@ -0,0 +1,476 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.longrunning import operations_pb2 # type: ignore
22
+ from google.protobuf import empty_pb2 # type: ignore
23
+ from google.protobuf import json_format
24
+
25
+ from google.ai.generativelanguage_v1alpha.types import tuned_model as gag_tuned_model
26
+ from google.ai.generativelanguage_v1alpha.types import model, model_service
27
+ from google.ai.generativelanguage_v1alpha.types import tuned_model
28
+
29
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
30
+
31
+
32
class _BaseModelServiceRestTransport(ModelServiceTransport):
    """Shared REST backend transport for ModelService.

    Not meant to be used directly: the synchronous and asynchronous REST
    transports derive from this class. It mirrors the primary client's
    method surface so the client can load a concrete transport
    implementation and dispatch calls through it, exchanging JSON
    representations of protocol buffers over HTTP/1.1.
    """

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[Any] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to
                (default: 'generativelanguage.googleapis.com').
            credentials (Optional[Any]): The authorization credentials to
                attach to requests. These credentials identify the
                application to the service; if none are specified, the
                client will attempt to ascertain the credentials from the
                environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along
                with API requests. If ``None``, then default info will be
                used. Generally you only need to set this if you are
                developing your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed
                JWTs should be used for service account credentials.
            url_scheme: The protocol scheme for the API endpoint.
                Normally "https", but "http" can be specified for testing
                or local servers.
        """
        # Split an optional scheme off the supplied host; a bare hostname
        # gets the configured url_scheme prepended before the base
        # constructor runs.
        scheme_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if scheme_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER

        if not scheme_match.groupdict()["scheme"]:
            host = f"{url_scheme}://{host}"

        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

    class _BaseCreateTunedModel:
        """Static HTTP binding helpers for the CreateTunedModel RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required query parameters the caller left unset.
            defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
            return {
                field: value
                for field, value in defaults.items()
                if field not in message_dict
            }

        @staticmethod
        def _get_http_options():
            # HTTP rule(s) this RPC transcodes onto.
            return [
                {
                    "method": "post",
                    "uri": "/v1alpha/tunedModels",
                    "body": "tuned_model",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            # Map the proto request onto the matching HTTP rule (method,
            # URI, body, query params).
            pb_request = model_service.CreateTunedModelRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_request_body_json(transcoded_request):
            # JSON-serialize the body portion of the transcoded request.
            return json_format.MessageToJson(
                transcoded_request["body"], use_integers_for_enums=True
            )

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params.update(
                _BaseModelServiceRestTransport._BaseCreateTunedModel._get_unset_required_fields(
                    query_params
                )
            )
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseDeleteTunedModel:
        """Static HTTP binding helpers for the DeleteTunedModel RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required query parameters the caller left unset.
            defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
            return {
                field: value
                for field, value in defaults.items()
                if field not in message_dict
            }

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "delete",
                    "uri": "/v1alpha/{name=tunedModels/*}",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.DeleteTunedModelRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params.update(
                _BaseModelServiceRestTransport._BaseDeleteTunedModel._get_unset_required_fields(
                    query_params
                )
            )
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseGetModel:
        """Static HTTP binding helpers for the GetModel RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required query parameters the caller left unset.
            defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
            return {
                field: value
                for field, value in defaults.items()
                if field not in message_dict
            }

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=models/*}",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.GetModelRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params.update(
                _BaseModelServiceRestTransport._BaseGetModel._get_unset_required_fields(
                    query_params
                )
            )
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseGetTunedModel:
        """Static HTTP binding helpers for the GetTunedModel RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required query parameters the caller left unset.
            defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
            return {
                field: value
                for field, value in defaults.items()
                if field not in message_dict
            }

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=tunedModels/*}",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.GetTunedModelRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params.update(
                _BaseModelServiceRestTransport._BaseGetTunedModel._get_unset_required_fields(
                    query_params
                )
            )
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseListModels:
        """Static HTTP binding helpers for the ListModels RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/models",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.ListModelsRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseListTunedModels:
        """Static HTTP binding helpers for the ListTunedModels RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/tunedModels",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.ListTunedModelsRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseUpdateTunedModel:
        """Static HTTP binding helpers for the UpdateTunedModel RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required query parameters the caller left unset.
            defaults = cls.__REQUIRED_FIELDS_DEFAULT_VALUES
            return {
                field: value
                for field, value in defaults.items()
                if field not in message_dict
            }

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "patch",
                    "uri": "/v1alpha/{tuned_model.name=tunedModels/*}",
                    "body": "tuned_model",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            pb_request = model_service.UpdateTunedModelRequest.pb(request)
            return path_template.transcode(http_options, pb_request)

        @staticmethod
        def _get_request_body_json(transcoded_request):
            # JSON-serialize the body portion of the transcoded request.
            return json_format.MessageToJson(
                transcoded_request["body"], use_integers_for_enums=True
            )

        @staticmethod
        def _get_query_params_json(transcoded_request):
            params_json = json_format.MessageToJson(
                transcoded_request["query_params"],
                use_integers_for_enums=True,
            )
            query_params = json.loads(params_json)
            query_params.update(
                _BaseModelServiceRestTransport._BaseUpdateTunedModel._get_unset_required_fields(
                    query_params
                )
            )
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseGetOperation:
        """Static HTTP binding helpers for the GetOperation mixin RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            # Operation names may live under several collections; list one
            # rule per supported parent.
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=tunedModels/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=generatedFiles/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=models/*/operations/*}",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            request_kwargs = json_format.MessageToDict(request)
            return path_template.transcode(http_options, **request_kwargs)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            # Round-trip through json to yield plain JSON-compatible values.
            return json.loads(json.dumps(transcoded_request["query_params"]))

    class _BaseListOperations:
        """Static HTTP binding helpers for the ListOperations mixin RPC."""

        def __hash__(self):  # pragma: NO COVER
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            return [
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=tunedModels/*}/operations",
                },
                {
                    "method": "get",
                    "uri": "/v1alpha/{name=models/*}/operations",
                },
            ]

        @staticmethod
        def _get_transcoded_request(http_options, request):
            request_kwargs = json_format.MessageToDict(request)
            return path_template.transcode(http_options, **request_kwargs)

        @staticmethod
        def _get_query_params_json(transcoded_request):
            # Round-trip through json to yield plain JSON-compatible values.
            return json.loads(json.dumps(transcoded_request["query_params"]))
473
+ return query_params
474
+
475
+
476
+ __all__ = ("_BaseModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (418 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (46.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (62.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/__pycache__/pagers.cpython-311.pyc ADDED
Binary file (10.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/async_client.py ADDED
@@ -0,0 +1,1150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.longrunning import operations_pb2 # type: ignore
47
+ from google.protobuf import field_mask_pb2 # type: ignore
48
+
49
+ from google.ai.generativelanguage_v1alpha.services.permission_service import pagers
50
+ from google.ai.generativelanguage_v1alpha.types import permission as gag_permission
51
+ from google.ai.generativelanguage_v1alpha.types import permission
52
+ from google.ai.generativelanguage_v1alpha.types import permission_service
53
+
54
+ from .client import PermissionServiceClient
55
+ from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
56
+ from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
57
+
58
+ try:
59
+ from google.api_core import client_logging # type: ignore
60
+
61
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
62
+ except ImportError: # pragma: NO COVER
63
+ CLIENT_LOGGING_SUPPORTED = False
64
+
65
+ _LOGGER = std_logging.getLogger(__name__)
66
+
67
+
68
+ class PermissionServiceAsyncClient:
69
+ """Provides methods for managing permissions to PaLM API
70
+ resources.
71
+ """
72
+
73
+ _client: PermissionServiceClient
74
+
75
+ # Copy defaults from the synchronous client for use here.
76
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
77
+ DEFAULT_ENDPOINT = PermissionServiceClient.DEFAULT_ENDPOINT
78
+ DEFAULT_MTLS_ENDPOINT = PermissionServiceClient.DEFAULT_MTLS_ENDPOINT
79
+ _DEFAULT_ENDPOINT_TEMPLATE = PermissionServiceClient._DEFAULT_ENDPOINT_TEMPLATE
80
+ _DEFAULT_UNIVERSE = PermissionServiceClient._DEFAULT_UNIVERSE
81
+
82
+ permission_path = staticmethod(PermissionServiceClient.permission_path)
83
+ parse_permission_path = staticmethod(PermissionServiceClient.parse_permission_path)
84
+ common_billing_account_path = staticmethod(
85
+ PermissionServiceClient.common_billing_account_path
86
+ )
87
+ parse_common_billing_account_path = staticmethod(
88
+ PermissionServiceClient.parse_common_billing_account_path
89
+ )
90
+ common_folder_path = staticmethod(PermissionServiceClient.common_folder_path)
91
+ parse_common_folder_path = staticmethod(
92
+ PermissionServiceClient.parse_common_folder_path
93
+ )
94
+ common_organization_path = staticmethod(
95
+ PermissionServiceClient.common_organization_path
96
+ )
97
+ parse_common_organization_path = staticmethod(
98
+ PermissionServiceClient.parse_common_organization_path
99
+ )
100
+ common_project_path = staticmethod(PermissionServiceClient.common_project_path)
101
+ parse_common_project_path = staticmethod(
102
+ PermissionServiceClient.parse_common_project_path
103
+ )
104
+ common_location_path = staticmethod(PermissionServiceClient.common_location_path)
105
+ parse_common_location_path = staticmethod(
106
+ PermissionServiceClient.parse_common_location_path
107
+ )
108
+
109
+ @classmethod
110
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
111
+ """Creates an instance of this client using the provided credentials
112
+ info.
113
+
114
+ Args:
115
+ info (dict): The service account private key info.
116
+ args: Additional arguments to pass to the constructor.
117
+ kwargs: Additional arguments to pass to the constructor.
118
+
119
+ Returns:
120
+ PermissionServiceAsyncClient: The constructed client.
121
+ """
122
+ return PermissionServiceClient.from_service_account_info.__func__(PermissionServiceAsyncClient, info, *args, **kwargs) # type: ignore
123
+
124
+ @classmethod
125
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
126
+ """Creates an instance of this client using the provided credentials
127
+ file.
128
+
129
+ Args:
130
+ filename (str): The path to the service account private key json
131
+ file.
132
+ args: Additional arguments to pass to the constructor.
133
+ kwargs: Additional arguments to pass to the constructor.
134
+
135
+ Returns:
136
+ PermissionServiceAsyncClient: The constructed client.
137
+ """
138
+ return PermissionServiceClient.from_service_account_file.__func__(PermissionServiceAsyncClient, filename, *args, **kwargs) # type: ignore
139
+
140
+ from_service_account_json = from_service_account_file
141
+
142
+ @classmethod
143
+ def get_mtls_endpoint_and_cert_source(
144
+ cls, client_options: Optional[ClientOptions] = None
145
+ ):
146
+ """Return the API endpoint and client cert source for mutual TLS.
147
+
148
+ The client cert source is determined in the following order:
149
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
150
+ client cert source is None.
151
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
152
+ default client cert source exists, use the default one; otherwise the client cert
153
+ source is None.
154
+
155
+ The API endpoint is determined in the following order:
156
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
157
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
158
+ default mTLS endpoint; if the environment variable is "never", use the default API
159
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
160
+ use the default API endpoint.
161
+
162
+ More details can be found at https://google.aip.dev/auth/4114.
163
+
164
+ Args:
165
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
166
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
167
+ in this method.
168
+
169
+ Returns:
170
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
171
+ client cert source to use.
172
+
173
+ Raises:
174
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
175
+ """
176
+ return PermissionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
177
+
178
+ @property
179
+ def transport(self) -> PermissionServiceTransport:
180
+ """Returns the transport used by the client instance.
181
+
182
+ Returns:
183
+ PermissionServiceTransport: The transport used by the client instance.
184
+ """
185
+ return self._client.transport
186
+
187
+ @property
188
+ def api_endpoint(self):
189
+ """Return the API endpoint used by the client instance.
190
+
191
+ Returns:
192
+ str: The API endpoint used by the client instance.
193
+ """
194
+ return self._client._api_endpoint
195
+
196
+ @property
197
+ def universe_domain(self) -> str:
198
+ """Return the universe domain used by the client instance.
199
+
200
+ Returns:
201
+ str: The universe domain used
202
+ by the client instance.
203
+ """
204
+ return self._client._universe_domain
205
+
206
+ get_transport_class = PermissionServiceClient.get_transport_class
207
+
208
+ def __init__(
209
+ self,
210
+ *,
211
+ credentials: Optional[ga_credentials.Credentials] = None,
212
+ transport: Optional[
213
+ Union[
214
+ str,
215
+ PermissionServiceTransport,
216
+ Callable[..., PermissionServiceTransport],
217
+ ]
218
+ ] = "grpc_asyncio",
219
+ client_options: Optional[ClientOptions] = None,
220
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
221
+ ) -> None:
222
+ """Instantiates the permission service async client.
223
+
224
+ Args:
225
+ credentials (Optional[google.auth.credentials.Credentials]): The
226
+ authorization credentials to attach to requests. These
227
+ credentials identify the application to the service; if none
228
+ are specified, the client will attempt to ascertain the
229
+ credentials from the environment.
230
+ transport (Optional[Union[str,PermissionServiceTransport,Callable[..., PermissionServiceTransport]]]):
231
+ The transport to use, or a Callable that constructs and returns a new transport to use.
232
+ If a Callable is given, it will be called with the same set of initialization
233
+ arguments as used in the PermissionServiceTransport constructor.
234
+ If set to None, a transport is chosen automatically.
235
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
236
+ Custom options for the client.
237
+
238
+ 1. The ``api_endpoint`` property can be used to override the
239
+ default endpoint provided by the client when ``transport`` is
240
+ not explicitly provided. Only if this property is not set and
241
+ ``transport`` was not explicitly provided, the endpoint is
242
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
243
+ variable, which have one of the following values:
244
+ "always" (always use the default mTLS endpoint), "never" (always
245
+ use the default regular endpoint) and "auto" (auto-switch to the
246
+ default mTLS endpoint if client certificate is present; this is
247
+ the default value).
248
+
249
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
250
+ is "true", then the ``client_cert_source`` property can be used
251
+ to provide a client certificate for mTLS transport. If
252
+ not provided, the default SSL client certificate will be used if
253
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
254
+ set, no client certificate will be used.
255
+
256
+ 3. The ``universe_domain`` property can be used to override the
257
+ default "googleapis.com" universe. Note that ``api_endpoint``
258
+ property still takes precedence; and ``universe_domain`` is
259
+ currently not supported for mTLS.
260
+
261
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
262
+ The client info used to send a user-agent string along with
263
+ API requests. If ``None``, then default info will be used.
264
+ Generally, you only need to set this if you're developing
265
+ your own client library.
266
+
267
+ Raises:
268
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
269
+ creation failed for any reason.
270
+ """
271
+ self._client = PermissionServiceClient(
272
+ credentials=credentials,
273
+ transport=transport,
274
+ client_options=client_options,
275
+ client_info=client_info,
276
+ )
277
+
278
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
279
+ std_logging.DEBUG
280
+ ): # pragma: NO COVER
281
+ _LOGGER.debug(
282
+ "Created client `google.ai.generativelanguage_v1alpha.PermissionServiceAsyncClient`.",
283
+ extra={
284
+ "serviceName": "google.ai.generativelanguage.v1alpha.PermissionService",
285
+ "universeDomain": getattr(
286
+ self._client._transport._credentials, "universe_domain", ""
287
+ ),
288
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
289
+ "credentialsInfo": getattr(
290
+ self.transport._credentials, "get_cred_info", lambda: None
291
+ )(),
292
+ }
293
+ if hasattr(self._client._transport, "_credentials")
294
+ else {
295
+ "serviceName": "google.ai.generativelanguage.v1alpha.PermissionService",
296
+ "credentialsType": None,
297
+ },
298
+ )
299
+
300
+ async def create_permission(
301
+ self,
302
+ request: Optional[
303
+ Union[permission_service.CreatePermissionRequest, dict]
304
+ ] = None,
305
+ *,
306
+ parent: Optional[str] = None,
307
+ permission: Optional[gag_permission.Permission] = None,
308
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
309
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
310
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
311
+ ) -> gag_permission.Permission:
312
+ r"""Create a permission to a specific resource.
313
+
314
+ .. code-block:: python
315
+
316
+ # This snippet has been automatically generated and should be regarded as a
317
+ # code template only.
318
+ # It will require modifications to work:
319
+ # - It may require correct/in-range values for request initialization.
320
+ # - It may require specifying regional endpoints when creating the service
321
+ # client as shown in:
322
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
323
+ from google.ai import generativelanguage_v1alpha
324
+
325
+ async def sample_create_permission():
326
+ # Create a client
327
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
328
+
329
+ # Initialize request argument(s)
330
+ request = generativelanguage_v1alpha.CreatePermissionRequest(
331
+ parent="parent_value",
332
+ )
333
+
334
+ # Make the request
335
+ response = await client.create_permission(request=request)
336
+
337
+ # Handle the response
338
+ print(response)
339
+
340
+ Args:
341
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.CreatePermissionRequest, dict]]):
342
+ The request object. Request to create a ``Permission``.
343
+ parent (:class:`str`):
344
+ Required. The parent resource of the ``Permission``.
345
+ Formats: ``tunedModels/{tuned_model}``
346
+ ``corpora/{corpus}``
347
+
348
+ This corresponds to the ``parent`` field
349
+ on the ``request`` instance; if ``request`` is provided, this
350
+ should not be set.
351
+ permission (:class:`google.ai.generativelanguage_v1alpha.types.Permission`):
352
+ Required. The permission to create.
353
+ This corresponds to the ``permission`` field
354
+ on the ``request`` instance; if ``request`` is provided, this
355
+ should not be set.
356
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
357
+ should be retried.
358
+ timeout (float): The timeout for this request.
359
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
360
+ sent along with the request as metadata. Normally, each value must be of type `str`,
361
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
362
+ be of type `bytes`.
363
+
364
+ Returns:
365
+ google.ai.generativelanguage_v1alpha.types.Permission:
366
+ Permission resource grants user,
367
+ group or the rest of the world access to
368
+ the PaLM API resource (e.g. a tuned
369
+ model, corpus).
370
+
371
+ A role is a collection of permitted
372
+ operations that allows users to perform
373
+ specific actions on PaLM API resources.
374
+ To make them available to users, groups,
375
+ or service accounts, you assign roles.
376
+ When you assign a role, you grant
377
+ permissions that the role contains.
378
+
379
+ There are three concentric roles. Each
380
+ role is a superset of the previous
381
+ role's permitted operations:
382
+
383
+ - reader can use the resource (e.g.
384
+ tuned model, corpus) for inference
385
+ - writer has reader's permissions and
386
+ additionally can edit and share
387
+ - owner has writer's permissions and
388
+ additionally can delete
389
+
390
+ """
391
+ # Create or coerce a protobuf request object.
392
+ # - Quick check: If we got a request object, we should *not* have
393
+ # gotten any keyword arguments that map to the request.
394
+ has_flattened_params = any([parent, permission])
395
+ if request is not None and has_flattened_params:
396
+ raise ValueError(
397
+ "If the `request` argument is set, then none of "
398
+ "the individual field arguments should be set."
399
+ )
400
+
401
+ # - Use the request object if provided (there's no risk of modifying the input as
402
+ # there are no flattened fields), or create one.
403
+ if not isinstance(request, permission_service.CreatePermissionRequest):
404
+ request = permission_service.CreatePermissionRequest(request)
405
+
406
+ # If we have keyword arguments corresponding to fields on the
407
+ # request, apply these.
408
+ if parent is not None:
409
+ request.parent = parent
410
+ if permission is not None:
411
+ request.permission = permission
412
+
413
+ # Wrap the RPC method; this adds retry and timeout information,
414
+ # and friendly error handling.
415
+ rpc = self._client._transport._wrapped_methods[
416
+ self._client._transport.create_permission
417
+ ]
418
+
419
+ # Certain fields should be provided within the metadata header;
420
+ # add these here.
421
+ metadata = tuple(metadata) + (
422
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
423
+ )
424
+
425
+ # Validate the universe domain.
426
+ self._client._validate_universe_domain()
427
+
428
+ # Send the request.
429
+ response = await rpc(
430
+ request,
431
+ retry=retry,
432
+ timeout=timeout,
433
+ metadata=metadata,
434
+ )
435
+
436
+ # Done; return the response.
437
+ return response
438
+
439
+ async def get_permission(
440
+ self,
441
+ request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None,
442
+ *,
443
+ name: Optional[str] = None,
444
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
445
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
446
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
447
+ ) -> permission.Permission:
448
+ r"""Gets information about a specific Permission.
449
+
450
+ .. code-block:: python
451
+
452
+ # This snippet has been automatically generated and should be regarded as a
453
+ # code template only.
454
+ # It will require modifications to work:
455
+ # - It may require correct/in-range values for request initialization.
456
+ # - It may require specifying regional endpoints when creating the service
457
+ # client as shown in:
458
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
459
+ from google.ai import generativelanguage_v1alpha
460
+
461
+ async def sample_get_permission():
462
+ # Create a client
463
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
464
+
465
+ # Initialize request argument(s)
466
+ request = generativelanguage_v1alpha.GetPermissionRequest(
467
+ name="name_value",
468
+ )
469
+
470
+ # Make the request
471
+ response = await client.get_permission(request=request)
472
+
473
+ # Handle the response
474
+ print(response)
475
+
476
+ Args:
477
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.GetPermissionRequest, dict]]):
478
+ The request object. Request for getting information about a specific
479
+ ``Permission``.
480
+ name (:class:`str`):
481
+ Required. The resource name of the permission.
482
+
483
+ Formats:
484
+ ``tunedModels/{tuned_model}/permissions/{permission}``
485
+ ``corpora/{corpus}/permissions/{permission}``
486
+
487
+ This corresponds to the ``name`` field
488
+ on the ``request`` instance; if ``request`` is provided, this
489
+ should not be set.
490
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
491
+ should be retried.
492
+ timeout (float): The timeout for this request.
493
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
494
+ sent along with the request as metadata. Normally, each value must be of type `str`,
495
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
496
+ be of type `bytes`.
497
+
498
+ Returns:
499
+ google.ai.generativelanguage_v1alpha.types.Permission:
500
+ Permission resource grants user,
501
+ group or the rest of the world access to
502
+ the PaLM API resource (e.g. a tuned
503
+ model, corpus).
504
+
505
+ A role is a collection of permitted
506
+ operations that allows users to perform
507
+ specific actions on PaLM API resources.
508
+ To make them available to users, groups,
509
+ or service accounts, you assign roles.
510
+ When you assign a role, you grant
511
+ permissions that the role contains.
512
+
513
+ There are three concentric roles. Each
514
+ role is a superset of the previous
515
+ role's permitted operations:
516
+
517
+ - reader can use the resource (e.g.
518
+ tuned model, corpus) for inference
519
+ - writer has reader's permissions and
520
+ additionally can edit and share
521
+ - owner has writer's permissions and
522
+ additionally can delete
523
+
524
+ """
525
+ # Create or coerce a protobuf request object.
526
+ # - Quick check: If we got a request object, we should *not* have
527
+ # gotten any keyword arguments that map to the request.
528
+ has_flattened_params = any([name])
529
+ if request is not None and has_flattened_params:
530
+ raise ValueError(
531
+ "If the `request` argument is set, then none of "
532
+ "the individual field arguments should be set."
533
+ )
534
+
535
+ # - Use the request object if provided (there's no risk of modifying the input as
536
+ # there are no flattened fields), or create one.
537
+ if not isinstance(request, permission_service.GetPermissionRequest):
538
+ request = permission_service.GetPermissionRequest(request)
539
+
540
+ # If we have keyword arguments corresponding to fields on the
541
+ # request, apply these.
542
+ if name is not None:
543
+ request.name = name
544
+
545
+ # Wrap the RPC method; this adds retry and timeout information,
546
+ # and friendly error handling.
547
+ rpc = self._client._transport._wrapped_methods[
548
+ self._client._transport.get_permission
549
+ ]
550
+
551
+ # Certain fields should be provided within the metadata header;
552
+ # add these here.
553
+ metadata = tuple(metadata) + (
554
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
555
+ )
556
+
557
+ # Validate the universe domain.
558
+ self._client._validate_universe_domain()
559
+
560
+ # Send the request.
561
+ response = await rpc(
562
+ request,
563
+ retry=retry,
564
+ timeout=timeout,
565
+ metadata=metadata,
566
+ )
567
+
568
+ # Done; return the response.
569
+ return response
570
+
571
+ async def list_permissions(
572
+ self,
573
+ request: Optional[
574
+ Union[permission_service.ListPermissionsRequest, dict]
575
+ ] = None,
576
+ *,
577
+ parent: Optional[str] = None,
578
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
579
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
580
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
581
+ ) -> pagers.ListPermissionsAsyncPager:
582
+ r"""Lists permissions for the specific resource.
583
+
584
+ .. code-block:: python
585
+
586
+ # This snippet has been automatically generated and should be regarded as a
587
+ # code template only.
588
+ # It will require modifications to work:
589
+ # - It may require correct/in-range values for request initialization.
590
+ # - It may require specifying regional endpoints when creating the service
591
+ # client as shown in:
592
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
593
+ from google.ai import generativelanguage_v1alpha
594
+
595
+ async def sample_list_permissions():
596
+ # Create a client
597
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
598
+
599
+ # Initialize request argument(s)
600
+ request = generativelanguage_v1alpha.ListPermissionsRequest(
601
+ parent="parent_value",
602
+ )
603
+
604
+ # Make the request
605
+ page_result = client.list_permissions(request=request)
606
+
607
+ # Handle the response
608
+ async for response in page_result:
609
+ print(response)
610
+
611
+ Args:
612
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.ListPermissionsRequest, dict]]):
613
+ The request object. Request for listing permissions.
614
+ parent (:class:`str`):
615
+ Required. The parent resource of the permissions.
616
+ Formats: ``tunedModels/{tuned_model}``
617
+ ``corpora/{corpus}``
618
+
619
+ This corresponds to the ``parent`` field
620
+ on the ``request`` instance; if ``request`` is provided, this
621
+ should not be set.
622
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
623
+ should be retried.
624
+ timeout (float): The timeout for this request.
625
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
626
+ sent along with the request as metadata. Normally, each value must be of type `str`,
627
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
628
+ be of type `bytes`.
629
+
630
+ Returns:
631
+ google.ai.generativelanguage_v1alpha.services.permission_service.pagers.ListPermissionsAsyncPager:
632
+ Response from ListPermissions containing a paginated list of
633
+ permissions.
634
+
635
+ Iterating over this object will yield results and
636
+ resolve additional pages automatically.
637
+
638
+ """
639
+ # Create or coerce a protobuf request object.
640
+ # - Quick check: If we got a request object, we should *not* have
641
+ # gotten any keyword arguments that map to the request.
642
+ has_flattened_params = any([parent])
643
+ if request is not None and has_flattened_params:
644
+ raise ValueError(
645
+ "If the `request` argument is set, then none of "
646
+ "the individual field arguments should be set."
647
+ )
648
+
649
+ # - Use the request object if provided (there's no risk of modifying the input as
650
+ # there are no flattened fields), or create one.
651
+ if not isinstance(request, permission_service.ListPermissionsRequest):
652
+ request = permission_service.ListPermissionsRequest(request)
653
+
654
+ # If we have keyword arguments corresponding to fields on the
655
+ # request, apply these.
656
+ if parent is not None:
657
+ request.parent = parent
658
+
659
+ # Wrap the RPC method; this adds retry and timeout information,
660
+ # and friendly error handling.
661
+ rpc = self._client._transport._wrapped_methods[
662
+ self._client._transport.list_permissions
663
+ ]
664
+
665
+ # Certain fields should be provided within the metadata header;
666
+ # add these here.
667
+ metadata = tuple(metadata) + (
668
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
669
+ )
670
+
671
+ # Validate the universe domain.
672
+ self._client._validate_universe_domain()
673
+
674
+ # Send the request.
675
+ response = await rpc(
676
+ request,
677
+ retry=retry,
678
+ timeout=timeout,
679
+ metadata=metadata,
680
+ )
681
+
682
+ # This method is paged; wrap the response in a pager, which provides
683
+ # an `__aiter__` convenience method.
684
+ response = pagers.ListPermissionsAsyncPager(
685
+ method=rpc,
686
+ request=request,
687
+ response=response,
688
+ retry=retry,
689
+ timeout=timeout,
690
+ metadata=metadata,
691
+ )
692
+
693
+ # Done; return the response.
694
+ return response
695
+
696
+ async def update_permission(
697
+ self,
698
+ request: Optional[
699
+ Union[permission_service.UpdatePermissionRequest, dict]
700
+ ] = None,
701
+ *,
702
+ permission: Optional[gag_permission.Permission] = None,
703
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
704
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
705
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
706
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
707
+ ) -> gag_permission.Permission:
708
+ r"""Updates the permission.
709
+
710
+ .. code-block:: python
711
+
712
+ # This snippet has been automatically generated and should be regarded as a
713
+ # code template only.
714
+ # It will require modifications to work:
715
+ # - It may require correct/in-range values for request initialization.
716
+ # - It may require specifying regional endpoints when creating the service
717
+ # client as shown in:
718
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
719
+ from google.ai import generativelanguage_v1alpha
720
+
721
+ async def sample_update_permission():
722
+ # Create a client
723
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
724
+
725
+ # Initialize request argument(s)
726
+ request = generativelanguage_v1alpha.UpdatePermissionRequest(
727
+ )
728
+
729
+ # Make the request
730
+ response = await client.update_permission(request=request)
731
+
732
+ # Handle the response
733
+ print(response)
734
+
735
+ Args:
736
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.UpdatePermissionRequest, dict]]):
737
+ The request object. Request to update the ``Permission``.
738
+ permission (:class:`google.ai.generativelanguage_v1alpha.types.Permission`):
739
+ Required. The permission to update.
740
+
741
+ The permission's ``name`` field is used to identify the
742
+ permission to update.
743
+
744
+ This corresponds to the ``permission`` field
745
+ on the ``request`` instance; if ``request`` is provided, this
746
+ should not be set.
747
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
748
+ Required. The list of fields to update. Accepted ones:
749
+
750
+ - role (``Permission.role`` field)
751
+
752
+ This corresponds to the ``update_mask`` field
753
+ on the ``request`` instance; if ``request`` is provided, this
754
+ should not be set.
755
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
756
+ should be retried.
757
+ timeout (float): The timeout for this request.
758
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
759
+ sent along with the request as metadata. Normally, each value must be of type `str`,
760
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
761
+ be of type `bytes`.
762
+
763
+ Returns:
764
+ google.ai.generativelanguage_v1alpha.types.Permission:
765
+ Permission resource grants user,
766
+ group or the rest of the world access to
767
+ the PaLM API resource (e.g. a tuned
768
+ model, corpus).
769
+
770
+ A role is a collection of permitted
771
+ operations that allows users to perform
772
+ specific actions on PaLM API resources.
773
+ To make them available to users, groups,
774
+ or service accounts, you assign roles.
775
+ When you assign a role, you grant
776
+ permissions that the role contains.
777
+
778
+ There are three concentric roles. Each
779
+ role is a superset of the previous
780
+ role's permitted operations:
781
+
782
+ - reader can use the resource (e.g.
783
+ tuned model, corpus) for inference
784
+ - writer has reader's permissions and
785
+ additionally can edit and share
786
+ - owner has writer's permissions and
787
+ additionally can delete
788
+
789
+ """
790
+ # Create or coerce a protobuf request object.
791
+ # - Quick check: If we got a request object, we should *not* have
792
+ # gotten any keyword arguments that map to the request.
793
+ has_flattened_params = any([permission, update_mask])
794
+ if request is not None and has_flattened_params:
795
+ raise ValueError(
796
+ "If the `request` argument is set, then none of "
797
+ "the individual field arguments should be set."
798
+ )
799
+
800
+ # - Use the request object if provided (there's no risk of modifying the input as
801
+ # there are no flattened fields), or create one.
802
+ if not isinstance(request, permission_service.UpdatePermissionRequest):
803
+ request = permission_service.UpdatePermissionRequest(request)
804
+
805
+ # If we have keyword arguments corresponding to fields on the
806
+ # request, apply these.
807
+ if permission is not None:
808
+ request.permission = permission
809
+ if update_mask is not None:
810
+ request.update_mask = update_mask
811
+
812
+ # Wrap the RPC method; this adds retry and timeout information,
813
+ # and friendly error handling.
814
+ rpc = self._client._transport._wrapped_methods[
815
+ self._client._transport.update_permission
816
+ ]
817
+
818
+ # Certain fields should be provided within the metadata header;
819
+ # add these here.
820
+ metadata = tuple(metadata) + (
821
+ gapic_v1.routing_header.to_grpc_metadata(
822
+ (("permission.name", request.permission.name),)
823
+ ),
824
+ )
825
+
826
+ # Validate the universe domain.
827
+ self._client._validate_universe_domain()
828
+
829
+ # Send the request.
830
+ response = await rpc(
831
+ request,
832
+ retry=retry,
833
+ timeout=timeout,
834
+ metadata=metadata,
835
+ )
836
+
837
+ # Done; return the response.
838
+ return response
839
+
840
+ async def delete_permission(
841
+ self,
842
+ request: Optional[
843
+ Union[permission_service.DeletePermissionRequest, dict]
844
+ ] = None,
845
+ *,
846
+ name: Optional[str] = None,
847
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
848
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
849
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
850
+ ) -> None:
851
+ r"""Deletes the permission.
852
+
853
+ .. code-block:: python
854
+
855
+ # This snippet has been automatically generated and should be regarded as a
856
+ # code template only.
857
+ # It will require modifications to work:
858
+ # - It may require correct/in-range values for request initialization.
859
+ # - It may require specifying regional endpoints when creating the service
860
+ # client as shown in:
861
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
862
+ from google.ai import generativelanguage_v1alpha
863
+
864
+ async def sample_delete_permission():
865
+ # Create a client
866
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
867
+
868
+ # Initialize request argument(s)
869
+ request = generativelanguage_v1alpha.DeletePermissionRequest(
870
+ name="name_value",
871
+ )
872
+
873
+ # Make the request
874
+ await client.delete_permission(request=request)
875
+
876
+ Args:
877
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.DeletePermissionRequest, dict]]):
878
+ The request object. Request to delete the ``Permission``.
879
+ name (:class:`str`):
880
+ Required. The resource name of the permission. Formats:
881
+ ``tunedModels/{tuned_model}/permissions/{permission}``
882
+ ``corpora/{corpus}/permissions/{permission}``
883
+
884
+ This corresponds to the ``name`` field
885
+ on the ``request`` instance; if ``request`` is provided, this
886
+ should not be set.
887
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
888
+ should be retried.
889
+ timeout (float): The timeout for this request.
890
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
891
+ sent along with the request as metadata. Normally, each value must be of type `str`,
892
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
893
+ be of type `bytes`.
894
+ """
895
+ # Create or coerce a protobuf request object.
896
+ # - Quick check: If we got a request object, we should *not* have
897
+ # gotten any keyword arguments that map to the request.
898
+ has_flattened_params = any([name])
899
+ if request is not None and has_flattened_params:
900
+ raise ValueError(
901
+ "If the `request` argument is set, then none of "
902
+ "the individual field arguments should be set."
903
+ )
904
+
905
+ # - Use the request object if provided (there's no risk of modifying the input as
906
+ # there are no flattened fields), or create one.
907
+ if not isinstance(request, permission_service.DeletePermissionRequest):
908
+ request = permission_service.DeletePermissionRequest(request)
909
+
910
+ # If we have keyword arguments corresponding to fields on the
911
+ # request, apply these.
912
+ if name is not None:
913
+ request.name = name
914
+
915
+ # Wrap the RPC method; this adds retry and timeout information,
916
+ # and friendly error handling.
917
+ rpc = self._client._transport._wrapped_methods[
918
+ self._client._transport.delete_permission
919
+ ]
920
+
921
+ # Certain fields should be provided within the metadata header;
922
+ # add these here.
923
+ metadata = tuple(metadata) + (
924
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
925
+ )
926
+
927
+ # Validate the universe domain.
928
+ self._client._validate_universe_domain()
929
+
930
+ # Send the request.
931
+ await rpc(
932
+ request,
933
+ retry=retry,
934
+ timeout=timeout,
935
+ metadata=metadata,
936
+ )
937
+
938
+ async def transfer_ownership(
939
+ self,
940
+ request: Optional[
941
+ Union[permission_service.TransferOwnershipRequest, dict]
942
+ ] = None,
943
+ *,
944
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
945
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
946
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
947
+ ) -> permission_service.TransferOwnershipResponse:
948
+ r"""Transfers ownership of the tuned model.
949
+ This is the only way to change ownership of the tuned
950
+ model. The current owner will be downgraded to writer
951
+ role.
952
+
953
+ .. code-block:: python
954
+
955
+ # This snippet has been automatically generated and should be regarded as a
956
+ # code template only.
957
+ # It will require modifications to work:
958
+ # - It may require correct/in-range values for request initialization.
959
+ # - It may require specifying regional endpoints when creating the service
960
+ # client as shown in:
961
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
962
+ from google.ai import generativelanguage_v1alpha
963
+
964
+ async def sample_transfer_ownership():
965
+ # Create a client
966
+ client = generativelanguage_v1alpha.PermissionServiceAsyncClient()
967
+
968
+ # Initialize request argument(s)
969
+ request = generativelanguage_v1alpha.TransferOwnershipRequest(
970
+ name="name_value",
971
+ email_address="email_address_value",
972
+ )
973
+
974
+ # Make the request
975
+ response = await client.transfer_ownership(request=request)
976
+
977
+ # Handle the response
978
+ print(response)
979
+
980
+ Args:
981
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.TransferOwnershipRequest, dict]]):
982
+ The request object. Request to transfer the ownership of
983
+ the tuned model.
984
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
985
+ should be retried.
986
+ timeout (float): The timeout for this request.
987
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
988
+ sent along with the request as metadata. Normally, each value must be of type `str`,
989
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
990
+ be of type `bytes`.
991
+
992
+ Returns:
993
+ google.ai.generativelanguage_v1alpha.types.TransferOwnershipResponse:
994
+ Response from TransferOwnership.
995
+ """
996
+ # Create or coerce a protobuf request object.
997
+ # - Use the request object if provided (there's no risk of modifying the input as
998
+ # there are no flattened fields), or create one.
999
+ if not isinstance(request, permission_service.TransferOwnershipRequest):
1000
+ request = permission_service.TransferOwnershipRequest(request)
1001
+
1002
+ # Wrap the RPC method; this adds retry and timeout information,
1003
+ # and friendly error handling.
1004
+ rpc = self._client._transport._wrapped_methods[
1005
+ self._client._transport.transfer_ownership
1006
+ ]
1007
+
1008
+ # Certain fields should be provided within the metadata header;
1009
+ # add these here.
1010
+ metadata = tuple(metadata) + (
1011
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1012
+ )
1013
+
1014
+ # Validate the universe domain.
1015
+ self._client._validate_universe_domain()
1016
+
1017
+ # Send the request.
1018
+ response = await rpc(
1019
+ request,
1020
+ retry=retry,
1021
+ timeout=timeout,
1022
+ metadata=metadata,
1023
+ )
1024
+
1025
+ # Done; return the response.
1026
+ return response
1027
+
1028
+ async def list_operations(
1029
+ self,
1030
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
1031
+ *,
1032
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1033
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1034
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1035
+ ) -> operations_pb2.ListOperationsResponse:
1036
+ r"""Lists operations that match the specified filter in the request.
1037
+
1038
+ Args:
1039
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
1040
+ The request object. Request message for
1041
+ `ListOperations` method.
1042
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
1043
+ if any, should be retried.
1044
+ timeout (float): The timeout for this request.
1045
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1046
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1047
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1048
+ be of type `bytes`.
1049
+ Returns:
1050
+ ~.operations_pb2.ListOperationsResponse:
1051
+ Response message for ``ListOperations`` method.
1052
+ """
1053
+ # Create or coerce a protobuf request object.
1054
+ # The request isn't a proto-plus wrapped type,
1055
+ # so it must be constructed via keyword expansion.
1056
+ if isinstance(request, dict):
1057
+ request = operations_pb2.ListOperationsRequest(**request)
1058
+
1059
+ # Wrap the RPC method; this adds retry and timeout information,
1060
+ # and friendly error handling.
1061
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
1062
+
1063
+ # Certain fields should be provided within the metadata header;
1064
+ # add these here.
1065
+ metadata = tuple(metadata) + (
1066
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1067
+ )
1068
+
1069
+ # Validate the universe domain.
1070
+ self._client._validate_universe_domain()
1071
+
1072
+ # Send the request.
1073
+ response = await rpc(
1074
+ request,
1075
+ retry=retry,
1076
+ timeout=timeout,
1077
+ metadata=metadata,
1078
+ )
1079
+
1080
+ # Done; return the response.
1081
+ return response
1082
+
1083
+ async def get_operation(
1084
+ self,
1085
+ request: Optional[operations_pb2.GetOperationRequest] = None,
1086
+ *,
1087
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1088
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1089
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1090
+ ) -> operations_pb2.Operation:
1091
+ r"""Gets the latest state of a long-running operation.
1092
+
1093
+ Args:
1094
+ request (:class:`~.operations_pb2.GetOperationRequest`):
1095
+ The request object. Request message for
1096
+ `GetOperation` method.
1097
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
1098
+ if any, should be retried.
1099
+ timeout (float): The timeout for this request.
1100
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1101
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1102
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1103
+ be of type `bytes`.
1104
+ Returns:
1105
+ ~.operations_pb2.Operation:
1106
+ An ``Operation`` object.
1107
+ """
1108
+ # Create or coerce a protobuf request object.
1109
+ # The request isn't a proto-plus wrapped type,
1110
+ # so it must be constructed via keyword expansion.
1111
+ if isinstance(request, dict):
1112
+ request = operations_pb2.GetOperationRequest(**request)
1113
+
1114
+ # Wrap the RPC method; this adds retry and timeout information,
1115
+ # and friendly error handling.
1116
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
1117
+
1118
+ # Certain fields should be provided within the metadata header;
1119
+ # add these here.
1120
+ metadata = tuple(metadata) + (
1121
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1122
+ )
1123
+
1124
+ # Validate the universe domain.
1125
+ self._client._validate_universe_domain()
1126
+
1127
+ # Send the request.
1128
+ response = await rpc(
1129
+ request,
1130
+ retry=retry,
1131
+ timeout=timeout,
1132
+ metadata=metadata,
1133
+ )
1134
+
1135
+ # Done; return the response.
1136
+ return response
1137
+
1138
+ async def __aenter__(self) -> "PermissionServiceAsyncClient":
1139
+ return self
1140
+
1141
+ async def __aexit__(self, exc_type, exc, tb):
1142
+ await self.transport.close()
1143
+
1144
+
1145
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
1146
+ gapic_version=package_version.__version__
1147
+ )
1148
+
1149
+
1150
+ __all__ = ("PermissionServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/client.py ADDED
@@ -0,0 +1,1532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
46
+
47
# Older google-api-core releases lack `_MethodDefault`; fall back to a looser
# alias so annotations keep working there.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object, None]  # type: ignore

# Structured client logging is only available in newer google-api-core.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.longrunning import operations_pb2 # type: ignore
62
+ from google.protobuf import field_mask_pb2 # type: ignore
63
+
64
+ from google.ai.generativelanguage_v1alpha.services.permission_service import pagers
65
+ from google.ai.generativelanguage_v1alpha.types import permission as gag_permission
66
+ from google.ai.generativelanguage_v1alpha.types import permission
67
+ from google.ai.generativelanguage_v1alpha.types import permission_service
68
+
69
+ from .transports.base import DEFAULT_CLIENT_INFO, PermissionServiceTransport
70
+ from .transports.grpc import PermissionServiceGrpcTransport
71
+ from .transports.grpc_asyncio import PermissionServiceGrpcAsyncIOTransport
72
+ from .transports.rest import PermissionServiceRestTransport
73
+
74
+
75
class PermissionServiceClientMeta(type):
    """Metaclass for the PermissionService client.

    Provides class-level helpers for building and retrieving support
    objects (e.g. transport) without polluting client instances.
    """

    # Registered transports keyed by label; insertion order determines the default.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[PermissionServiceTransport]]
    _transport_registry["grpc"] = PermissionServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = PermissionServiceGrpcAsyncIOTransport
    _transport_registry["rest"] = PermissionServiceRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[PermissionServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # No (or empty) label: fall back to the first registered transport.
        if not label:
            return next(iter(cls._transport_registry.values()))
        # A specific transport was requested; KeyError on unknown labels.
        return cls._transport_registry[label]
110
+
111
+
112
+ class PermissionServiceClient(metaclass=PermissionServiceClientMeta):
113
+ """Provides methods for managing permissions to PaLM API
114
+ resources.
115
+ """
116
+
117
+ @staticmethod
118
+ def _get_default_mtls_endpoint(api_endpoint):
119
+ """Converts api endpoint to mTLS endpoint.
120
+
121
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
122
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
123
+ Args:
124
+ api_endpoint (Optional[str]): the api endpoint to convert.
125
+ Returns:
126
+ str: converted mTLS api endpoint.
127
+ """
128
+ if not api_endpoint:
129
+ return api_endpoint
130
+
131
+ mtls_endpoint_re = re.compile(
132
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
133
+ )
134
+
135
+ m = mtls_endpoint_re.match(api_endpoint)
136
+ name, mtls, sandbox, googledomain = m.groups()
137
+ if mtls or not googledomain:
138
+ return api_endpoint
139
+
140
+ if sandbox:
141
+ return api_endpoint.replace(
142
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
143
+ )
144
+
145
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
146
+
147
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
148
+ DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
149
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
150
+ DEFAULT_ENDPOINT
151
+ )
152
+
153
+ _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
154
+ _DEFAULT_UNIVERSE = "googleapis.com"
155
+
156
+ @classmethod
157
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
158
+ """Creates an instance of this client using the provided credentials
159
+ info.
160
+
161
+ Args:
162
+ info (dict): The service account private key info.
163
+ args: Additional arguments to pass to the constructor.
164
+ kwargs: Additional arguments to pass to the constructor.
165
+
166
+ Returns:
167
+ PermissionServiceClient: The constructed client.
168
+ """
169
+ credentials = service_account.Credentials.from_service_account_info(info)
170
+ kwargs["credentials"] = credentials
171
+ return cls(*args, **kwargs)
172
+
173
+ @classmethod
174
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
175
+ """Creates an instance of this client using the provided credentials
176
+ file.
177
+
178
+ Args:
179
+ filename (str): The path to the service account private key json
180
+ file.
181
+ args: Additional arguments to pass to the constructor.
182
+ kwargs: Additional arguments to pass to the constructor.
183
+
184
+ Returns:
185
+ PermissionServiceClient: The constructed client.
186
+ """
187
+ credentials = service_account.Credentials.from_service_account_file(filename)
188
+ kwargs["credentials"] = credentials
189
+ return cls(*args, **kwargs)
190
+
191
+ from_service_account_json = from_service_account_file
192
+
193
+ @property
194
+ def transport(self) -> PermissionServiceTransport:
195
+ """Returns the transport used by the client instance.
196
+
197
+ Returns:
198
+ PermissionServiceTransport: The transport used by the client
199
+ instance.
200
+ """
201
+ return self._transport
202
+
203
+ @staticmethod
204
+ def permission_path(
205
+ tuned_model: str,
206
+ permission: str,
207
+ ) -> str:
208
+ """Returns a fully-qualified permission string."""
209
+ return "tunedModels/{tuned_model}/permissions/{permission}".format(
210
+ tuned_model=tuned_model,
211
+ permission=permission,
212
+ )
213
+
214
+ @staticmethod
215
+ def parse_permission_path(path: str) -> Dict[str, str]:
216
+ """Parses a permission path into its component segments."""
217
+ m = re.match(
218
+ r"^tunedModels/(?P<tuned_model>.+?)/permissions/(?P<permission>.+?)$", path
219
+ )
220
+ return m.groupdict() if m else {}
221
+
222
+ @staticmethod
223
+ def common_billing_account_path(
224
+ billing_account: str,
225
+ ) -> str:
226
+ """Returns a fully-qualified billing_account string."""
227
+ return "billingAccounts/{billing_account}".format(
228
+ billing_account=billing_account,
229
+ )
230
+
231
+ @staticmethod
232
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
233
+ """Parse a billing_account path into its component segments."""
234
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
235
+ return m.groupdict() if m else {}
236
+
237
+ @staticmethod
238
+ def common_folder_path(
239
+ folder: str,
240
+ ) -> str:
241
+ """Returns a fully-qualified folder string."""
242
+ return "folders/{folder}".format(
243
+ folder=folder,
244
+ )
245
+
246
+ @staticmethod
247
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
248
+ """Parse a folder path into its component segments."""
249
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
250
+ return m.groupdict() if m else {}
251
+
252
+ @staticmethod
253
+ def common_organization_path(
254
+ organization: str,
255
+ ) -> str:
256
+ """Returns a fully-qualified organization string."""
257
+ return "organizations/{organization}".format(
258
+ organization=organization,
259
+ )
260
+
261
+ @staticmethod
262
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
263
+ """Parse a organization path into its component segments."""
264
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
265
+ return m.groupdict() if m else {}
266
+
267
+ @staticmethod
268
+ def common_project_path(
269
+ project: str,
270
+ ) -> str:
271
+ """Returns a fully-qualified project string."""
272
+ return "projects/{project}".format(
273
+ project=project,
274
+ )
275
+
276
+ @staticmethod
277
+ def parse_common_project_path(path: str) -> Dict[str, str]:
278
+ """Parse a project path into its component segments."""
279
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
280
+ return m.groupdict() if m else {}
281
+
282
+ @staticmethod
283
+ def common_location_path(
284
+ project: str,
285
+ location: str,
286
+ ) -> str:
287
+ """Returns a fully-qualified location string."""
288
+ return "projects/{project}/locations/{location}".format(
289
+ project=project,
290
+ location=location,
291
+ )
292
+
293
+ @staticmethod
294
+ def parse_common_location_path(path: str) -> Dict[str, str]:
295
+ """Parse a location path into its component segments."""
296
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
297
+ return m.groupdict() if m else {}
298
+
299
+ @classmethod
300
+ def get_mtls_endpoint_and_cert_source(
301
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
302
+ ):
303
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
304
+
305
+ The client cert source is determined in the following order:
306
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
307
+ client cert source is None.
308
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
309
+ default client cert source exists, use the default one; otherwise the client cert
310
+ source is None.
311
+
312
+ The API endpoint is determined in the following order:
313
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
314
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
315
+ default mTLS endpoint; if the environment variable is "never", use the default API
316
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
317
+ use the default API endpoint.
318
+
319
+ More details can be found at https://google.aip.dev/auth/4114.
320
+
321
+ Args:
322
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
323
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
324
+ in this method.
325
+
326
+ Returns:
327
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
328
+ client cert source to use.
329
+
330
+ Raises:
331
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
332
+ """
333
+
334
+ warnings.warn(
335
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
336
+ DeprecationWarning,
337
+ )
338
+ if client_options is None:
339
+ client_options = client_options_lib.ClientOptions()
340
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
341
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
342
+ if use_client_cert not in ("true", "false"):
343
+ raise ValueError(
344
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
345
+ )
346
+ if use_mtls_endpoint not in ("auto", "never", "always"):
347
+ raise MutualTLSChannelError(
348
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
349
+ )
350
+
351
+ # Figure out the client cert source to use.
352
+ client_cert_source = None
353
+ if use_client_cert == "true":
354
+ if client_options.client_cert_source:
355
+ client_cert_source = client_options.client_cert_source
356
+ elif mtls.has_default_client_cert_source():
357
+ client_cert_source = mtls.default_client_cert_source()
358
+
359
+ # Figure out which api endpoint to use.
360
+ if client_options.api_endpoint is not None:
361
+ api_endpoint = client_options.api_endpoint
362
+ elif use_mtls_endpoint == "always" or (
363
+ use_mtls_endpoint == "auto" and client_cert_source
364
+ ):
365
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
366
+ else:
367
+ api_endpoint = cls.DEFAULT_ENDPOINT
368
+
369
+ return api_endpoint, client_cert_source
370
+
371
+ @staticmethod
372
+ def _read_environment_variables():
373
+ """Returns the environment variables used by the client.
374
+
375
+ Returns:
376
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
377
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
378
+
379
+ Raises:
380
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
381
+ any of ["true", "false"].
382
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
383
+ is not any of ["auto", "never", "always"].
384
+ """
385
+ use_client_cert = os.getenv(
386
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
387
+ ).lower()
388
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
389
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
390
+ if use_client_cert not in ("true", "false"):
391
+ raise ValueError(
392
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
393
+ )
394
+ if use_mtls_endpoint not in ("auto", "never", "always"):
395
+ raise MutualTLSChannelError(
396
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
397
+ )
398
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
399
+
400
+ @staticmethod
401
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
402
+ """Return the client cert source to be used by the client.
403
+
404
+ Args:
405
+ provided_cert_source (bytes): The client certificate source provided.
406
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
407
+
408
+ Returns:
409
+ bytes or None: The client cert source to be used by the client.
410
+ """
411
+ client_cert_source = None
412
+ if use_cert_flag:
413
+ if provided_cert_source:
414
+ client_cert_source = provided_cert_source
415
+ elif mtls.has_default_client_cert_source():
416
+ client_cert_source = mtls.default_client_cert_source()
417
+ return client_cert_source
418
+
419
+ @staticmethod
420
+ def _get_api_endpoint(
421
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
422
+ ):
423
+ """Return the API endpoint used by the client.
424
+
425
+ Args:
426
+ api_override (str): The API endpoint override. If specified, this is always
427
+ the return value of this function and the other arguments are not used.
428
+ client_cert_source (bytes): The client certificate source used by the client.
429
+ universe_domain (str): The universe domain used by the client.
430
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
431
+ Possible values are "always", "auto", or "never".
432
+
433
+ Returns:
434
+ str: The API endpoint to be used by the client.
435
+ """
436
+ if api_override is not None:
437
+ api_endpoint = api_override
438
+ elif use_mtls_endpoint == "always" or (
439
+ use_mtls_endpoint == "auto" and client_cert_source
440
+ ):
441
+ _default_universe = PermissionServiceClient._DEFAULT_UNIVERSE
442
+ if universe_domain != _default_universe:
443
+ raise MutualTLSChannelError(
444
+ f"mTLS is not supported in any universe other than {_default_universe}."
445
+ )
446
+ api_endpoint = PermissionServiceClient.DEFAULT_MTLS_ENDPOINT
447
+ else:
448
+ api_endpoint = PermissionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
449
+ UNIVERSE_DOMAIN=universe_domain
450
+ )
451
+ return api_endpoint
452
+
453
+ @staticmethod
454
+ def _get_universe_domain(
455
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
456
+ ) -> str:
457
+ """Return the universe domain used by the client.
458
+
459
+ Args:
460
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
461
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
462
+
463
+ Returns:
464
+ str: The universe domain to be used by the client.
465
+
466
+ Raises:
467
+ ValueError: If the universe domain is an empty string.
468
+ """
469
+ universe_domain = PermissionServiceClient._DEFAULT_UNIVERSE
470
+ if client_universe_domain is not None:
471
+ universe_domain = client_universe_domain
472
+ elif universe_domain_env is not None:
473
+ universe_domain = universe_domain_env
474
+ if len(universe_domain.strip()) == 0:
475
+ raise ValueError("Universe Domain cannot be an empty string.")
476
+ return universe_domain
477
+
478
+ def _validate_universe_domain(self):
479
+ """Validates client's and credentials' universe domains are consistent.
480
+
481
+ Returns:
482
+ bool: True iff the configured universe domain is valid.
483
+
484
+ Raises:
485
+ ValueError: If the configured universe domain is not valid.
486
+ """
487
+
488
+ # NOTE (b/349488459): universe validation is disabled until further notice.
489
+ return True
490
+
491
+ @property
492
+ def api_endpoint(self):
493
+ """Return the API endpoint used by the client instance.
494
+
495
+ Returns:
496
+ str: The API endpoint used by the client instance.
497
+ """
498
+ return self._api_endpoint
499
+
500
+ @property
501
+ def universe_domain(self) -> str:
502
+ """Return the universe domain used by the client instance.
503
+
504
+ Returns:
505
+ str: The universe domain used by the client instance.
506
+ """
507
+ return self._universe_domain
508
+
509
+ def __init__(
510
+ self,
511
+ *,
512
+ credentials: Optional[ga_credentials.Credentials] = None,
513
+ transport: Optional[
514
+ Union[
515
+ str,
516
+ PermissionServiceTransport,
517
+ Callable[..., PermissionServiceTransport],
518
+ ]
519
+ ] = None,
520
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
521
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
522
+ ) -> None:
523
+ """Instantiates the permission service client.
524
+
525
+ Args:
526
+ credentials (Optional[google.auth.credentials.Credentials]): The
527
+ authorization credentials to attach to requests. These
528
+ credentials identify the application to the service; if none
529
+ are specified, the client will attempt to ascertain the
530
+ credentials from the environment.
531
+ transport (Optional[Union[str,PermissionServiceTransport,Callable[..., PermissionServiceTransport]]]):
532
+ The transport to use, or a Callable that constructs and returns a new transport.
533
+ If a Callable is given, it will be called with the same set of initialization
534
+ arguments as used in the PermissionServiceTransport constructor.
535
+ If set to None, a transport is chosen automatically.
536
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
537
+ Custom options for the client.
538
+
539
+ 1. The ``api_endpoint`` property can be used to override the
540
+ default endpoint provided by the client when ``transport`` is
541
+ not explicitly provided. Only if this property is not set and
542
+ ``transport`` was not explicitly provided, the endpoint is
543
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
544
+ variable, which have one of the following values:
545
+ "always" (always use the default mTLS endpoint), "never" (always
546
+ use the default regular endpoint) and "auto" (auto-switch to the
547
+ default mTLS endpoint if client certificate is present; this is
548
+ the default value).
549
+
550
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
551
+ is "true", then the ``client_cert_source`` property can be used
552
+ to provide a client certificate for mTLS transport. If
553
+ not provided, the default SSL client certificate will be used if
554
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
555
+ set, no client certificate will be used.
556
+
557
+ 3. The ``universe_domain`` property can be used to override the
558
+ default "googleapis.com" universe. Note that the ``api_endpoint``
559
+ property still takes precedence; and ``universe_domain`` is
560
+ currently not supported for mTLS.
561
+
562
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
563
+ The client info used to send a user-agent string along with
564
+ API requests. If ``None``, then default info will be used.
565
+ Generally, you only need to set this if you're developing
566
+ your own client library.
567
+
568
+ Raises:
569
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
570
+ creation failed for any reason.
571
+ """
572
+ self._client_options = client_options
573
+ if isinstance(self._client_options, dict):
574
+ self._client_options = client_options_lib.from_dict(self._client_options)
575
+ if self._client_options is None:
576
+ self._client_options = client_options_lib.ClientOptions()
577
+ self._client_options = cast(
578
+ client_options_lib.ClientOptions, self._client_options
579
+ )
580
+
581
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
582
+
583
+ (
584
+ self._use_client_cert,
585
+ self._use_mtls_endpoint,
586
+ self._universe_domain_env,
587
+ ) = PermissionServiceClient._read_environment_variables()
588
+ self._client_cert_source = PermissionServiceClient._get_client_cert_source(
589
+ self._client_options.client_cert_source, self._use_client_cert
590
+ )
591
+ self._universe_domain = PermissionServiceClient._get_universe_domain(
592
+ universe_domain_opt, self._universe_domain_env
593
+ )
594
+ self._api_endpoint = None # updated below, depending on `transport`
595
+
596
+ # Initialize the universe domain validation.
597
+ self._is_universe_domain_valid = False
598
+
599
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
600
+ # Setup logging.
601
+ client_logging.initialize_logging()
602
+
603
+ api_key_value = getattr(self._client_options, "api_key", None)
604
+ if api_key_value and credentials:
605
+ raise ValueError(
606
+ "client_options.api_key and credentials are mutually exclusive"
607
+ )
608
+
609
+ # Save or instantiate the transport.
610
+ # Ordinarily, we provide the transport, but allowing a custom transport
611
+ # instance provides an extensibility point for unusual situations.
612
+ transport_provided = isinstance(transport, PermissionServiceTransport)
613
+ if transport_provided:
614
+ # transport is a PermissionServiceTransport instance.
615
+ if credentials or self._client_options.credentials_file or api_key_value:
616
+ raise ValueError(
617
+ "When providing a transport instance, "
618
+ "provide its credentials directly."
619
+ )
620
+ if self._client_options.scopes:
621
+ raise ValueError(
622
+ "When providing a transport instance, provide its scopes "
623
+ "directly."
624
+ )
625
+ self._transport = cast(PermissionServiceTransport, transport)
626
+ self._api_endpoint = self._transport.host
627
+
628
+ self._api_endpoint = (
629
+ self._api_endpoint
630
+ or PermissionServiceClient._get_api_endpoint(
631
+ self._client_options.api_endpoint,
632
+ self._client_cert_source,
633
+ self._universe_domain,
634
+ self._use_mtls_endpoint,
635
+ )
636
+ )
637
+
638
+ if not transport_provided:
639
+ import google.auth._default # type: ignore
640
+
641
+ if api_key_value and hasattr(
642
+ google.auth._default, "get_api_key_credentials"
643
+ ):
644
+ credentials = google.auth._default.get_api_key_credentials(
645
+ api_key_value
646
+ )
647
+
648
+ transport_init: Union[
649
+ Type[PermissionServiceTransport],
650
+ Callable[..., PermissionServiceTransport],
651
+ ] = (
652
+ PermissionServiceClient.get_transport_class(transport)
653
+ if isinstance(transport, str) or transport is None
654
+ else cast(Callable[..., PermissionServiceTransport], transport)
655
+ )
656
+ # initialize with the provided callable or the passed in class
657
+ self._transport = transport_init(
658
+ credentials=credentials,
659
+ credentials_file=self._client_options.credentials_file,
660
+ host=self._api_endpoint,
661
+ scopes=self._client_options.scopes,
662
+ client_cert_source_for_mtls=self._client_cert_source,
663
+ quota_project_id=self._client_options.quota_project_id,
664
+ client_info=client_info,
665
+ always_use_jwt_access=True,
666
+ api_audience=self._client_options.api_audience,
667
+ )
668
+
669
+ if "async" not in str(self._transport):
670
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
671
+ std_logging.DEBUG
672
+ ): # pragma: NO COVER
673
+ _LOGGER.debug(
674
+ "Created client `google.ai.generativelanguage_v1alpha.PermissionServiceClient`.",
675
+ extra={
676
+ "serviceName": "google.ai.generativelanguage.v1alpha.PermissionService",
677
+ "universeDomain": getattr(
678
+ self._transport._credentials, "universe_domain", ""
679
+ ),
680
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
681
+ "credentialsInfo": getattr(
682
+ self.transport._credentials, "get_cred_info", lambda: None
683
+ )(),
684
+ }
685
+ if hasattr(self._transport, "_credentials")
686
+ else {
687
+ "serviceName": "google.ai.generativelanguage.v1alpha.PermissionService",
688
+ "credentialsType": None,
689
+ },
690
+ )
691
+
692
+ def create_permission(
693
+ self,
694
+ request: Optional[
695
+ Union[permission_service.CreatePermissionRequest, dict]
696
+ ] = None,
697
+ *,
698
+ parent: Optional[str] = None,
699
+ permission: Optional[gag_permission.Permission] = None,
700
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
701
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
702
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
703
+ ) -> gag_permission.Permission:
704
+ r"""Create a permission to a specific resource.
705
+
706
+ .. code-block:: python
707
+
708
+ # This snippet has been automatically generated and should be regarded as a
709
+ # code template only.
710
+ # It will require modifications to work:
711
+ # - It may require correct/in-range values for request initialization.
712
+ # - It may require specifying regional endpoints when creating the service
713
+ # client as shown in:
714
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
715
+ from google.ai import generativelanguage_v1alpha
716
+
717
+ def sample_create_permission():
718
+ # Create a client
719
+ client = generativelanguage_v1alpha.PermissionServiceClient()
720
+
721
+ # Initialize request argument(s)
722
+ request = generativelanguage_v1alpha.CreatePermissionRequest(
723
+ parent="parent_value",
724
+ )
725
+
726
+ # Make the request
727
+ response = client.create_permission(request=request)
728
+
729
+ # Handle the response
730
+ print(response)
731
+
732
+ Args:
733
+ request (Union[google.ai.generativelanguage_v1alpha.types.CreatePermissionRequest, dict]):
734
+ The request object. Request to create a ``Permission``.
735
+ parent (str):
736
+ Required. The parent resource of the ``Permission``.
737
+ Formats: ``tunedModels/{tuned_model}``
738
+ ``corpora/{corpus}``
739
+
740
+ This corresponds to the ``parent`` field
741
+ on the ``request`` instance; if ``request`` is provided, this
742
+ should not be set.
743
+ permission (google.ai.generativelanguage_v1alpha.types.Permission):
744
+ Required. The permission to create.
745
+ This corresponds to the ``permission`` field
746
+ on the ``request`` instance; if ``request`` is provided, this
747
+ should not be set.
748
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
749
+ should be retried.
750
+ timeout (float): The timeout for this request.
751
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
752
+ sent along with the request as metadata. Normally, each value must be of type `str`,
753
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
754
+ be of type `bytes`.
755
+
756
+ Returns:
757
+ google.ai.generativelanguage_v1alpha.types.Permission:
758
+ Permission resource grants user,
759
+ group or the rest of the world access to
760
+ the PaLM API resource (e.g. a tuned
761
+ model, corpus).
762
+
763
+ A role is a collection of permitted
764
+ operations that allows users to perform
765
+ specific actions on PaLM API resources.
766
+ To make them available to users, groups,
767
+ or service accounts, you assign roles.
768
+ When you assign a role, you grant
769
+ permissions that the role contains.
770
+
771
+ There are three concentric roles. Each
772
+ role is a superset of the previous
773
+ role's permitted operations:
774
+
775
+ - reader can use the resource (e.g.
776
+ tuned model, corpus) for inference
777
+ - writer has reader's permissions and
778
+ additionally can edit and share
779
+ - owner has writer's permissions and
780
+ additionally can delete
781
+
782
+ """
783
+ # Create or coerce a protobuf request object.
784
+ # - Quick check: If we got a request object, we should *not* have
785
+ # gotten any keyword arguments that map to the request.
786
+ has_flattened_params = any([parent, permission])
787
+ if request is not None and has_flattened_params:
788
+ raise ValueError(
789
+ "If the `request` argument is set, then none of "
790
+ "the individual field arguments should be set."
791
+ )
792
+
793
+ # - Use the request object if provided (there's no risk of modifying the input as
794
+ # there are no flattened fields), or create one.
795
+ if not isinstance(request, permission_service.CreatePermissionRequest):
796
+ request = permission_service.CreatePermissionRequest(request)
797
+ # If we have keyword arguments corresponding to fields on the
798
+ # request, apply these.
799
+ if parent is not None:
800
+ request.parent = parent
801
+ if permission is not None:
802
+ request.permission = permission
803
+
804
+ # Wrap the RPC method; this adds retry and timeout information,
805
+ # and friendly error handling.
806
+ rpc = self._transport._wrapped_methods[self._transport.create_permission]
807
+
808
+ # Certain fields should be provided within the metadata header;
809
+ # add these here.
810
+ metadata = tuple(metadata) + (
811
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
812
+ )
813
+
814
+ # Validate the universe domain.
815
+ self._validate_universe_domain()
816
+
817
+ # Send the request.
818
+ response = rpc(
819
+ request,
820
+ retry=retry,
821
+ timeout=timeout,
822
+ metadata=metadata,
823
+ )
824
+
825
+ # Done; return the response.
826
+ return response
827
+
828
+ def get_permission(
829
+ self,
830
+ request: Optional[Union[permission_service.GetPermissionRequest, dict]] = None,
831
+ *,
832
+ name: Optional[str] = None,
833
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
834
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
835
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
836
+ ) -> permission.Permission:
837
+ r"""Gets information about a specific Permission.
838
+
839
+ .. code-block:: python
840
+
841
+ # This snippet has been automatically generated and should be regarded as a
842
+ # code template only.
843
+ # It will require modifications to work:
844
+ # - It may require correct/in-range values for request initialization.
845
+ # - It may require specifying regional endpoints when creating the service
846
+ # client as shown in:
847
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
848
+ from google.ai import generativelanguage_v1alpha
849
+
850
+ def sample_get_permission():
851
+ # Create a client
852
+ client = generativelanguage_v1alpha.PermissionServiceClient()
853
+
854
+ # Initialize request argument(s)
855
+ request = generativelanguage_v1alpha.GetPermissionRequest(
856
+ name="name_value",
857
+ )
858
+
859
+ # Make the request
860
+ response = client.get_permission(request=request)
861
+
862
+ # Handle the response
863
+ print(response)
864
+
865
+ Args:
866
+ request (Union[google.ai.generativelanguage_v1alpha.types.GetPermissionRequest, dict]):
867
+ The request object. Request for getting information about a specific
868
+ ``Permission``.
869
+ name (str):
870
+ Required. The resource name of the permission.
871
+
872
+ Formats:
873
+ ``tunedModels/{tuned_model}/permissions/{permission}``
874
+ ``corpora/{corpus}/permissions/{permission}``
875
+
876
+ This corresponds to the ``name`` field
877
+ on the ``request`` instance; if ``request`` is provided, this
878
+ should not be set.
879
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
880
+ should be retried.
881
+ timeout (float): The timeout for this request.
882
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
883
+ sent along with the request as metadata. Normally, each value must be of type `str`,
884
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
885
+ be of type `bytes`.
886
+
887
+ Returns:
888
+ google.ai.generativelanguage_v1alpha.types.Permission:
889
+ Permission resource grants user,
890
+ group or the rest of the world access to
891
+ the PaLM API resource (e.g. a tuned
892
+ model, corpus).
893
+
894
+ A role is a collection of permitted
895
+ operations that allows users to perform
896
+ specific actions on PaLM API resources.
897
+ To make them available to users, groups,
898
+ or service accounts, you assign roles.
899
+ When you assign a role, you grant
900
+ permissions that the role contains.
901
+
902
+ There are three concentric roles. Each
903
+ role is a superset of the previous
904
+ role's permitted operations:
905
+
906
+ - reader can use the resource (e.g.
907
+ tuned model, corpus) for inference
908
+ - writer has reader's permissions and
909
+ additionally can edit and share
910
+ - owner has writer's permissions and
911
+ additionally can delete
912
+
913
+ """
914
+ # Create or coerce a protobuf request object.
915
+ # - Quick check: If we got a request object, we should *not* have
916
+ # gotten any keyword arguments that map to the request.
917
+ has_flattened_params = any([name])
918
+ if request is not None and has_flattened_params:
919
+ raise ValueError(
920
+ "If the `request` argument is set, then none of "
921
+ "the individual field arguments should be set."
922
+ )
923
+
924
+ # - Use the request object if provided (there's no risk of modifying the input as
925
+ # there are no flattened fields), or create one.
926
+ if not isinstance(request, permission_service.GetPermissionRequest):
927
+ request = permission_service.GetPermissionRequest(request)
928
+ # If we have keyword arguments corresponding to fields on the
929
+ # request, apply these.
930
+ if name is not None:
931
+ request.name = name
932
+
933
+ # Wrap the RPC method; this adds retry and timeout information,
934
+ # and friendly error handling.
935
+ rpc = self._transport._wrapped_methods[self._transport.get_permission]
936
+
937
+ # Certain fields should be provided within the metadata header;
938
+ # add these here.
939
+ metadata = tuple(metadata) + (
940
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
941
+ )
942
+
943
+ # Validate the universe domain.
944
+ self._validate_universe_domain()
945
+
946
+ # Send the request.
947
+ response = rpc(
948
+ request,
949
+ retry=retry,
950
+ timeout=timeout,
951
+ metadata=metadata,
952
+ )
953
+
954
+ # Done; return the response.
955
+ return response
956
+
957
+ def list_permissions(
958
+ self,
959
+ request: Optional[
960
+ Union[permission_service.ListPermissionsRequest, dict]
961
+ ] = None,
962
+ *,
963
+ parent: Optional[str] = None,
964
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
965
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
966
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
967
+ ) -> pagers.ListPermissionsPager:
968
+ r"""Lists permissions for the specific resource.
969
+
970
+ .. code-block:: python
971
+
972
+ # This snippet has been automatically generated and should be regarded as a
973
+ # code template only.
974
+ # It will require modifications to work:
975
+ # - It may require correct/in-range values for request initialization.
976
+ # - It may require specifying regional endpoints when creating the service
977
+ # client as shown in:
978
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
979
+ from google.ai import generativelanguage_v1alpha
980
+
981
+ def sample_list_permissions():
982
+ # Create a client
983
+ client = generativelanguage_v1alpha.PermissionServiceClient()
984
+
985
+ # Initialize request argument(s)
986
+ request = generativelanguage_v1alpha.ListPermissionsRequest(
987
+ parent="parent_value",
988
+ )
989
+
990
+ # Make the request
991
+ page_result = client.list_permissions(request=request)
992
+
993
+ # Handle the response
994
+ for response in page_result:
995
+ print(response)
996
+
997
+ Args:
998
+ request (Union[google.ai.generativelanguage_v1alpha.types.ListPermissionsRequest, dict]):
999
+ The request object. Request for listing permissions.
1000
+ parent (str):
1001
+ Required. The parent resource of the permissions.
1002
+ Formats: ``tunedModels/{tuned_model}``
1003
+ ``corpora/{corpus}``
1004
+
1005
+ This corresponds to the ``parent`` field
1006
+ on the ``request`` instance; if ``request`` is provided, this
1007
+ should not be set.
1008
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1009
+ should be retried.
1010
+ timeout (float): The timeout for this request.
1011
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1012
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1013
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1014
+ be of type `bytes`.
1015
+
1016
+ Returns:
1017
+ google.ai.generativelanguage_v1alpha.services.permission_service.pagers.ListPermissionsPager:
1018
+ Response from ListPermissions containing a paginated list of
1019
+ permissions.
1020
+
1021
+ Iterating over this object will yield results and
1022
+ resolve additional pages automatically.
1023
+
1024
+ """
1025
+ # Create or coerce a protobuf request object.
1026
+ # - Quick check: If we got a request object, we should *not* have
1027
+ # gotten any keyword arguments that map to the request.
1028
+ has_flattened_params = any([parent])
1029
+ if request is not None and has_flattened_params:
1030
+ raise ValueError(
1031
+ "If the `request` argument is set, then none of "
1032
+ "the individual field arguments should be set."
1033
+ )
1034
+
1035
+ # - Use the request object if provided (there's no risk of modifying the input as
1036
+ # there are no flattened fields), or create one.
1037
+ if not isinstance(request, permission_service.ListPermissionsRequest):
1038
+ request = permission_service.ListPermissionsRequest(request)
1039
+ # If we have keyword arguments corresponding to fields on the
1040
+ # request, apply these.
1041
+ if parent is not None:
1042
+ request.parent = parent
1043
+
1044
+ # Wrap the RPC method; this adds retry and timeout information,
1045
+ # and friendly error handling.
1046
+ rpc = self._transport._wrapped_methods[self._transport.list_permissions]
1047
+
1048
+ # Certain fields should be provided within the metadata header;
1049
+ # add these here.
1050
+ metadata = tuple(metadata) + (
1051
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
1052
+ )
1053
+
1054
+ # Validate the universe domain.
1055
+ self._validate_universe_domain()
1056
+
1057
+ # Send the request.
1058
+ response = rpc(
1059
+ request,
1060
+ retry=retry,
1061
+ timeout=timeout,
1062
+ metadata=metadata,
1063
+ )
1064
+
1065
+ # This method is paged; wrap the response in a pager, which provides
1066
+ # an `__iter__` convenience method.
1067
+ response = pagers.ListPermissionsPager(
1068
+ method=rpc,
1069
+ request=request,
1070
+ response=response,
1071
+ retry=retry,
1072
+ timeout=timeout,
1073
+ metadata=metadata,
1074
+ )
1075
+
1076
+ # Done; return the response.
1077
+ return response
1078
+
1079
+ def update_permission(
1080
+ self,
1081
+ request: Optional[
1082
+ Union[permission_service.UpdatePermissionRequest, dict]
1083
+ ] = None,
1084
+ *,
1085
+ permission: Optional[gag_permission.Permission] = None,
1086
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
1087
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1088
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1089
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1090
+ ) -> gag_permission.Permission:
1091
+ r"""Updates the permission.
1092
+
1093
+ .. code-block:: python
1094
+
1095
+ # This snippet has been automatically generated and should be regarded as a
1096
+ # code template only.
1097
+ # It will require modifications to work:
1098
+ # - It may require correct/in-range values for request initialization.
1099
+ # - It may require specifying regional endpoints when creating the service
1100
+ # client as shown in:
1101
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1102
+ from google.ai import generativelanguage_v1alpha
1103
+
1104
+ def sample_update_permission():
1105
+ # Create a client
1106
+ client = generativelanguage_v1alpha.PermissionServiceClient()
1107
+
1108
+ # Initialize request argument(s)
1109
+ request = generativelanguage_v1alpha.UpdatePermissionRequest(
1110
+ )
1111
+
1112
+ # Make the request
1113
+ response = client.update_permission(request=request)
1114
+
1115
+ # Handle the response
1116
+ print(response)
1117
+
1118
+ Args:
1119
+ request (Union[google.ai.generativelanguage_v1alpha.types.UpdatePermissionRequest, dict]):
1120
+ The request object. Request to update the ``Permission``.
1121
+ permission (google.ai.generativelanguage_v1alpha.types.Permission):
1122
+ Required. The permission to update.
1123
+
1124
+ The permission's ``name`` field is used to identify the
1125
+ permission to update.
1126
+
1127
+ This corresponds to the ``permission`` field
1128
+ on the ``request`` instance; if ``request`` is provided, this
1129
+ should not be set.
1130
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
1131
+ Required. The list of fields to update. Accepted ones:
1132
+
1133
+ - role (``Permission.role`` field)
1134
+
1135
+ This corresponds to the ``update_mask`` field
1136
+ on the ``request`` instance; if ``request`` is provided, this
1137
+ should not be set.
1138
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1139
+ should be retried.
1140
+ timeout (float): The timeout for this request.
1141
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1142
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1143
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1144
+ be of type `bytes`.
1145
+
1146
+ Returns:
1147
+ google.ai.generativelanguage_v1alpha.types.Permission:
1148
+ Permission resource grants user,
1149
+ group or the rest of the world access to
1150
+ the PaLM API resource (e.g. a tuned
1151
+ model, corpus).
1152
+
1153
+ A role is a collection of permitted
1154
+ operations that allows users to perform
1155
+ specific actions on PaLM API resources.
1156
+ To make them available to users, groups,
1157
+ or service accounts, you assign roles.
1158
+ When you assign a role, you grant
1159
+ permissions that the role contains.
1160
+
1161
+ There are three concentric roles. Each
1162
+ role is a superset of the previous
1163
+ role's permitted operations:
1164
+
1165
+ - reader can use the resource (e.g.
1166
+ tuned model, corpus) for inference
1167
+ - writer has reader's permissions and
1168
+ additionally can edit and share
1169
+ - owner has writer's permissions and
1170
+ additionally can delete
1171
+
1172
+ """
1173
+ # Create or coerce a protobuf request object.
1174
+ # - Quick check: If we got a request object, we should *not* have
1175
+ # gotten any keyword arguments that map to the request.
1176
+ has_flattened_params = any([permission, update_mask])
1177
+ if request is not None and has_flattened_params:
1178
+ raise ValueError(
1179
+ "If the `request` argument is set, then none of "
1180
+ "the individual field arguments should be set."
1181
+ )
1182
+
1183
+ # - Use the request object if provided (there's no risk of modifying the input as
1184
+ # there are no flattened fields), or create one.
1185
+ if not isinstance(request, permission_service.UpdatePermissionRequest):
1186
+ request = permission_service.UpdatePermissionRequest(request)
1187
+ # If we have keyword arguments corresponding to fields on the
1188
+ # request, apply these.
1189
+ if permission is not None:
1190
+ request.permission = permission
1191
+ if update_mask is not None:
1192
+ request.update_mask = update_mask
1193
+
1194
+ # Wrap the RPC method; this adds retry and timeout information,
1195
+ # and friendly error handling.
1196
+ rpc = self._transport._wrapped_methods[self._transport.update_permission]
1197
+
1198
+ # Certain fields should be provided within the metadata header;
1199
+ # add these here.
1200
+ metadata = tuple(metadata) + (
1201
+ gapic_v1.routing_header.to_grpc_metadata(
1202
+ (("permission.name", request.permission.name),)
1203
+ ),
1204
+ )
1205
+
1206
+ # Validate the universe domain.
1207
+ self._validate_universe_domain()
1208
+
1209
+ # Send the request.
1210
+ response = rpc(
1211
+ request,
1212
+ retry=retry,
1213
+ timeout=timeout,
1214
+ metadata=metadata,
1215
+ )
1216
+
1217
+ # Done; return the response.
1218
+ return response
1219
+
1220
+ def delete_permission(
1221
+ self,
1222
+ request: Optional[
1223
+ Union[permission_service.DeletePermissionRequest, dict]
1224
+ ] = None,
1225
+ *,
1226
+ name: Optional[str] = None,
1227
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1228
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1229
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1230
+ ) -> None:
1231
+ r"""Deletes the permission.
1232
+
1233
+ .. code-block:: python
1234
+
1235
+ # This snippet has been automatically generated and should be regarded as a
1236
+ # code template only.
1237
+ # It will require modifications to work:
1238
+ # - It may require correct/in-range values for request initialization.
1239
+ # - It may require specifying regional endpoints when creating the service
1240
+ # client as shown in:
1241
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1242
+ from google.ai import generativelanguage_v1alpha
1243
+
1244
+ def sample_delete_permission():
1245
+ # Create a client
1246
+ client = generativelanguage_v1alpha.PermissionServiceClient()
1247
+
1248
+ # Initialize request argument(s)
1249
+ request = generativelanguage_v1alpha.DeletePermissionRequest(
1250
+ name="name_value",
1251
+ )
1252
+
1253
+ # Make the request
1254
+ client.delete_permission(request=request)
1255
+
1256
+ Args:
1257
+ request (Union[google.ai.generativelanguage_v1alpha.types.DeletePermissionRequest, dict]):
1258
+ The request object. Request to delete the ``Permission``.
1259
+ name (str):
1260
+ Required. The resource name of the permission. Formats:
1261
+ ``tunedModels/{tuned_model}/permissions/{permission}``
1262
+ ``corpora/{corpus}/permissions/{permission}``
1263
+
1264
+ This corresponds to the ``name`` field
1265
+ on the ``request`` instance; if ``request`` is provided, this
1266
+ should not be set.
1267
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1268
+ should be retried.
1269
+ timeout (float): The timeout for this request.
1270
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1271
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1272
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1273
+ be of type `bytes`.
1274
+ """
1275
+ # Create or coerce a protobuf request object.
1276
+ # - Quick check: If we got a request object, we should *not* have
1277
+ # gotten any keyword arguments that map to the request.
1278
+ has_flattened_params = any([name])
1279
+ if request is not None and has_flattened_params:
1280
+ raise ValueError(
1281
+ "If the `request` argument is set, then none of "
1282
+ "the individual field arguments should be set."
1283
+ )
1284
+
1285
+ # - Use the request object if provided (there's no risk of modifying the input as
1286
+ # there are no flattened fields), or create one.
1287
+ if not isinstance(request, permission_service.DeletePermissionRequest):
1288
+ request = permission_service.DeletePermissionRequest(request)
1289
+ # If we have keyword arguments corresponding to fields on the
1290
+ # request, apply these.
1291
+ if name is not None:
1292
+ request.name = name
1293
+
1294
+ # Wrap the RPC method; this adds retry and timeout information,
1295
+ # and friendly error handling.
1296
+ rpc = self._transport._wrapped_methods[self._transport.delete_permission]
1297
+
1298
+ # Certain fields should be provided within the metadata header;
1299
+ # add these here.
1300
+ metadata = tuple(metadata) + (
1301
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1302
+ )
1303
+
1304
+ # Validate the universe domain.
1305
+ self._validate_universe_domain()
1306
+
1307
+ # Send the request.
1308
+ rpc(
1309
+ request,
1310
+ retry=retry,
1311
+ timeout=timeout,
1312
+ metadata=metadata,
1313
+ )
1314
+
1315
+ def transfer_ownership(
1316
+ self,
1317
+ request: Optional[
1318
+ Union[permission_service.TransferOwnershipRequest, dict]
1319
+ ] = None,
1320
+ *,
1321
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1322
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1323
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1324
+ ) -> permission_service.TransferOwnershipResponse:
1325
+ r"""Transfers ownership of the tuned model.
1326
+ This is the only way to change ownership of the tuned
1327
+ model. The current owner will be downgraded to writer
1328
+ role.
1329
+
1330
+ .. code-block:: python
1331
+
1332
+ # This snippet has been automatically generated and should be regarded as a
1333
+ # code template only.
1334
+ # It will require modifications to work:
1335
+ # - It may require correct/in-range values for request initialization.
1336
+ # - It may require specifying regional endpoints when creating the service
1337
+ # client as shown in:
1338
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
1339
+ from google.ai import generativelanguage_v1alpha
1340
+
1341
+ def sample_transfer_ownership():
1342
+ # Create a client
1343
+ client = generativelanguage_v1alpha.PermissionServiceClient()
1344
+
1345
+ # Initialize request argument(s)
1346
+ request = generativelanguage_v1alpha.TransferOwnershipRequest(
1347
+ name="name_value",
1348
+ email_address="email_address_value",
1349
+ )
1350
+
1351
+ # Make the request
1352
+ response = client.transfer_ownership(request=request)
1353
+
1354
+ # Handle the response
1355
+ print(response)
1356
+
1357
+ Args:
1358
+ request (Union[google.ai.generativelanguage_v1alpha.types.TransferOwnershipRequest, dict]):
1359
+ The request object. Request to transfer the ownership of
1360
+ the tuned model.
1361
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
1362
+ should be retried.
1363
+ timeout (float): The timeout for this request.
1364
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1365
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1366
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1367
+ be of type `bytes`.
1368
+
1369
+ Returns:
1370
+ google.ai.generativelanguage_v1alpha.types.TransferOwnershipResponse:
1371
+ Response from TransferOwnership.
1372
+ """
1373
+ # Create or coerce a protobuf request object.
1374
+ # - Use the request object if provided (there's no risk of modifying the input as
1375
+ # there are no flattened fields), or create one.
1376
+ if not isinstance(request, permission_service.TransferOwnershipRequest):
1377
+ request = permission_service.TransferOwnershipRequest(request)
1378
+
1379
+ # Wrap the RPC method; this adds retry and timeout information,
1380
+ # and friendly error handling.
1381
+ rpc = self._transport._wrapped_methods[self._transport.transfer_ownership]
1382
+
1383
+ # Certain fields should be provided within the metadata header;
1384
+ # add these here.
1385
+ metadata = tuple(metadata) + (
1386
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1387
+ )
1388
+
1389
+ # Validate the universe domain.
1390
+ self._validate_universe_domain()
1391
+
1392
+ # Send the request.
1393
+ response = rpc(
1394
+ request,
1395
+ retry=retry,
1396
+ timeout=timeout,
1397
+ metadata=metadata,
1398
+ )
1399
+
1400
+ # Done; return the response.
1401
+ return response
1402
+
1403
+ def __enter__(self) -> "PermissionServiceClient":
1404
+ return self
1405
+
1406
+ def __exit__(self, type, value, traceback):
1407
+ """Releases underlying transport's resources.
1408
+
1409
+ .. warning::
1410
+ ONLY use as a context manager if the transport is NOT shared
1411
+ with other clients! Exiting the with block will CLOSE the transport
1412
+ and may cause errors in other clients!
1413
+ """
1414
+ self.transport.close()
1415
+
1416
+ def list_operations(
1417
+ self,
1418
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
1419
+ *,
1420
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1421
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1422
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1423
+ ) -> operations_pb2.ListOperationsResponse:
1424
+ r"""Lists operations that match the specified filter in the request.
1425
+
1426
+ Args:
1427
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
1428
+ The request object. Request message for
1429
+ `ListOperations` method.
1430
+ retry (google.api_core.retry.Retry): Designation of what errors,
1431
+ if any, should be retried.
1432
+ timeout (float): The timeout for this request.
1433
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1434
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1435
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1436
+ be of type `bytes`.
1437
+ Returns:
1438
+ ~.operations_pb2.ListOperationsResponse:
1439
+ Response message for ``ListOperations`` method.
1440
+ """
1441
+ # Create or coerce a protobuf request object.
1442
+ # The request isn't a proto-plus wrapped type,
1443
+ # so it must be constructed via keyword expansion.
1444
+ if isinstance(request, dict):
1445
+ request = operations_pb2.ListOperationsRequest(**request)
1446
+
1447
+ # Wrap the RPC method; this adds retry and timeout information,
1448
+ # and friendly error handling.
1449
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
1450
+
1451
+ # Certain fields should be provided within the metadata header;
1452
+ # add these here.
1453
+ metadata = tuple(metadata) + (
1454
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1455
+ )
1456
+
1457
+ # Validate the universe domain.
1458
+ self._validate_universe_domain()
1459
+
1460
+ # Send the request.
1461
+ response = rpc(
1462
+ request,
1463
+ retry=retry,
1464
+ timeout=timeout,
1465
+ metadata=metadata,
1466
+ )
1467
+
1468
+ # Done; return the response.
1469
+ return response
1470
+
1471
+ def get_operation(
1472
+ self,
1473
+ request: Optional[operations_pb2.GetOperationRequest] = None,
1474
+ *,
1475
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1476
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1477
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1478
+ ) -> operations_pb2.Operation:
1479
+ r"""Gets the latest state of a long-running operation.
1480
+
1481
+ Args:
1482
+ request (:class:`~.operations_pb2.GetOperationRequest`):
1483
+ The request object. Request message for
1484
+ `GetOperation` method.
1485
+ retry (google.api_core.retry.Retry): Designation of what errors,
1486
+ if any, should be retried.
1487
+ timeout (float): The timeout for this request.
1488
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1489
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1490
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1491
+ be of type `bytes`.
1492
+ Returns:
1493
+ ~.operations_pb2.Operation:
1494
+ An ``Operation`` object.
1495
+ """
1496
+ # Create or coerce a protobuf request object.
1497
+ # The request isn't a proto-plus wrapped type,
1498
+ # so it must be constructed via keyword expansion.
1499
+ if isinstance(request, dict):
1500
+ request = operations_pb2.GetOperationRequest(**request)
1501
+
1502
+ # Wrap the RPC method; this adds retry and timeout information,
1503
+ # and friendly error handling.
1504
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
1505
+
1506
+ # Certain fields should be provided within the metadata header;
1507
+ # add these here.
1508
+ metadata = tuple(metadata) + (
1509
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1510
+ )
1511
+
1512
+ # Validate the universe domain.
1513
+ self._validate_universe_domain()
1514
+
1515
+ # Send the request.
1516
+ response = rpc(
1517
+ request,
1518
+ retry=retry,
1519
+ timeout=timeout,
1520
+ metadata=metadata,
1521
+ )
1522
+
1523
+ # Done; return the response.
1524
+ return response
1525
+
1526
+
1527
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
1528
+ gapic_version=package_version.__version__
1529
+ )
1530
+
1531
+
1532
+ __all__ = ("PermissionServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/pagers.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from typing import (
17
+ Any,
18
+ AsyncIterator,
19
+ Awaitable,
20
+ Callable,
21
+ Iterator,
22
+ Optional,
23
+ Sequence,
24
+ Tuple,
25
+ Union,
26
+ )
27
+
28
+ from google.api_core import gapic_v1
29
+ from google.api_core import retry as retries
30
+ from google.api_core import retry_async as retries_async
31
+
32
+ try:
33
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
34
+ OptionalAsyncRetry = Union[
35
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
36
+ ]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
40
+
41
+ from google.ai.generativelanguage_v1alpha.types import permission, permission_service
42
+
43
+
44
+ class ListPermissionsPager:
45
+ """A pager for iterating through ``list_permissions`` requests.
46
+
47
+ This class thinly wraps an initial
48
+ :class:`google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse` object, and
49
+ provides an ``__iter__`` method to iterate through its
50
+ ``permissions`` field.
51
+
52
+ If there are more pages, the ``__iter__`` method will make additional
53
+ ``ListPermissions`` requests and continue to iterate
54
+ through the ``permissions`` field on the
55
+ corresponding responses.
56
+
57
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse`
58
+ attributes are available on the pager. If multiple requests are made, only
59
+ the most recent response is retained, and thus used for attribute lookup.
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ method: Callable[..., permission_service.ListPermissionsResponse],
65
+ request: permission_service.ListPermissionsRequest,
66
+ response: permission_service.ListPermissionsResponse,
67
+ *,
68
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
69
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
70
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
71
+ ):
72
+ """Instantiate the pager.
73
+
74
+ Args:
75
+ method (Callable): The method that was originally called, and
76
+ which instantiated this pager.
77
+ request (google.ai.generativelanguage_v1alpha.types.ListPermissionsRequest):
78
+ The initial request object.
79
+ response (google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse):
80
+ The initial response object.
81
+ retry (google.api_core.retry.Retry): Designation of what errors,
82
+ if any, should be retried.
83
+ timeout (float): The timeout for this request.
84
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
85
+ sent along with the request as metadata. Normally, each value must be of type `str`,
86
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
87
+ be of type `bytes`.
88
+ """
89
+ self._method = method
90
+ self._request = permission_service.ListPermissionsRequest(request)
91
+ self._response = response
92
+ self._retry = retry
93
+ self._timeout = timeout
94
+ self._metadata = metadata
95
+
96
+ def __getattr__(self, name: str) -> Any:
97
+ return getattr(self._response, name)
98
+
99
+ @property
100
+ def pages(self) -> Iterator[permission_service.ListPermissionsResponse]:
101
+ yield self._response
102
+ while self._response.next_page_token:
103
+ self._request.page_token = self._response.next_page_token
104
+ self._response = self._method(
105
+ self._request,
106
+ retry=self._retry,
107
+ timeout=self._timeout,
108
+ metadata=self._metadata,
109
+ )
110
+ yield self._response
111
+
112
+ def __iter__(self) -> Iterator[permission.Permission]:
113
+ for page in self.pages:
114
+ yield from page.permissions
115
+
116
+ def __repr__(self) -> str:
117
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
118
+
119
+
120
+ class ListPermissionsAsyncPager:
121
+ """A pager for iterating through ``list_permissions`` requests.
122
+
123
+ This class thinly wraps an initial
124
+ :class:`google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse` object, and
125
+ provides an ``__aiter__`` method to iterate through its
126
+ ``permissions`` field.
127
+
128
+ If there are more pages, the ``__aiter__`` method will make additional
129
+ ``ListPermissions`` requests and continue to iterate
130
+ through the ``permissions`` field on the
131
+ corresponding responses.
132
+
133
+ All the usual :class:`google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse`
134
+ attributes are available on the pager. If multiple requests are made, only
135
+ the most recent response is retained, and thus used for attribute lookup.
136
+ """
137
+
138
+ def __init__(
139
+ self,
140
+ method: Callable[..., Awaitable[permission_service.ListPermissionsResponse]],
141
+ request: permission_service.ListPermissionsRequest,
142
+ response: permission_service.ListPermissionsResponse,
143
+ *,
144
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
145
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
146
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
147
+ ):
148
+ """Instantiates the pager.
149
+
150
+ Args:
151
+ method (Callable): The method that was originally called, and
152
+ which instantiated this pager.
153
+ request (google.ai.generativelanguage_v1alpha.types.ListPermissionsRequest):
154
+ The initial request object.
155
+ response (google.ai.generativelanguage_v1alpha.types.ListPermissionsResponse):
156
+ The initial response object.
157
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
158
+ if any, should be retried.
159
+ timeout (float): The timeout for this request.
160
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
161
+ sent along with the request as metadata. Normally, each value must be of type `str`,
162
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
163
+ be of type `bytes`.
164
+ """
165
+ self._method = method
166
+ self._request = permission_service.ListPermissionsRequest(request)
167
+ self._response = response
168
+ self._retry = retry
169
+ self._timeout = timeout
170
+ self._metadata = metadata
171
+
172
+ def __getattr__(self, name: str) -> Any:
173
+ return getattr(self._response, name)
174
+
175
+ @property
176
+ async def pages(self) -> AsyncIterator[permission_service.ListPermissionsResponse]:
177
+ yield self._response
178
+ while self._response.next_page_token:
179
+ self._request.page_token = self._response.next_page_token
180
+ self._response = await self._method(
181
+ self._request,
182
+ retry=self._retry,
183
+ timeout=self._timeout,
184
+ metadata=self._metadata,
185
+ )
186
+ yield self._response
187
+
188
+ def __aiter__(self) -> AsyncIterator[permission.Permission]:
189
+ async def async_generator():
190
+ async for page in self.pages:
191
+ for response in page.permissions:
192
+ yield response
193
+
194
+ return async_generator()
195
+
196
+ def __repr__(self) -> str:
197
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (926 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (23.4 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (63.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/permission_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (22.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .async_client import PredictionServiceAsyncClient
17
+ from .client import PredictionServiceClient
18
+
19
+ __all__ = (
20
+ "PredictionServiceClient",
21
+ "PredictionServiceAsyncClient",
22
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (418 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (23.4 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (39.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/async_client.py ADDED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.longrunning import operations_pb2 # type: ignore
47
+ from google.protobuf import struct_pb2 # type: ignore
48
+
49
+ from google.ai.generativelanguage_v1alpha.types import prediction_service
50
+
51
+ from .client import PredictionServiceClient
52
+ from .transports.base import DEFAULT_CLIENT_INFO, PredictionServiceTransport
53
+ from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
54
+
55
+ try:
56
+ from google.api_core import client_logging # type: ignore
57
+
58
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
59
+ except ImportError: # pragma: NO COVER
60
+ CLIENT_LOGGING_SUPPORTED = False
61
+
62
+ _LOGGER = std_logging.getLogger(__name__)
63
+
64
+
65
+ class PredictionServiceAsyncClient:
66
+ """A service for online predictions and explanations."""
67
+
68
+ _client: PredictionServiceClient
69
+
70
+ # Copy defaults from the synchronous client for use here.
71
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
72
+ DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT
73
+ DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
74
+ _DEFAULT_ENDPOINT_TEMPLATE = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE
75
+ _DEFAULT_UNIVERSE = PredictionServiceClient._DEFAULT_UNIVERSE
76
+
77
+ model_path = staticmethod(PredictionServiceClient.model_path)
78
+ parse_model_path = staticmethod(PredictionServiceClient.parse_model_path)
79
+ common_billing_account_path = staticmethod(
80
+ PredictionServiceClient.common_billing_account_path
81
+ )
82
+ parse_common_billing_account_path = staticmethod(
83
+ PredictionServiceClient.parse_common_billing_account_path
84
+ )
85
+ common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
86
+ parse_common_folder_path = staticmethod(
87
+ PredictionServiceClient.parse_common_folder_path
88
+ )
89
+ common_organization_path = staticmethod(
90
+ PredictionServiceClient.common_organization_path
91
+ )
92
+ parse_common_organization_path = staticmethod(
93
+ PredictionServiceClient.parse_common_organization_path
94
+ )
95
+ common_project_path = staticmethod(PredictionServiceClient.common_project_path)
96
+ parse_common_project_path = staticmethod(
97
+ PredictionServiceClient.parse_common_project_path
98
+ )
99
+ common_location_path = staticmethod(PredictionServiceClient.common_location_path)
100
+ parse_common_location_path = staticmethod(
101
+ PredictionServiceClient.parse_common_location_path
102
+ )
103
+
104
+ @classmethod
105
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
106
+ """Creates an instance of this client using the provided credentials
107
+ info.
108
+
109
+ Args:
110
+ info (dict): The service account private key info.
111
+ args: Additional arguments to pass to the constructor.
112
+ kwargs: Additional arguments to pass to the constructor.
113
+
114
+ Returns:
115
+ PredictionServiceAsyncClient: The constructed client.
116
+ """
117
+ return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore
118
+
119
+ @classmethod
120
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
121
+ """Creates an instance of this client using the provided credentials
122
+ file.
123
+
124
+ Args:
125
+ filename (str): The path to the service account private key json
126
+ file.
127
+ args: Additional arguments to pass to the constructor.
128
+ kwargs: Additional arguments to pass to the constructor.
129
+
130
+ Returns:
131
+ PredictionServiceAsyncClient: The constructed client.
132
+ """
133
+ return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore
134
+
135
+ from_service_account_json = from_service_account_file
136
+
137
+ @classmethod
138
+ def get_mtls_endpoint_and_cert_source(
139
+ cls, client_options: Optional[ClientOptions] = None
140
+ ):
141
+ """Return the API endpoint and client cert source for mutual TLS.
142
+
143
+ The client cert source is determined in the following order:
144
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
145
+ client cert source is None.
146
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
147
+ default client cert source exists, use the default one; otherwise the client cert
148
+ source is None.
149
+
150
+ The API endpoint is determined in the following order:
151
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
152
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
153
+ default mTLS endpoint; if the environment variable is "never", use the default API
154
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
155
+ use the default API endpoint.
156
+
157
+ More details can be found at https://google.aip.dev/auth/4114.
158
+
159
+ Args:
160
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
161
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
162
+ in this method.
163
+
164
+ Returns:
165
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
166
+ client cert source to use.
167
+
168
+ Raises:
169
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
170
+ """
171
+ return PredictionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
172
+
173
+ @property
174
+ def transport(self) -> PredictionServiceTransport:
175
+ """Returns the transport used by the client instance.
176
+
177
+ Returns:
178
+ PredictionServiceTransport: The transport used by the client instance.
179
+ """
180
+ return self._client.transport
181
+
182
+ @property
183
+ def api_endpoint(self):
184
+ """Return the API endpoint used by the client instance.
185
+
186
+ Returns:
187
+ str: The API endpoint used by the client instance.
188
+ """
189
+ return self._client._api_endpoint
190
+
191
+ @property
192
+ def universe_domain(self) -> str:
193
+ """Return the universe domain used by the client instance.
194
+
195
+ Returns:
196
+ str: The universe domain used
197
+ by the client instance.
198
+ """
199
+ return self._client._universe_domain
200
+
201
+ get_transport_class = PredictionServiceClient.get_transport_class
202
+
203
+ def __init__(
204
+ self,
205
+ *,
206
+ credentials: Optional[ga_credentials.Credentials] = None,
207
+ transport: Optional[
208
+ Union[
209
+ str,
210
+ PredictionServiceTransport,
211
+ Callable[..., PredictionServiceTransport],
212
+ ]
213
+ ] = "grpc_asyncio",
214
+ client_options: Optional[ClientOptions] = None,
215
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
216
+ ) -> None:
217
+ """Instantiates the prediction service async client.
218
+
219
+ Args:
220
+ credentials (Optional[google.auth.credentials.Credentials]): The
221
+ authorization credentials to attach to requests. These
222
+ credentials identify the application to the service; if none
223
+ are specified, the client will attempt to ascertain the
224
+ credentials from the environment.
225
+ transport (Optional[Union[str,PredictionServiceTransport,Callable[..., PredictionServiceTransport]]]):
226
+ The transport to use, or a Callable that constructs and returns a new transport to use.
227
+ If a Callable is given, it will be called with the same set of initialization
228
+ arguments as used in the PredictionServiceTransport constructor.
229
+ If set to None, a transport is chosen automatically.
230
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
231
+ Custom options for the client.
232
+
233
+ 1. The ``api_endpoint`` property can be used to override the
234
+ default endpoint provided by the client when ``transport`` is
235
+ not explicitly provided. Only if this property is not set and
236
+ ``transport`` was not explicitly provided, the endpoint is
237
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
238
+ variable, which have one of the following values:
239
+ "always" (always use the default mTLS endpoint), "never" (always
240
+ use the default regular endpoint) and "auto" (auto-switch to the
241
+ default mTLS endpoint if client certificate is present; this is
242
+ the default value).
243
+
244
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
245
+ is "true", then the ``client_cert_source`` property can be used
246
+ to provide a client certificate for mTLS transport. If
247
+ not provided, the default SSL client certificate will be used if
248
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
249
+ set, no client certificate will be used.
250
+
251
+ 3. The ``universe_domain`` property can be used to override the
252
+ default "googleapis.com" universe. Note that ``api_endpoint``
253
+ property still takes precedence; and ``universe_domain`` is
254
+ currently not supported for mTLS.
255
+
256
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
257
+ The client info used to send a user-agent string along with
258
+ API requests. If ``None``, then default info will be used.
259
+ Generally, you only need to set this if you're developing
260
+ your own client library.
261
+
262
+ Raises:
263
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
264
+ creation failed for any reason.
265
+ """
266
+ self._client = PredictionServiceClient(
267
+ credentials=credentials,
268
+ transport=transport,
269
+ client_options=client_options,
270
+ client_info=client_info,
271
+ )
272
+
273
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
274
+ std_logging.DEBUG
275
+ ): # pragma: NO COVER
276
+ _LOGGER.debug(
277
+ "Created client `google.ai.generativelanguage_v1alpha.PredictionServiceAsyncClient`.",
278
+ extra={
279
+ "serviceName": "google.ai.generativelanguage.v1alpha.PredictionService",
280
+ "universeDomain": getattr(
281
+ self._client._transport._credentials, "universe_domain", ""
282
+ ),
283
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
284
+ "credentialsInfo": getattr(
285
+ self.transport._credentials, "get_cred_info", lambda: None
286
+ )(),
287
+ }
288
+ if hasattr(self._client._transport, "_credentials")
289
+ else {
290
+ "serviceName": "google.ai.generativelanguage.v1alpha.PredictionService",
291
+ "credentialsType": None,
292
+ },
293
+ )
294
+
295
+ async def predict(
296
+ self,
297
+ request: Optional[Union[prediction_service.PredictRequest, dict]] = None,
298
+ *,
299
+ model: Optional[str] = None,
300
+ instances: Optional[MutableSequence[struct_pb2.Value]] = None,
301
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
302
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
303
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
304
+ ) -> prediction_service.PredictResponse:
305
+ r"""Performs a prediction request.
306
+
307
+ .. code-block:: python
308
+
309
+ # This snippet has been automatically generated and should be regarded as a
310
+ # code template only.
311
+ # It will require modifications to work:
312
+ # - It may require correct/in-range values for request initialization.
313
+ # - It may require specifying regional endpoints when creating the service
314
+ # client as shown in:
315
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
316
+ from google.ai import generativelanguage_v1alpha
317
+
318
+ async def sample_predict():
319
+ # Create a client
320
+ client = generativelanguage_v1alpha.PredictionServiceAsyncClient()
321
+
322
+ # Initialize request argument(s)
323
+ instances = generativelanguage_v1alpha.Value()
324
+ instances.null_value = "NULL_VALUE"
325
+
326
+ request = generativelanguage_v1alpha.PredictRequest(
327
+ model="model_value",
328
+ instances=instances,
329
+ )
330
+
331
+ # Make the request
332
+ response = await client.predict(request=request)
333
+
334
+ # Handle the response
335
+ print(response)
336
+
337
+ Args:
338
+ request (Optional[Union[google.ai.generativelanguage_v1alpha.types.PredictRequest, dict]]):
339
+ The request object. Request message for
340
+ [PredictionService.Predict][google.ai.generativelanguage.v1alpha.PredictionService.Predict].
341
+ model (:class:`str`):
342
+ Required. The name of the model for prediction. Format:
343
+ ``name=models/{model}``.
344
+
345
+ This corresponds to the ``model`` field
346
+ on the ``request`` instance; if ``request`` is provided, this
347
+ should not be set.
348
+ instances (:class:`MutableSequence[google.protobuf.struct_pb2.Value]`):
349
+ Required. The instances that are the
350
+ input to the prediction call.
351
+
352
+ This corresponds to the ``instances`` field
353
+ on the ``request`` instance; if ``request`` is provided, this
354
+ should not be set.
355
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
356
+ should be retried.
357
+ timeout (float): The timeout for this request.
358
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
359
+ sent along with the request as metadata. Normally, each value must be of type `str`,
360
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
361
+ be of type `bytes`.
362
+
363
+ Returns:
364
+ google.ai.generativelanguage_v1alpha.types.PredictResponse:
365
+ Response message for [PredictionService.Predict].
366
+ """
367
+ # Create or coerce a protobuf request object.
368
+ # - Quick check: If we got a request object, we should *not* have
369
+ # gotten any keyword arguments that map to the request.
370
+ has_flattened_params = any([model, instances])
371
+ if request is not None and has_flattened_params:
372
+ raise ValueError(
373
+ "If the `request` argument is set, then none of "
374
+ "the individual field arguments should be set."
375
+ )
376
+
377
+ # - Use the request object if provided (there's no risk of modifying the input as
378
+ # there are no flattened fields), or create one.
379
+ if not isinstance(request, prediction_service.PredictRequest):
380
+ request = prediction_service.PredictRequest(request)
381
+
382
+ # If we have keyword arguments corresponding to fields on the
383
+ # request, apply these.
384
+ if model is not None:
385
+ request.model = model
386
+ if instances:
387
+ request.instances.extend(instances)
388
+
389
+ # Wrap the RPC method; this adds retry and timeout information,
390
+ # and friendly error handling.
391
+ rpc = self._client._transport._wrapped_methods[self._client._transport.predict]
392
+
393
+ # Certain fields should be provided within the metadata header;
394
+ # add these here.
395
+ metadata = tuple(metadata) + (
396
+ gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
397
+ )
398
+
399
+ # Validate the universe domain.
400
+ self._client._validate_universe_domain()
401
+
402
+ # Send the request.
403
+ response = await rpc(
404
+ request,
405
+ retry=retry,
406
+ timeout=timeout,
407
+ metadata=metadata,
408
+ )
409
+
410
+ # Done; return the response.
411
+ return response
412
+
413
+ async def list_operations(
414
+ self,
415
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
416
+ *,
417
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
418
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
419
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
420
+ ) -> operations_pb2.ListOperationsResponse:
421
+ r"""Lists operations that match the specified filter in the request.
422
+
423
+ Args:
424
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
425
+ The request object. Request message for
426
+ `ListOperations` method.
427
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
428
+ if any, should be retried.
429
+ timeout (float): The timeout for this request.
430
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
431
+ sent along with the request as metadata. Normally, each value must be of type `str`,
432
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
433
+ be of type `bytes`.
434
+ Returns:
435
+ ~.operations_pb2.ListOperationsResponse:
436
+ Response message for ``ListOperations`` method.
437
+ """
438
+ # Create or coerce a protobuf request object.
439
+ # The request isn't a proto-plus wrapped type,
440
+ # so it must be constructed via keyword expansion.
441
+ if isinstance(request, dict):
442
+ request = operations_pb2.ListOperationsRequest(**request)
443
+
444
+ # Wrap the RPC method; this adds retry and timeout information,
445
+ # and friendly error handling.
446
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
447
+
448
+ # Certain fields should be provided within the metadata header;
449
+ # add these here.
450
+ metadata = tuple(metadata) + (
451
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
452
+ )
453
+
454
+ # Validate the universe domain.
455
+ self._client._validate_universe_domain()
456
+
457
+ # Send the request.
458
+ response = await rpc(
459
+ request,
460
+ retry=retry,
461
+ timeout=timeout,
462
+ metadata=metadata,
463
+ )
464
+
465
+ # Done; return the response.
466
+ return response
467
+
468
+ async def get_operation(
469
+ self,
470
+ request: Optional[operations_pb2.GetOperationRequest] = None,
471
+ *,
472
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
473
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
474
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
475
+ ) -> operations_pb2.Operation:
476
+ r"""Gets the latest state of a long-running operation.
477
+
478
+ Args:
479
+ request (:class:`~.operations_pb2.GetOperationRequest`):
480
+ The request object. Request message for
481
+ `GetOperation` method.
482
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
483
+ if any, should be retried.
484
+ timeout (float): The timeout for this request.
485
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
486
+ sent along with the request as metadata. Normally, each value must be of type `str`,
487
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
488
+ be of type `bytes`.
489
+ Returns:
490
+ ~.operations_pb2.Operation:
491
+ An ``Operation`` object.
492
+ """
493
+ # Create or coerce a protobuf request object.
494
+ # The request isn't a proto-plus wrapped type,
495
+ # so it must be constructed via keyword expansion.
496
+ if isinstance(request, dict):
497
+ request = operations_pb2.GetOperationRequest(**request)
498
+
499
+ # Wrap the RPC method; this adds retry and timeout information,
500
+ # and friendly error handling.
501
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
502
+
503
+ # Certain fields should be provided within the metadata header;
504
+ # add these here.
505
+ metadata = tuple(metadata) + (
506
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
507
+ )
508
+
509
+ # Validate the universe domain.
510
+ self._client._validate_universe_domain()
511
+
512
+ # Send the request.
513
+ response = await rpc(
514
+ request,
515
+ retry=retry,
516
+ timeout=timeout,
517
+ metadata=metadata,
518
+ )
519
+
520
+ # Done; return the response.
521
+ return response
522
+
523
+ async def __aenter__(self) -> "PredictionServiceAsyncClient":
524
+ return self
525
+
526
+ async def __aexit__(self, exc_type, exc, tb):
527
+ await self.transport.close()
528
+
529
+
530
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
531
+ gapic_version=package_version.__version__
532
+ )
533
+
534
+
535
+ __all__ = ("PredictionServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/client.py ADDED
@@ -0,0 +1,929 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1alpha import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.longrunning import operations_pb2 # type: ignore
62
+ from google.protobuf import struct_pb2 # type: ignore
63
+
64
+ from google.ai.generativelanguage_v1alpha.types import prediction_service
65
+
66
+ from .transports.base import DEFAULT_CLIENT_INFO, PredictionServiceTransport
67
+ from .transports.grpc import PredictionServiceGrpcTransport
68
+ from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
69
+ from .transports.rest import PredictionServiceRestTransport
70
+
71
+
72
class PredictionServiceClientMeta(type):
    """Metaclass for the PredictionService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of available transports, keyed by label. Insertion order is
    # significant: the first entry acts as the default transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[PredictionServiceTransport]]
    _transport_registry["grpc"] = PredictionServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
    _transport_registry["rest"] = PredictionServiceRestTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[PredictionServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        # No label: fall back to the default (first registered) transport.
        if not label:
            return next(iter(registry.values()))
        # A specific transport was requested; a bad label raises KeyError,
        # matching the original behavior.
        return registry[label]
107
+
108
+
109
+ class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
110
+ """A service for online predictions and explanations."""
111
+
112
+ @staticmethod
113
+ def _get_default_mtls_endpoint(api_endpoint):
114
+ """Converts api endpoint to mTLS endpoint.
115
+
116
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
117
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
118
+ Args:
119
+ api_endpoint (Optional[str]): the api endpoint to convert.
120
+ Returns:
121
+ str: converted mTLS api endpoint.
122
+ """
123
+ if not api_endpoint:
124
+ return api_endpoint
125
+
126
+ mtls_endpoint_re = re.compile(
127
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
128
+ )
129
+
130
+ m = mtls_endpoint_re.match(api_endpoint)
131
+ name, mtls, sandbox, googledomain = m.groups()
132
+ if mtls or not googledomain:
133
+ return api_endpoint
134
+
135
+ if sandbox:
136
+ return api_endpoint.replace(
137
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
138
+ )
139
+
140
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
141
+
142
    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
    DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
    # Derived once at class-creation time from DEFAULT_ENDPOINT; __func__ is
    # needed because the staticmethod descriptor is not yet bound here.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    # Template filled with the configured universe domain to produce the
    # effective endpoint (see _get_api_endpoint).
    _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
    _DEFAULT_UNIVERSE = "googleapis.com"
150
+
151
+ @classmethod
152
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
153
+ """Creates an instance of this client using the provided credentials
154
+ info.
155
+
156
+ Args:
157
+ info (dict): The service account private key info.
158
+ args: Additional arguments to pass to the constructor.
159
+ kwargs: Additional arguments to pass to the constructor.
160
+
161
+ Returns:
162
+ PredictionServiceClient: The constructed client.
163
+ """
164
+ credentials = service_account.Credentials.from_service_account_info(info)
165
+ kwargs["credentials"] = credentials
166
+ return cls(*args, **kwargs)
167
+
168
+ @classmethod
169
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
170
+ """Creates an instance of this client using the provided credentials
171
+ file.
172
+
173
+ Args:
174
+ filename (str): The path to the service account private key json
175
+ file.
176
+ args: Additional arguments to pass to the constructor.
177
+ kwargs: Additional arguments to pass to the constructor.
178
+
179
+ Returns:
180
+ PredictionServiceClient: The constructed client.
181
+ """
182
+ credentials = service_account.Credentials.from_service_account_file(filename)
183
+ kwargs["credentials"] = credentials
184
+ return cls(*args, **kwargs)
185
+
186
+ from_service_account_json = from_service_account_file
187
+
188
    @property
    def transport(self) -> PredictionServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            PredictionServiceTransport: The transport used by the client
                instance.
        """
        # Set in __init__ (either the caller-provided transport instance or
        # one constructed from client options).
        return self._transport
197
+
198
+ @staticmethod
199
+ def model_path(
200
+ model: str,
201
+ ) -> str:
202
+ """Returns a fully-qualified model string."""
203
+ return "models/{model}".format(
204
+ model=model,
205
+ )
206
+
207
+ @staticmethod
208
+ def parse_model_path(path: str) -> Dict[str, str]:
209
+ """Parses a model path into its component segments."""
210
+ m = re.match(r"^models/(?P<model>.+?)$", path)
211
+ return m.groupdict() if m else {}
212
+
213
+ @staticmethod
214
+ def common_billing_account_path(
215
+ billing_account: str,
216
+ ) -> str:
217
+ """Returns a fully-qualified billing_account string."""
218
+ return "billingAccounts/{billing_account}".format(
219
+ billing_account=billing_account,
220
+ )
221
+
222
+ @staticmethod
223
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
224
+ """Parse a billing_account path into its component segments."""
225
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
226
+ return m.groupdict() if m else {}
227
+
228
+ @staticmethod
229
+ def common_folder_path(
230
+ folder: str,
231
+ ) -> str:
232
+ """Returns a fully-qualified folder string."""
233
+ return "folders/{folder}".format(
234
+ folder=folder,
235
+ )
236
+
237
+ @staticmethod
238
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
239
+ """Parse a folder path into its component segments."""
240
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
241
+ return m.groupdict() if m else {}
242
+
243
+ @staticmethod
244
+ def common_organization_path(
245
+ organization: str,
246
+ ) -> str:
247
+ """Returns a fully-qualified organization string."""
248
+ return "organizations/{organization}".format(
249
+ organization=organization,
250
+ )
251
+
252
+ @staticmethod
253
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
254
+ """Parse a organization path into its component segments."""
255
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
256
+ return m.groupdict() if m else {}
257
+
258
+ @staticmethod
259
+ def common_project_path(
260
+ project: str,
261
+ ) -> str:
262
+ """Returns a fully-qualified project string."""
263
+ return "projects/{project}".format(
264
+ project=project,
265
+ )
266
+
267
+ @staticmethod
268
+ def parse_common_project_path(path: str) -> Dict[str, str]:
269
+ """Parse a project path into its component segments."""
270
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
271
+ return m.groupdict() if m else {}
272
+
273
+ @staticmethod
274
+ def common_location_path(
275
+ project: str,
276
+ location: str,
277
+ ) -> str:
278
+ """Returns a fully-qualified location string."""
279
+ return "projects/{project}/locations/{location}".format(
280
+ project=project,
281
+ location=location,
282
+ )
283
+
284
+ @staticmethod
285
+ def parse_common_location_path(path: str) -> Dict[str, str]:
286
+ """Parse a location path into its component segments."""
287
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
288
+ return m.groupdict() if m else {}
289
+
290
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[client_options_lib.ClientOptions] = None
    ):
        """Deprecated. Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` if provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """

        warnings.warn(
            "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
            DeprecationWarning,
        )
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # NOTE(review): unlike _read_environment_variables, these values are
        # not lower-cased, so e.g. "True" would fail validation here.
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
            )
        if use_mtls_endpoint not in ("auto", "never", "always"):
            raise MutualTLSChannelError(
                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
            )

        # Figure out the client cert source to use.
        client_cert_source = None
        if use_client_cert == "true":
            if client_options.client_cert_source:
                client_cert_source = client_options.client_cert_source
            elif mtls.has_default_client_cert_source():
                client_cert_source = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        elif use_mtls_endpoint == "always" or (
            use_mtls_endpoint == "auto" and client_cert_source
        ):
            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
        else:
            api_endpoint = cls.DEFAULT_ENDPOINT

        return api_endpoint, client_cert_source
361
+
362
+ @staticmethod
363
+ def _read_environment_variables():
364
+ """Returns the environment variables used by the client.
365
+
366
+ Returns:
367
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
368
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
369
+
370
+ Raises:
371
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
372
+ any of ["true", "false"].
373
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
374
+ is not any of ["auto", "never", "always"].
375
+ """
376
+ use_client_cert = os.getenv(
377
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
378
+ ).lower()
379
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
380
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
381
+ if use_client_cert not in ("true", "false"):
382
+ raise ValueError(
383
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
384
+ )
385
+ if use_mtls_endpoint not in ("auto", "never", "always"):
386
+ raise MutualTLSChannelError(
387
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
388
+ )
389
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
390
+
391
+ @staticmethod
392
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
393
+ """Return the client cert source to be used by the client.
394
+
395
+ Args:
396
+ provided_cert_source (bytes): The client certificate source provided.
397
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
398
+
399
+ Returns:
400
+ bytes or None: The client cert source to be used by the client.
401
+ """
402
+ client_cert_source = None
403
+ if use_cert_flag:
404
+ if provided_cert_source:
405
+ client_cert_source = provided_cert_source
406
+ elif mtls.has_default_client_cert_source():
407
+ client_cert_source = mtls.default_client_cert_source()
408
+ return client_cert_source
409
+
410
+ @staticmethod
411
+ def _get_api_endpoint(
412
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
413
+ ):
414
+ """Return the API endpoint used by the client.
415
+
416
+ Args:
417
+ api_override (str): The API endpoint override. If specified, this is always
418
+ the return value of this function and the other arguments are not used.
419
+ client_cert_source (bytes): The client certificate source used by the client.
420
+ universe_domain (str): The universe domain used by the client.
421
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
422
+ Possible values are "always", "auto", or "never".
423
+
424
+ Returns:
425
+ str: The API endpoint to be used by the client.
426
+ """
427
+ if api_override is not None:
428
+ api_endpoint = api_override
429
+ elif use_mtls_endpoint == "always" or (
430
+ use_mtls_endpoint == "auto" and client_cert_source
431
+ ):
432
+ _default_universe = PredictionServiceClient._DEFAULT_UNIVERSE
433
+ if universe_domain != _default_universe:
434
+ raise MutualTLSChannelError(
435
+ f"mTLS is not supported in any universe other than {_default_universe}."
436
+ )
437
+ api_endpoint = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
438
+ else:
439
+ api_endpoint = PredictionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
440
+ UNIVERSE_DOMAIN=universe_domain
441
+ )
442
+ return api_endpoint
443
+
444
+ @staticmethod
445
+ def _get_universe_domain(
446
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
447
+ ) -> str:
448
+ """Return the universe domain used by the client.
449
+
450
+ Args:
451
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
452
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
453
+
454
+ Returns:
455
+ str: The universe domain to be used by the client.
456
+
457
+ Raises:
458
+ ValueError: If the universe domain is an empty string.
459
+ """
460
+ universe_domain = PredictionServiceClient._DEFAULT_UNIVERSE
461
+ if client_universe_domain is not None:
462
+ universe_domain = client_universe_domain
463
+ elif universe_domain_env is not None:
464
+ universe_domain = universe_domain_env
465
+ if len(universe_domain.strip()) == 0:
466
+ raise ValueError("Universe Domain cannot be an empty string.")
467
+ return universe_domain
468
+
469
    def _validate_universe_domain(self):
        """Validates client's and credentials' universe domains are consistent.

        Returns:
            bool: True iff the configured universe domain is valid.

        Raises:
            ValueError: If the configured universe domain is not valid.
        """

        # NOTE (b/349488459): universe validation is disabled until further notice.
        # The method is still called before every RPC so validation can be
        # re-enabled upstream without touching call sites.
        return True
481
+
482
    @property
    def api_endpoint(self):
        """Return the API endpoint used by the client instance.

        Returns:
            str: The API endpoint used by the client instance.
        """
        # Resolved in __init__ from (in order) a provided transport's host,
        # the client-options override, or the endpoint template.
        return self._api_endpoint

    @property
    def universe_domain(self) -> str:
        """Return the universe domain used by the client instance.

        Returns:
            str: The universe domain used by the client instance.
        """
        # Resolved in __init__ from client options / environment / default.
        return self._universe_domain
499
+
500
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Optional[
            Union[
                str,
                PredictionServiceTransport,
                Callable[..., PredictionServiceTransport],
            ]
        ] = None,
        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the prediction service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Optional[Union[str,PredictionServiceTransport,Callable[..., PredictionServiceTransport]]]):
                The transport to use, or a Callable that constructs and returns a new transport.
                If a Callable is given, it will be called with the same set of initialization
                arguments as used in the PredictionServiceTransport constructor.
                If set to None, a transport is chosen automatically.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                Custom options for the client.

                1. The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client when ``transport`` is
                not explicitly provided. Only if this property is not set and
                ``transport`` was not explicitly provided, the endpoint is
                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
                variable, which have one of the following values:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto-switch to the
                default mTLS endpoint if client certificate is present; this is
                the default value).

                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide a client certificate for mTLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

                3. The ``universe_domain`` property can be used to override the
                default "googleapis.com" universe. Note that the ``api_endpoint``
                property still takes precedence; and ``universe_domain`` is
                currently not supported for mTLS.

            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, a ClientOptions, or None.
        self._client_options = client_options
        if isinstance(self._client_options, dict):
            self._client_options = client_options_lib.from_dict(self._client_options)
        if self._client_options is None:
            self._client_options = client_options_lib.ClientOptions()
        self._client_options = cast(
            client_options_lib.ClientOptions, self._client_options
        )

        universe_domain_opt = getattr(self._client_options, "universe_domain", None)

        # Resolve mTLS / universe-domain configuration from the environment
        # before any endpoint decisions are made.
        (
            self._use_client_cert,
            self._use_mtls_endpoint,
            self._universe_domain_env,
        ) = PredictionServiceClient._read_environment_variables()
        self._client_cert_source = PredictionServiceClient._get_client_cert_source(
            self._client_options.client_cert_source, self._use_client_cert
        )
        self._universe_domain = PredictionServiceClient._get_universe_domain(
            universe_domain_opt, self._universe_domain_env
        )
        self._api_endpoint = None  # updated below, depending on `transport`

        # Initialize the universe domain validation.
        self._is_universe_domain_valid = False

        if CLIENT_LOGGING_SUPPORTED:  # pragma: NO COVER
            # Setup logging.
            client_logging.initialize_logging()

        api_key_value = getattr(self._client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        transport_provided = isinstance(transport, PredictionServiceTransport)
        if transport_provided:
            # transport is a PredictionServiceTransport instance.
            # A pre-built transport already carries credentials/scopes, so
            # conflicting options are rejected outright.
            if credentials or self._client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if self._client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = cast(PredictionServiceTransport, transport)
            self._api_endpoint = self._transport.host

        # If a transport was provided, its host wins; otherwise the endpoint
        # is computed from options + environment.
        self._api_endpoint = (
            self._api_endpoint
            or PredictionServiceClient._get_api_endpoint(
                self._client_options.api_endpoint,
                self._client_cert_source,
                self._universe_domain,
                self._use_mtls_endpoint,
            )
        )

        if not transport_provided:
            import google.auth._default  # type: ignore

            # API keys are converted to credentials when the installed
            # google-auth supports it.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            transport_init: Union[
                Type[PredictionServiceTransport],
                Callable[..., PredictionServiceTransport],
            ] = (
                PredictionServiceClient.get_transport_class(transport)
                if isinstance(transport, str) or transport is None
                else cast(Callable[..., PredictionServiceTransport], transport)
            )
            # initialize with the provided callable or the passed in class
            self._transport = transport_init(
                credentials=credentials,
                credentials_file=self._client_options.credentials_file,
                host=self._api_endpoint,
                scopes=self._client_options.scopes,
                client_cert_source_for_mtls=self._client_cert_source,
                quota_project_id=self._client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
                api_audience=self._client_options.api_audience,
            )

        # Emit a one-time DEBUG record describing the created client (sync
        # transports only; the async wrapper logs separately).
        if "async" not in str(self._transport):
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                std_logging.DEBUG
            ):  # pragma: NO COVER
                _LOGGER.debug(
                    "Created client `google.ai.generativelanguage_v1alpha.PredictionServiceClient`.",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1alpha.PredictionService",
                        "universeDomain": getattr(
                            self._transport._credentials, "universe_domain", ""
                        ),
                        "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
                        "credentialsInfo": getattr(
                            self.transport._credentials, "get_cred_info", lambda: None
                        )(),
                    }
                    if hasattr(self._transport, "_credentials")
                    else {
                        "serviceName": "google.ai.generativelanguage.v1alpha.PredictionService",
                        "credentialsType": None,
                    },
                )
682
+
683
    def predict(
        self,
        request: Optional[Union[prediction_service.PredictRequest, dict]] = None,
        *,
        model: Optional[str] = None,
        instances: Optional[MutableSequence[struct_pb2.Value]] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
    ) -> prediction_service.PredictResponse:
        r"""Performs a prediction request.

        .. code-block:: python

            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            #   client as shown in:
            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.ai import generativelanguage_v1alpha

            def sample_predict():
                # Create a client
                client = generativelanguage_v1alpha.PredictionServiceClient()

                # Initialize request argument(s)
                instances = generativelanguage_v1alpha.Value()
                instances.null_value = "NULL_VALUE"

                request = generativelanguage_v1alpha.PredictRequest(
                    model="model_value",
                    instances=instances,
                )

                # Make the request
                response = client.predict(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.ai.generativelanguage_v1alpha.types.PredictRequest, dict]):
                The request object. Request message for
                [PredictionService.Predict][google.ai.generativelanguage.v1alpha.PredictionService.Predict].
            model (str):
                Required. The name of the model for prediction. Format:
                ``name=models/{model}``.

                This corresponds to the ``model`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            instances (MutableSequence[google.protobuf.struct_pb2.Value]):
                Required. The instances that are the
                input to the prediction call.

                This corresponds to the ``instances`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                sent along with the request as metadata. Normally, each value must be of type `str`,
                but for metadata keys ending with the suffix `-bin`, the corresponding values must
                be of type `bytes`.

        Returns:
            google.ai.generativelanguage_v1alpha.types.PredictResponse:
                Response message for [PredictionService.Predict].
        """
        # Create or coerce a protobuf request object.
        # - Quick check: If we got a request object, we should *not* have
        #   gotten any keyword arguments that map to the request.
        has_flattened_params = any([model, instances])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        # - Use the request object if provided (there's no risk of modifying the input as
        #   there are no flattened fields), or create one.
        if not isinstance(request, prediction_service.PredictRequest):
            request = prediction_service.PredictRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if model is not None:
                request.model = model
            if instances is not None:
                request.instances.extend(instances)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.predict]

        # Certain fields should be provided within the metadata header;
        # add these here.  Routing on `model` lets the backend dispatch the
        # request without parsing the body.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
        )

        # Validate the universe domain.
        self._validate_universe_domain()

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
799
+
800
+ def __enter__(self) -> "PredictionServiceClient":
801
+ return self
802
+
803
+ def __exit__(self, type, value, traceback):
804
+ """Releases underlying transport's resources.
805
+
806
+ .. warning::
807
+ ONLY use as a context manager if the transport is NOT shared
808
+ with other clients! Exiting the with block will CLOSE the transport
809
+ and may cause errors in other clients!
810
+ """
811
+ self.transport.close()
812
+
813
+ def list_operations(
814
+ self,
815
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
816
+ *,
817
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
818
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
819
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
820
+ ) -> operations_pb2.ListOperationsResponse:
821
+ r"""Lists operations that match the specified filter in the request.
822
+
823
+ Args:
824
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
825
+ The request object. Request message for
826
+ `ListOperations` method.
827
+ retry (google.api_core.retry.Retry): Designation of what errors,
828
+ if any, should be retried.
829
+ timeout (float): The timeout for this request.
830
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
831
+ sent along with the request as metadata. Normally, each value must be of type `str`,
832
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
833
+ be of type `bytes`.
834
+ Returns:
835
+ ~.operations_pb2.ListOperationsResponse:
836
+ Response message for ``ListOperations`` method.
837
+ """
838
+ # Create or coerce a protobuf request object.
839
+ # The request isn't a proto-plus wrapped type,
840
+ # so it must be constructed via keyword expansion.
841
+ if isinstance(request, dict):
842
+ request = operations_pb2.ListOperationsRequest(**request)
843
+
844
+ # Wrap the RPC method; this adds retry and timeout information,
845
+ # and friendly error handling.
846
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
847
+
848
+ # Certain fields should be provided within the metadata header;
849
+ # add these here.
850
+ metadata = tuple(metadata) + (
851
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
852
+ )
853
+
854
+ # Validate the universe domain.
855
+ self._validate_universe_domain()
856
+
857
+ # Send the request.
858
+ response = rpc(
859
+ request,
860
+ retry=retry,
861
+ timeout=timeout,
862
+ metadata=metadata,
863
+ )
864
+
865
+ # Done; return the response.
866
+ return response
867
+
868
+ def get_operation(
869
+ self,
870
+ request: Optional[operations_pb2.GetOperationRequest] = None,
871
+ *,
872
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
873
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
874
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
875
+ ) -> operations_pb2.Operation:
876
+ r"""Gets the latest state of a long-running operation.
877
+
878
+ Args:
879
+ request (:class:`~.operations_pb2.GetOperationRequest`):
880
+ The request object. Request message for
881
+ `GetOperation` method.
882
+ retry (google.api_core.retry.Retry): Designation of what errors,
883
+ if any, should be retried.
884
+ timeout (float): The timeout for this request.
885
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
886
+ sent along with the request as metadata. Normally, each value must be of type `str`,
887
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
888
+ be of type `bytes`.
889
+ Returns:
890
+ ~.operations_pb2.Operation:
891
+ An ``Operation`` object.
892
+ """
893
+ # Create or coerce a protobuf request object.
894
+ # The request isn't a proto-plus wrapped type,
895
+ # so it must be constructed via keyword expansion.
896
+ if isinstance(request, dict):
897
+ request = operations_pb2.GetOperationRequest(**request)
898
+
899
+ # Wrap the RPC method; this adds retry and timeout information,
900
+ # and friendly error handling.
901
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
902
+
903
+ # Certain fields should be provided within the metadata header;
904
+ # add these here.
905
+ metadata = tuple(metadata) + (
906
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
907
+ )
908
+
909
+ # Validate the universe domain.
910
+ self._validate_universe_domain()
911
+
912
+ # Send the request.
913
+ response = rpc(
914
+ request,
915
+ retry=retry,
916
+ timeout=timeout,
917
+ metadata=metadata,
918
+ )
919
+
920
+ # Done; return the response.
921
+ return response
922
+
923
+
924
# Default client metadata attached to outgoing requests; it reports the
# installed generativelanguage package version to the API for telemetry.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


# Public surface of this module.
__all__ = ("PredictionServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1alpha/services/prediction_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import PredictionServiceTransport
from .grpc import PredictionServiceGrpcTransport
from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
from .rest import PredictionServiceRestInterceptor, PredictionServiceRestTransport

# Compile a registry of transports.
# Maps a lowercase transport key ("grpc", "grpc_asyncio", "rest") to the
# concrete transport class implementing it; presumably consumed by the
# client factory when resolving a `transport=` argument.
_transport_registry = OrderedDict()  # type: Dict[str, Type[PredictionServiceTransport]]
_transport_registry["grpc"] = PredictionServiceGrpcTransport
_transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
_transport_registry["rest"] = PredictionServiceRestTransport

# Public surface of this package: the abstract base transport, the three
# concrete transports, and the REST interceptor hook.
__all__ = (
    "PredictionServiceTransport",
    "PredictionServiceGrpcTransport",
    "PredictionServiceGrpcAsyncIOTransport",
    "PredictionServiceRestTransport",
    "PredictionServiceRestInterceptor",
)