koichi12 commited on
Commit
bcfd742
·
verified ·
1 Parent(s): ef732b7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/__init__.py +89 -0
  2. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/__pycache__/__init__.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/__pycache__/client.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py +36 -0
  6. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/base.py +320 -0
  12. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py +547 -0
  13. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py +650 -0
  14. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/__init__.py +22 -0
  15. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/async_client.py +720 -0
  16. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/client.py +1107 -0
  17. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/pagers.py +197 -0
  18. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py +36 -0
  19. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/base.py +220 -0
  26. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py +443 -0
  27. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py +489 -0
  28. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/rest.py +1018 -0
  29. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/rest_base.py +268 -0
  30. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__init__.py +75 -0
  31. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/generative_service.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/model_service.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/safety.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/citation.py +101 -0
  35. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/content.py +133 -0
  36. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/generative_service.py +1129 -0
  37. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/model_service.py +112 -0
  38. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/safety.py +197 -0
  39. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/__init__.cpython-311.pyc +0 -0
  40. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/async_client.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/client.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py +36 -0
  43. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/base.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc +0 -0
  47. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py +231 -0
  50. .venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py +486 -0
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/__init__.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.ai.generativelanguage_v1 import gapic_version as package_version

# The package version is sourced from the generated ``gapic_version`` module.
__version__ = package_version.__version__


# Service clients (synchronous and async variants) re-exported at the
# package root for convenient access.
from .services.generative_service import (
    GenerativeServiceAsyncClient,
    GenerativeServiceClient,
)
from .services.model_service import ModelServiceAsyncClient, ModelServiceClient

# Proto message and enum types re-exported at the package root.
from .types.citation import CitationMetadata, CitationSource
from .types.content import Blob, Content, Part
from .types.generative_service import (
    BatchEmbedContentsRequest,
    BatchEmbedContentsResponse,
    Candidate,
    ContentEmbedding,
    CountTokensRequest,
    CountTokensResponse,
    EmbedContentRequest,
    EmbedContentResponse,
    GenerateContentRequest,
    GenerateContentResponse,
    GenerationConfig,
    GroundingChunk,
    GroundingMetadata,
    GroundingSupport,
    LogprobsResult,
    RetrievalMetadata,
    SearchEntryPoint,
    Segment,
    TaskType,
)
from .types.model import Model
from .types.model_service import GetModelRequest, ListModelsRequest, ListModelsResponse
from .types.safety import HarmCategory, SafetyRating, SafetySetting

# Explicit public API of the package (ordering preserved from the generator).
__all__ = (
    "GenerativeServiceAsyncClient",
    "ModelServiceAsyncClient",
    "BatchEmbedContentsRequest",
    "BatchEmbedContentsResponse",
    "Blob",
    "Candidate",
    "CitationMetadata",
    "CitationSource",
    "Content",
    "ContentEmbedding",
    "CountTokensRequest",
    "CountTokensResponse",
    "EmbedContentRequest",
    "EmbedContentResponse",
    "GenerateContentRequest",
    "GenerateContentResponse",
    "GenerationConfig",
    "GenerativeServiceClient",
    "GetModelRequest",
    "GroundingChunk",
    "GroundingMetadata",
    "GroundingSupport",
    "HarmCategory",
    "ListModelsRequest",
    "ListModelsResponse",
    "LogprobsResult",
    "Model",
    "ModelServiceClient",
    "Part",
    "RetrievalMetadata",
    "SafetyRating",
    "SafetySetting",
    "SearchEntryPoint",
    "Segment",
    "TaskType",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (213 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (413 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (62.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import GenerativeServiceTransport
from .grpc import GenerativeServiceGrpcTransport
from .grpc_asyncio import GenerativeServiceGrpcAsyncIOTransport
from .rest import GenerativeServiceRestInterceptor, GenerativeServiceRestTransport

# Registry mapping transport names to their implementations; insertion
# order matters, so the registry is built from an ordered pair sequence.
_transport_registry = OrderedDict(
    (
        ("grpc", GenerativeServiceGrpcTransport),
        ("grpc_asyncio", GenerativeServiceGrpcAsyncIOTransport),
        ("rest", GenerativeServiceRestTransport),
    )
)  # type: Dict[str, Type[GenerativeServiceTransport]]

__all__ = (
    "GenerativeServiceTransport",
    "GenerativeServiceGrpcTransport",
    "GenerativeServiceGrpcAsyncIOTransport",
    "GenerativeServiceRestTransport",
    "GenerativeServiceRestInterceptor",
)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (921 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (11.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (27.6 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (62.7 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (22.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/base.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union

import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.ai.generativelanguage_v1 import gapic_version as package_version
from google.ai.generativelanguage_v1.types import generative_service

DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class GenerativeServiceTransport(abc.ABC):
    """Abstract transport class for GenerativeService.

    Concrete subclasses (gRPC, gRPC asyncio, REST) implement the RPC
    properties below; this base class handles credential resolution and
    per-method retry/timeout wrapping shared by all transports.
    """

    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """

        scope_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Remember the caller-requested scopes for later inspection.
        self._scopes = scopes
        # A subclass may pre-set this flag (before calling super().__init__)
        # to suppress default-credential lookup entirely.
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # Explicit credentials and a credentials file cannot both be given.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        # Resolve credentials: file > explicit object > application default.
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scope_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scope_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # Service account credentials get the self-signed-JWT fast path when
        # requested and when the installed google-auth supports it.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the resolved credentials.
        self._credentials = credentials

        # Save the hostname, defaulting to port 443 (HTTPS) when none is given.
        self._host = host if ":" in host else host + ":443"

    @property
    def host(self):
        """The ``host:port`` string this transport connects to."""
        return self._host

    def _prep_wrapped_messages(self, client_info):
        """Precompute the retry/timeout-wrapped RPC methods."""

        def _transient_retry(deadline: float) -> retries.Retry:
            # Retry only transient UNAVAILABLE failures, with exponential
            # backoff capped at 10s and the given overall deadline.
            return retries.Retry(
                initial=1.0,
                maximum=10.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=deadline,
            )

        self._wrapped_methods = {
            self.generate_content: gapic_v1.method.wrap_method(
                self.generate_content,
                default_retry=_transient_retry(600.0),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.stream_generate_content: gapic_v1.method.wrap_method(
                self.stream_generate_content,
                default_retry=_transient_retry(600.0),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.embed_content: gapic_v1.method.wrap_method(
                self.embed_content,
                default_retry=_transient_retry(60.0),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.batch_embed_contents: gapic_v1.method.wrap_method(
                self.batch_embed_contents,
                default_retry=_transient_retry(60.0),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.count_tokens: gapic_v1.method.wrap_method(
                self.count_tokens,
                default_retry=_transient_retry(60.0),
                default_timeout=60.0,
                client_info=client_info,
            ),
            # Operation plumbing RPCs get no retry and no default timeout.
            self.cancel_operation: gapic_v1.method.wrap_method(
                self.cancel_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_operation: gapic_v1.method.wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: gapic_v1.method.wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def generate_content(
        self,
    ) -> Callable[
        [generative_service.GenerateContentRequest],
        Union[
            generative_service.GenerateContentResponse,
            Awaitable[generative_service.GenerateContentResponse],
        ],
    ]:
        """Callable for the GenerateContent RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def stream_generate_content(
        self,
    ) -> Callable[
        [generative_service.GenerateContentRequest],
        Union[
            generative_service.GenerateContentResponse,
            Awaitable[generative_service.GenerateContentResponse],
        ],
    ]:
        """Callable for the StreamGenerateContent RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def embed_content(
        self,
    ) -> Callable[
        [generative_service.EmbedContentRequest],
        Union[
            generative_service.EmbedContentResponse,
            Awaitable[generative_service.EmbedContentResponse],
        ],
    ]:
        """Callable for the EmbedContent RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def batch_embed_contents(
        self,
    ) -> Callable[
        [generative_service.BatchEmbedContentsRequest],
        Union[
            generative_service.BatchEmbedContentsResponse,
            Awaitable[generative_service.BatchEmbedContentsResponse],
        ],
    ]:
        """Callable for the BatchEmbedContents RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def count_tokens(
        self,
    ) -> Callable[
        [generative_service.CountTokensRequest],
        Union[
            generative_service.CountTokensResponse,
            Awaitable[generative_service.CountTokensResponse],
        ],
    ]:
        """Callable for the CountTokens RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        """Callable for the ListOperations RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        """Callable for the GetOperation RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
        """Callable for the CancelOperation RPC; implemented by subclasses."""
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        """Short transport kind identifier (e.g. 'grpc'); implemented by subclasses."""
        raise NotImplementedError()


__all__ = ("GenerativeServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/grpc.py ADDED
@@ -0,0 +1,547 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf.json_format import MessageToJson
28
+ import google.protobuf.message
29
+ import grpc # type: ignore
30
+ import proto # type: ignore
31
+
32
+ from google.ai.generativelanguage_v1.types import generative_service
33
+
34
+ from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport
35
+
36
+ try:
37
+ from google.api_core import client_logging # type: ignore
38
+
39
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
40
+ except ImportError: # pragma: NO COVER
41
+ CLIENT_LOGGING_SUPPORTED = False
42
+
43
+ _LOGGER = std_logging.getLogger(__name__)
44
+
45
+
class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):  # pragma: NO COVER
    """Client-side gRPC interceptor that DEBUG-logs request and response payloads.

    Logging only happens when client logging support is available and the
    module logger is enabled for DEBUG; otherwise calls pass through untouched.
    """

    @staticmethod
    def _serialize(msg) -> str:
        # Render a message as JSON when possible; fall back to a
        # type-tagged pickle representation for unknown objects.
        if isinstance(msg, proto.Message):
            return type(msg).to_json(msg)
        if isinstance(msg, google.protobuf.message.Message):
            return MessageToJson(msg)
        return f"{type(msg).__name__}: {pickle.dumps(msg)}"

    def intercept_unary_unary(self, continuation, client_call_details, request):
        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        )
        if logging_enabled:  # pragma: NO COVER
            # Decode byte-valued metadata entries so the log record is JSON-safe.
            decoded_metadata = {
                key: value.decode("utf-8") if isinstance(value, bytes) else value
                for key, value in client_call_details.metadata
            }
            grpc_request = {
                "payload": self._serialize(request),
                "requestMethod": "grpc",
                "metadata": dict(decoded_metadata),
            }
            _LOGGER.debug(
                f"Sending request for {client_call_details.method}",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.GenerativeService",
                    "rpcName": client_call_details.method,
                    "request": grpc_request,
                    "metadata": grpc_request["metadata"],
                },
            )

        response = continuation(client_call_details, request)
        if logging_enabled:  # pragma: NO COVER
            trailing = response.trailing_metadata()
            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to
            # a plain dict of strings (or None when no trailing metadata).
            metadata = {k: str(v) for k, v in trailing} if trailing else None
            grpc_response = {
                "payload": self._serialize(response.result()),
                "metadata": metadata,
                "status": "OK",
            }
            _LOGGER.debug(
                f"Received response for {client_call_details.method}.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.GenerativeService",
                    "rpcName": client_call_details.method,
                    "response": grpc_response,
                    "metadata": grpc_response["metadata"],
                },
            )
        return response
110
+
111
+
112
+ class GenerativeServiceGrpcTransport(GenerativeServiceTransport):
113
+ """gRPC backend transport for GenerativeService.
114
+
115
+ API for using Large Models that generate multimodal content
116
+ and have additional capabilities beyond text generation.
117
+
118
+ This class defines the same methods as the primary client, so the
119
+ primary client can load the underlying transport implementation
120
+ and call it.
121
+
122
+ It sends protocol buffers over the wire using gRPC (which is built on
123
+ top of HTTP/2); the ``grpcio`` package must be installed.
124
+ """
125
+
126
+ _stubs: Dict[str, Callable]
127
+
128
+ def __init__(
129
+ self,
130
+ *,
131
+ host: str = "generativelanguage.googleapis.com",
132
+ credentials: Optional[ga_credentials.Credentials] = None,
133
+ credentials_file: Optional[str] = None,
134
+ scopes: Optional[Sequence[str]] = None,
135
+ channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
136
+ api_mtls_endpoint: Optional[str] = None,
137
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
138
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
139
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
140
+ quota_project_id: Optional[str] = None,
141
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
142
+ always_use_jwt_access: Optional[bool] = False,
143
+ api_audience: Optional[str] = None,
144
+ ) -> None:
145
+ """Instantiate the transport.
146
+
147
+ Args:
148
+ host (Optional[str]):
149
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
150
+ credentials (Optional[google.auth.credentials.Credentials]): The
151
+ authorization credentials to attach to requests. These
152
+ credentials identify the application to the service; if none
153
+ are specified, the client will attempt to ascertain the
154
+ credentials from the environment.
155
+ This argument is ignored if a ``channel`` instance is provided.
156
+ credentials_file (Optional[str]): A file with credentials that can
157
+ be loaded with :func:`google.auth.load_credentials_from_file`.
158
+ This argument is ignored if a ``channel`` instance is provided.
159
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
160
+ ignored if a ``channel`` instance is provided.
161
+ channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
162
+ A ``Channel`` instance through which to make calls, or a Callable
163
+ that constructs and returns one. If set to None, ``self.create_channel``
164
+ is used to create the channel. If a Callable is given, it will be called
165
+ with the same arguments as used in ``self.create_channel``.
166
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
167
+ If provided, it overrides the ``host`` argument and tries to create
168
+ a mutual TLS channel with client SSL credentials from
169
+ ``client_cert_source`` or application default SSL credentials.
170
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
171
+ Deprecated. A callback to provide client SSL certificate bytes and
172
+ private key bytes, both in PEM format. It is ignored if
173
+ ``api_mtls_endpoint`` is None.
174
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
175
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
176
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
177
+ A callback to provide client certificate bytes and private key bytes,
178
+ both in PEM format. It is used to configure a mutual TLS channel. It is
179
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
180
+ quota_project_id (Optional[str]): An optional project to use for billing
181
+ and quota.
182
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
183
+ The client info used to send a user-agent string along with
184
+ API requests. If ``None``, then default info will be used.
185
+ Generally, you only need to set this if you're developing
186
+ your own client library.
187
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
188
+ be used for service account credentials.
189
+
190
+ Raises:
191
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
192
+ creation failed for any reason.
193
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
194
+ and ``credentials_file`` are passed.
195
+ """
196
+ self._grpc_channel = None
197
+ self._ssl_channel_credentials = ssl_channel_credentials
198
+ self._stubs: Dict[str, Callable] = {}
199
+
200
+ if api_mtls_endpoint:
201
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
202
+ if client_cert_source:
203
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
204
+
205
+ if isinstance(channel, grpc.Channel):
206
+ # Ignore credentials if a channel was passed.
207
+ credentials = None
208
+ self._ignore_credentials = True
209
+ # If a channel was explicitly provided, set it.
210
+ self._grpc_channel = channel
211
+ self._ssl_channel_credentials = None
212
+
213
+ else:
214
+ if api_mtls_endpoint:
215
+ host = api_mtls_endpoint
216
+
217
+ # Create SSL credentials with client_cert_source or application
218
+ # default SSL credentials.
219
+ if client_cert_source:
220
+ cert, key = client_cert_source()
221
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
222
+ certificate_chain=cert, private_key=key
223
+ )
224
+ else:
225
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
226
+
227
+ else:
228
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
229
+ cert, key = client_cert_source_for_mtls()
230
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
231
+ certificate_chain=cert, private_key=key
232
+ )
233
+
234
+ # The base transport sets the host, credentials and scopes
235
+ super().__init__(
236
+ host=host,
237
+ credentials=credentials,
238
+ credentials_file=credentials_file,
239
+ scopes=scopes,
240
+ quota_project_id=quota_project_id,
241
+ client_info=client_info,
242
+ always_use_jwt_access=always_use_jwt_access,
243
+ api_audience=api_audience,
244
+ )
245
+
246
+ if not self._grpc_channel:
247
+ # initialize with the provided callable or the default channel
248
+ channel_init = channel or type(self).create_channel
249
+ self._grpc_channel = channel_init(
250
+ self._host,
251
+ # use the credentials which are saved
252
+ credentials=self._credentials,
253
+ # Set ``credentials_file`` to ``None`` here as
254
+ # the credentials that we saved earlier should be used.
255
+ credentials_file=None,
256
+ scopes=self._scopes,
257
+ ssl_credentials=self._ssl_channel_credentials,
258
+ quota_project_id=quota_project_id,
259
+ options=[
260
+ ("grpc.max_send_message_length", -1),
261
+ ("grpc.max_receive_message_length", -1),
262
+ ],
263
+ )
264
+
265
+ self._interceptor = _LoggingClientInterceptor()
266
+ self._logged_channel = grpc.intercept_channel(
267
+ self._grpc_channel, self._interceptor
268
+ )
269
+
270
+ # Wrap messages. This must be done after self._logged_channel exists
271
+ self._prep_wrapped_messages(client_info)
272
+
273
+ @classmethod
274
+ def create_channel(
275
+ cls,
276
+ host: str = "generativelanguage.googleapis.com",
277
+ credentials: Optional[ga_credentials.Credentials] = None,
278
+ credentials_file: Optional[str] = None,
279
+ scopes: Optional[Sequence[str]] = None,
280
+ quota_project_id: Optional[str] = None,
281
+ **kwargs,
282
+ ) -> grpc.Channel:
283
+ """Create and return a gRPC channel object.
284
+ Args:
285
+ host (Optional[str]): The host for the channel to use.
286
+ credentials (Optional[~.Credentials]): The
287
+ authorization credentials to attach to requests. These
288
+ credentials identify this application to the service. If
289
+ none are specified, the client will attempt to ascertain
290
+ the credentials from the environment.
291
+ credentials_file (Optional[str]): A file with credentials that can
292
+ be loaded with :func:`google.auth.load_credentials_from_file`.
293
+ This argument is mutually exclusive with credentials.
294
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
295
+ service. These are only used when credentials are not specified and
296
+ are passed to :func:`google.auth.default`.
297
+ quota_project_id (Optional[str]): An optional project to use for billing
298
+ and quota.
299
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
300
+ channel creation.
301
+ Returns:
302
+ grpc.Channel: A gRPC channel object.
303
+
304
+ Raises:
305
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
306
+ and ``credentials_file`` are passed.
307
+ """
308
+
309
+ return grpc_helpers.create_channel(
310
+ host,
311
+ credentials=credentials,
312
+ credentials_file=credentials_file,
313
+ quota_project_id=quota_project_id,
314
+ default_scopes=cls.AUTH_SCOPES,
315
+ scopes=scopes,
316
+ default_host=cls.DEFAULT_HOST,
317
+ **kwargs,
318
+ )
319
+
320
+ @property
321
+ def grpc_channel(self) -> grpc.Channel:
322
+ """Return the channel designed to connect to this service."""
323
+ return self._grpc_channel
324
+
325
+ @property
326
+ def generate_content(
327
+ self,
328
+ ) -> Callable[
329
+ [generative_service.GenerateContentRequest],
330
+ generative_service.GenerateContentResponse,
331
+ ]:
332
+ r"""Return a callable for the generate content method over gRPC.
333
+
334
+ Generates a model response given an input
335
+ ``GenerateContentRequest``. Refer to the `text generation
336
+ guide <https://ai.google.dev/gemini-api/docs/text-generation>`__
337
+ for detailed usage information. Input capabilities differ
338
+ between models, including tuned models. Refer to the `model
339
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
340
+ and `tuning
341
+ guide <https://ai.google.dev/gemini-api/docs/model-tuning>`__
342
+ for details.
343
+
344
+ Returns:
345
+ Callable[[~.GenerateContentRequest],
346
+ ~.GenerateContentResponse]:
347
+ A function that, when called, will call the underlying RPC
348
+ on the server.
349
+ """
350
+ # Generate a "stub function" on-the-fly which will actually make
351
+ # the request.
352
+ # gRPC handles serialization and deserialization, so we just need
353
+ # to pass in the functions for each.
354
+ if "generate_content" not in self._stubs:
355
+ self._stubs["generate_content"] = self._logged_channel.unary_unary(
356
+ "/google.ai.generativelanguage.v1.GenerativeService/GenerateContent",
357
+ request_serializer=generative_service.GenerateContentRequest.serialize,
358
+ response_deserializer=generative_service.GenerateContentResponse.deserialize,
359
+ )
360
+ return self._stubs["generate_content"]
361
+
362
+ @property
363
+ def stream_generate_content(
364
+ self,
365
+ ) -> Callable[
366
+ [generative_service.GenerateContentRequest],
367
+ generative_service.GenerateContentResponse,
368
+ ]:
369
+ r"""Return a callable for the stream generate content method over gRPC.
370
+
371
+ Generates a `streamed
372
+ response <https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream>`__
373
+ from the model given an input ``GenerateContentRequest``.
374
+
375
+ Returns:
376
+ Callable[[~.GenerateContentRequest],
377
+ ~.GenerateContentResponse]:
378
+ A function that, when called, will call the underlying RPC
379
+ on the server.
380
+ """
381
+ # Generate a "stub function" on-the-fly which will actually make
382
+ # the request.
383
+ # gRPC handles serialization and deserialization, so we just need
384
+ # to pass in the functions for each.
385
+ if "stream_generate_content" not in self._stubs:
386
+ self._stubs["stream_generate_content"] = self._logged_channel.unary_stream(
387
+ "/google.ai.generativelanguage.v1.GenerativeService/StreamGenerateContent",
388
+ request_serializer=generative_service.GenerateContentRequest.serialize,
389
+ response_deserializer=generative_service.GenerateContentResponse.deserialize,
390
+ )
391
+ return self._stubs["stream_generate_content"]
392
+
393
+ @property
394
+ def embed_content(
395
+ self,
396
+ ) -> Callable[
397
+ [generative_service.EmbedContentRequest],
398
+ generative_service.EmbedContentResponse,
399
+ ]:
400
+ r"""Return a callable for the embed content method over gRPC.
401
+
402
+ Generates a text embedding vector from the input ``Content``
403
+ using the specified `Gemini Embedding
404
+ model <https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding>`__.
405
+
406
+ Returns:
407
+ Callable[[~.EmbedContentRequest],
408
+ ~.EmbedContentResponse]:
409
+ A function that, when called, will call the underlying RPC
410
+ on the server.
411
+ """
412
+ # Generate a "stub function" on-the-fly which will actually make
413
+ # the request.
414
+ # gRPC handles serialization and deserialization, so we just need
415
+ # to pass in the functions for each.
416
+ if "embed_content" not in self._stubs:
417
+ self._stubs["embed_content"] = self._logged_channel.unary_unary(
418
+ "/google.ai.generativelanguage.v1.GenerativeService/EmbedContent",
419
+ request_serializer=generative_service.EmbedContentRequest.serialize,
420
+ response_deserializer=generative_service.EmbedContentResponse.deserialize,
421
+ )
422
+ return self._stubs["embed_content"]
423
+
424
+ @property
425
+ def batch_embed_contents(
426
+ self,
427
+ ) -> Callable[
428
+ [generative_service.BatchEmbedContentsRequest],
429
+ generative_service.BatchEmbedContentsResponse,
430
+ ]:
431
+ r"""Return a callable for the batch embed contents method over gRPC.
432
+
433
+ Generates multiple embedding vectors from the input ``Content``
434
+ which consists of a batch of strings represented as
435
+ ``EmbedContentRequest`` objects.
436
+
437
+ Returns:
438
+ Callable[[~.BatchEmbedContentsRequest],
439
+ ~.BatchEmbedContentsResponse]:
440
+ A function that, when called, will call the underlying RPC
441
+ on the server.
442
+ """
443
+ # Generate a "stub function" on-the-fly which will actually make
444
+ # the request.
445
+ # gRPC handles serialization and deserialization, so we just need
446
+ # to pass in the functions for each.
447
+ if "batch_embed_contents" not in self._stubs:
448
+ self._stubs["batch_embed_contents"] = self._logged_channel.unary_unary(
449
+ "/google.ai.generativelanguage.v1.GenerativeService/BatchEmbedContents",
450
+ request_serializer=generative_service.BatchEmbedContentsRequest.serialize,
451
+ response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize,
452
+ )
453
+ return self._stubs["batch_embed_contents"]
454
+
455
+ @property
456
+ def count_tokens(
457
+ self,
458
+ ) -> Callable[
459
+ [generative_service.CountTokensRequest], generative_service.CountTokensResponse
460
+ ]:
461
+ r"""Return a callable for the count tokens method over gRPC.
462
+
463
+ Runs a model's tokenizer on input ``Content`` and returns the
464
+ token count. Refer to the `tokens
465
+ guide <https://ai.google.dev/gemini-api/docs/tokens>`__ to learn
466
+ more about tokens.
467
+
468
+ Returns:
469
+ Callable[[~.CountTokensRequest],
470
+ ~.CountTokensResponse]:
471
+ A function that, when called, will call the underlying RPC
472
+ on the server.
473
+ """
474
+ # Generate a "stub function" on-the-fly which will actually make
475
+ # the request.
476
+ # gRPC handles serialization and deserialization, so we just need
477
+ # to pass in the functions for each.
478
+ if "count_tokens" not in self._stubs:
479
+ self._stubs["count_tokens"] = self._logged_channel.unary_unary(
480
+ "/google.ai.generativelanguage.v1.GenerativeService/CountTokens",
481
+ request_serializer=generative_service.CountTokensRequest.serialize,
482
+ response_deserializer=generative_service.CountTokensResponse.deserialize,
483
+ )
484
+ return self._stubs["count_tokens"]
485
+
486
+ def close(self):
487
+ self._logged_channel.close()
488
+
489
+ @property
490
+ def cancel_operation(
491
+ self,
492
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
493
+ r"""Return a callable for the cancel_operation method over gRPC."""
494
+ # Generate a "stub function" on-the-fly which will actually make
495
+ # the request.
496
+ # gRPC handles serialization and deserialization, so we just need
497
+ # to pass in the functions for each.
498
+ if "cancel_operation" not in self._stubs:
499
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
500
+ "/google.longrunning.Operations/CancelOperation",
501
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
502
+ response_deserializer=None,
503
+ )
504
+ return self._stubs["cancel_operation"]
505
+
506
+ @property
507
+ def get_operation(
508
+ self,
509
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
510
+ r"""Return a callable for the get_operation method over gRPC."""
511
+ # Generate a "stub function" on-the-fly which will actually make
512
+ # the request.
513
+ # gRPC handles serialization and deserialization, so we just need
514
+ # to pass in the functions for each.
515
+ if "get_operation" not in self._stubs:
516
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
517
+ "/google.longrunning.Operations/GetOperation",
518
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
519
+ response_deserializer=operations_pb2.Operation.FromString,
520
+ )
521
+ return self._stubs["get_operation"]
522
+
523
+ @property
524
+ def list_operations(
525
+ self,
526
+ ) -> Callable[
527
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
528
+ ]:
529
+ r"""Return a callable for the list_operations method over gRPC."""
530
+ # Generate a "stub function" on-the-fly which will actually make
531
+ # the request.
532
+ # gRPC handles serialization and deserialization, so we just need
533
+ # to pass in the functions for each.
534
+ if "list_operations" not in self._stubs:
535
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
536
+ "/google.longrunning.Operations/ListOperations",
537
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
538
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
539
+ )
540
+ return self._stubs["list_operations"]
541
+
542
+ @property
543
+ def kind(self) -> str:
544
+ return "grpc"
545
+
546
+
547
+ __all__ = ("GenerativeServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/generative_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf.json_format import MessageToJson
30
+ import google.protobuf.message
31
+ import grpc # type: ignore
32
+ from grpc.experimental import aio # type: ignore
33
+ import proto # type: ignore
34
+
35
+ from google.ai.generativelanguage_v1.types import generative_service
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, GenerativeServiceTransport
38
+ from .grpc import GenerativeServiceGrpcTransport
39
+
40
+ try:
41
+ from google.api_core import client_logging # type: ignore
42
+
43
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
44
+ except ImportError: # pragma: NO COVER
45
+ CLIENT_LOGGING_SUPPORTED = False
46
+
47
+ _LOGGER = std_logging.getLogger(__name__)
48
+
49
+
50
+ class _LoggingClientAIOInterceptor(
51
+ grpc.aio.UnaryUnaryClientInterceptor
52
+ ): # pragma: NO COVER
53
+ async def intercept_unary_unary(self, continuation, client_call_details, request):
54
+ logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
55
+ std_logging.DEBUG
56
+ )
57
+ if logging_enabled: # pragma: NO COVER
58
+ request_metadata = client_call_details.metadata
59
+ if isinstance(request, proto.Message):
60
+ request_payload = type(request).to_json(request)
61
+ elif isinstance(request, google.protobuf.message.Message):
62
+ request_payload = MessageToJson(request)
63
+ else:
64
+ request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"
65
+
66
+ request_metadata = {
67
+ key: value.decode("utf-8") if isinstance(value, bytes) else value
68
+ for key, value in request_metadata
69
+ }
70
+ grpc_request = {
71
+ "payload": request_payload,
72
+ "requestMethod": "grpc",
73
+ "metadata": dict(request_metadata),
74
+ }
75
+ _LOGGER.debug(
76
+ f"Sending request for {client_call_details.method}",
77
+ extra={
78
+ "serviceName": "google.ai.generativelanguage.v1.GenerativeService",
79
+ "rpcName": str(client_call_details.method),
80
+ "request": grpc_request,
81
+ "metadata": grpc_request["metadata"],
82
+ },
83
+ )
84
+ response = await continuation(client_call_details, request)
85
+ if logging_enabled: # pragma: NO COVER
86
+ response_metadata = await response.trailing_metadata()
87
+ # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
88
+ metadata = (
89
+ dict([(k, str(v)) for k, v in response_metadata])
90
+ if response_metadata
91
+ else None
92
+ )
93
+ result = await response
94
+ if isinstance(result, proto.Message):
95
+ response_payload = type(result).to_json(result)
96
+ elif isinstance(result, google.protobuf.message.Message):
97
+ response_payload = MessageToJson(result)
98
+ else:
99
+ response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
100
+ grpc_response = {
101
+ "payload": response_payload,
102
+ "metadata": metadata,
103
+ "status": "OK",
104
+ }
105
+ _LOGGER.debug(
106
+ f"Received response to rpc {client_call_details.method}.",
107
+ extra={
108
+ "serviceName": "google.ai.generativelanguage.v1.GenerativeService",
109
+ "rpcName": str(client_call_details.method),
110
+ "response": grpc_response,
111
+ "metadata": grpc_response["metadata"],
112
+ },
113
+ )
114
+ return response
115
+
116
+
117
+ class GenerativeServiceGrpcAsyncIOTransport(GenerativeServiceTransport):
118
+ """gRPC AsyncIO backend transport for GenerativeService.
119
+
120
+ API for using Large Models that generate multimodal content
121
+ and have additional capabilities beyond text generation.
122
+
123
+ This class defines the same methods as the primary client, so the
124
+ primary client can load the underlying transport implementation
125
+ and call it.
126
+
127
+ It sends protocol buffers over the wire using gRPC (which is built on
128
+ top of HTTP/2); the ``grpcio`` package must be installed.
129
+ """
130
+
131
+ _grpc_channel: aio.Channel
132
+ _stubs: Dict[str, Callable] = {}
133
+
134
+ @classmethod
135
+ def create_channel(
136
+ cls,
137
+ host: str = "generativelanguage.googleapis.com",
138
+ credentials: Optional[ga_credentials.Credentials] = None,
139
+ credentials_file: Optional[str] = None,
140
+ scopes: Optional[Sequence[str]] = None,
141
+ quota_project_id: Optional[str] = None,
142
+ **kwargs,
143
+ ) -> aio.Channel:
144
+ """Create and return a gRPC AsyncIO channel object.
145
+ Args:
146
+ host (Optional[str]): The host for the channel to use.
147
+ credentials (Optional[~.Credentials]): The
148
+ authorization credentials to attach to requests. These
149
+ credentials identify this application to the service. If
150
+ none are specified, the client will attempt to ascertain
151
+ the credentials from the environment.
152
+ credentials_file (Optional[str]): A file with credentials that can
153
+ be loaded with :func:`google.auth.load_credentials_from_file`.
154
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
155
+ service. These are only used when credentials are not specified and
156
+ are passed to :func:`google.auth.default`.
157
+ quota_project_id (Optional[str]): An optional project to use for billing
158
+ and quota.
159
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
160
+ channel creation.
161
+ Returns:
162
+ aio.Channel: A gRPC AsyncIO channel object.
163
+ """
164
+
165
+ return grpc_helpers_async.create_channel(
166
+ host,
167
+ credentials=credentials,
168
+ credentials_file=credentials_file,
169
+ quota_project_id=quota_project_id,
170
+ default_scopes=cls.AUTH_SCOPES,
171
+ scopes=scopes,
172
+ default_host=cls.DEFAULT_HOST,
173
+ **kwargs,
174
+ )
175
+
176
+ def __init__(
177
+ self,
178
+ *,
179
+ host: str = "generativelanguage.googleapis.com",
180
+ credentials: Optional[ga_credentials.Credentials] = None,
181
+ credentials_file: Optional[str] = None,
182
+ scopes: Optional[Sequence[str]] = None,
183
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
184
+ api_mtls_endpoint: Optional[str] = None,
185
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
186
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
187
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
188
+ quota_project_id: Optional[str] = None,
189
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
190
+ always_use_jwt_access: Optional[bool] = False,
191
+ api_audience: Optional[str] = None,
192
+ ) -> None:
193
+ """Instantiate the transport.
194
+
195
+ Args:
196
+ host (Optional[str]):
197
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
198
+ credentials (Optional[google.auth.credentials.Credentials]): The
199
+ authorization credentials to attach to requests. These
200
+ credentials identify the application to the service; if none
201
+ are specified, the client will attempt to ascertain the
202
+ credentials from the environment.
203
+ This argument is ignored if a ``channel`` instance is provided.
204
+ credentials_file (Optional[str]): A file with credentials that can
205
+ be loaded with :func:`google.auth.load_credentials_from_file`.
206
+ This argument is ignored if a ``channel`` instance is provided.
207
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
208
+ service. These are only used when credentials are not specified and
209
+ are passed to :func:`google.auth.default`.
210
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
211
+ A ``Channel`` instance through which to make calls, or a Callable
212
+ that constructs and returns one. If set to None, ``self.create_channel``
213
+ is used to create the channel. If a Callable is given, it will be called
214
+ with the same arguments as used in ``self.create_channel``.
215
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
216
+ If provided, it overrides the ``host`` argument and tries to create
217
+ a mutual TLS channel with client SSL credentials from
218
+ ``client_cert_source`` or application default SSL credentials.
219
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
220
+ Deprecated. A callback to provide client SSL certificate bytes and
221
+ private key bytes, both in PEM format. It is ignored if
222
+ ``api_mtls_endpoint`` is None.
223
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
224
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
225
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
226
+ A callback to provide client certificate bytes and private key bytes,
227
+ both in PEM format. It is used to configure a mutual TLS channel. It is
228
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
229
+ quota_project_id (Optional[str]): An optional project to use for billing
230
+ and quota.
231
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
232
+ The client info used to send a user-agent string along with
233
+ API requests. If ``None``, then default info will be used.
234
+ Generally, you only need to set this if you're developing
235
+ your own client library.
236
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
237
+ be used for service account credentials.
238
+
239
+ Raises:
240
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
241
+ creation failed for any reason.
242
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
243
+ and ``credentials_file`` are passed.
244
+ """
245
+ self._grpc_channel = None
246
+ self._ssl_channel_credentials = ssl_channel_credentials
247
+ self._stubs: Dict[str, Callable] = {}
248
+
249
+ if api_mtls_endpoint:
250
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
251
+ if client_cert_source:
252
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
253
+
254
+ if isinstance(channel, aio.Channel):
255
+ # Ignore credentials if a channel was passed.
256
+ credentials = None
257
+ self._ignore_credentials = True
258
+ # If a channel was explicitly provided, set it.
259
+ self._grpc_channel = channel
260
+ self._ssl_channel_credentials = None
261
+ else:
262
+ if api_mtls_endpoint:
263
+ host = api_mtls_endpoint
264
+
265
+ # Create SSL credentials with client_cert_source or application
266
+ # default SSL credentials.
267
+ if client_cert_source:
268
+ cert, key = client_cert_source()
269
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
270
+ certificate_chain=cert, private_key=key
271
+ )
272
+ else:
273
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
274
+
275
+ else:
276
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
277
+ cert, key = client_cert_source_for_mtls()
278
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
279
+ certificate_chain=cert, private_key=key
280
+ )
281
+
282
+ # The base transport sets the host, credentials and scopes
283
+ super().__init__(
284
+ host=host,
285
+ credentials=credentials,
286
+ credentials_file=credentials_file,
287
+ scopes=scopes,
288
+ quota_project_id=quota_project_id,
289
+ client_info=client_info,
290
+ always_use_jwt_access=always_use_jwt_access,
291
+ api_audience=api_audience,
292
+ )
293
+
294
+ if not self._grpc_channel:
295
+ # initialize with the provided callable or the default channel
296
+ channel_init = channel or type(self).create_channel
297
+ self._grpc_channel = channel_init(
298
+ self._host,
299
+ # use the credentials which are saved
300
+ credentials=self._credentials,
301
+ # Set ``credentials_file`` to ``None`` here as
302
+ # the credentials that we saved earlier should be used.
303
+ credentials_file=None,
304
+ scopes=self._scopes,
305
+ ssl_credentials=self._ssl_channel_credentials,
306
+ quota_project_id=quota_project_id,
307
+ options=[
308
+ ("grpc.max_send_message_length", -1),
309
+ ("grpc.max_receive_message_length", -1),
310
+ ],
311
+ )
312
+
313
+ self._interceptor = _LoggingClientAIOInterceptor()
314
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
315
+ self._logged_channel = self._grpc_channel
316
+ self._wrap_with_kind = (
317
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
318
+ )
319
+ # Wrap messages. This must be done after self._logged_channel exists
320
+ self._prep_wrapped_messages(client_info)
321
+
322
+ @property
323
+ def grpc_channel(self) -> aio.Channel:
324
+ """Create the channel designed to connect to this service.
325
+
326
+ This property caches on the instance; repeated calls return
327
+ the same channel.
328
+ """
329
+ # Return the channel from cache.
330
+ return self._grpc_channel
331
+
332
+ @property
333
+ def generate_content(
334
+ self,
335
+ ) -> Callable[
336
+ [generative_service.GenerateContentRequest],
337
+ Awaitable[generative_service.GenerateContentResponse],
338
+ ]:
339
+ r"""Return a callable for the generate content method over gRPC.
340
+
341
+ Generates a model response given an input
342
+ ``GenerateContentRequest``. Refer to the `text generation
343
+ guide <https://ai.google.dev/gemini-api/docs/text-generation>`__
344
+ for detailed usage information. Input capabilities differ
345
+ between models, including tuned models. Refer to the `model
346
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
347
+ and `tuning
348
+ guide <https://ai.google.dev/gemini-api/docs/model-tuning>`__
349
+ for details.
350
+
351
+ Returns:
352
+ Callable[[~.GenerateContentRequest],
353
+ Awaitable[~.GenerateContentResponse]]:
354
+ A function that, when called, will call the underlying RPC
355
+ on the server.
356
+ """
357
+ # Generate a "stub function" on-the-fly which will actually make
358
+ # the request.
359
+ # gRPC handles serialization and deserialization, so we just need
360
+ # to pass in the functions for each.
361
+ if "generate_content" not in self._stubs:
362
+ self._stubs["generate_content"] = self._logged_channel.unary_unary(
363
+ "/google.ai.generativelanguage.v1.GenerativeService/GenerateContent",
364
+ request_serializer=generative_service.GenerateContentRequest.serialize,
365
+ response_deserializer=generative_service.GenerateContentResponse.deserialize,
366
+ )
367
+ return self._stubs["generate_content"]
368
+
369
+ @property
370
+ def stream_generate_content(
371
+ self,
372
+ ) -> Callable[
373
+ [generative_service.GenerateContentRequest],
374
+ Awaitable[generative_service.GenerateContentResponse],
375
+ ]:
376
+ r"""Return a callable for the stream generate content method over gRPC.
377
+
378
+ Generates a `streamed
379
+ response <https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream>`__
380
+ from the model given an input ``GenerateContentRequest``.
381
+
382
+ Returns:
383
+ Callable[[~.GenerateContentRequest],
384
+ Awaitable[~.GenerateContentResponse]]:
385
+ A function that, when called, will call the underlying RPC
386
+ on the server.
387
+ """
388
+ # Generate a "stub function" on-the-fly which will actually make
389
+ # the request.
390
+ # gRPC handles serialization and deserialization, so we just need
391
+ # to pass in the functions for each.
392
+ if "stream_generate_content" not in self._stubs:
393
+ self._stubs["stream_generate_content"] = self._logged_channel.unary_stream(
394
+ "/google.ai.generativelanguage.v1.GenerativeService/StreamGenerateContent",
395
+ request_serializer=generative_service.GenerateContentRequest.serialize,
396
+ response_deserializer=generative_service.GenerateContentResponse.deserialize,
397
+ )
398
+ return self._stubs["stream_generate_content"]
399
+
400
+ @property
401
+ def embed_content(
402
+ self,
403
+ ) -> Callable[
404
+ [generative_service.EmbedContentRequest],
405
+ Awaitable[generative_service.EmbedContentResponse],
406
+ ]:
407
+ r"""Return a callable for the embed content method over gRPC.
408
+
409
+ Generates a text embedding vector from the input ``Content``
410
+ using the specified `Gemini Embedding
411
+ model <https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding>`__.
412
+
413
+ Returns:
414
+ Callable[[~.EmbedContentRequest],
415
+ Awaitable[~.EmbedContentResponse]]:
416
+ A function that, when called, will call the underlying RPC
417
+ on the server.
418
+ """
419
+ # Generate a "stub function" on-the-fly which will actually make
420
+ # the request.
421
+ # gRPC handles serialization and deserialization, so we just need
422
+ # to pass in the functions for each.
423
+ if "embed_content" not in self._stubs:
424
+ self._stubs["embed_content"] = self._logged_channel.unary_unary(
425
+ "/google.ai.generativelanguage.v1.GenerativeService/EmbedContent",
426
+ request_serializer=generative_service.EmbedContentRequest.serialize,
427
+ response_deserializer=generative_service.EmbedContentResponse.deserialize,
428
+ )
429
+ return self._stubs["embed_content"]
430
+
431
+ @property
432
+ def batch_embed_contents(
433
+ self,
434
+ ) -> Callable[
435
+ [generative_service.BatchEmbedContentsRequest],
436
+ Awaitable[generative_service.BatchEmbedContentsResponse],
437
+ ]:
438
+ r"""Return a callable for the batch embed contents method over gRPC.
439
+
440
+ Generates multiple embedding vectors from the input ``Content``
441
+ which consists of a batch of strings represented as
442
+ ``EmbedContentRequest`` objects.
443
+
444
+ Returns:
445
+ Callable[[~.BatchEmbedContentsRequest],
446
+ Awaitable[~.BatchEmbedContentsResponse]]:
447
+ A function that, when called, will call the underlying RPC
448
+ on the server.
449
+ """
450
+ # Generate a "stub function" on-the-fly which will actually make
451
+ # the request.
452
+ # gRPC handles serialization and deserialization, so we just need
453
+ # to pass in the functions for each.
454
+ if "batch_embed_contents" not in self._stubs:
455
+ self._stubs["batch_embed_contents"] = self._logged_channel.unary_unary(
456
+ "/google.ai.generativelanguage.v1.GenerativeService/BatchEmbedContents",
457
+ request_serializer=generative_service.BatchEmbedContentsRequest.serialize,
458
+ response_deserializer=generative_service.BatchEmbedContentsResponse.deserialize,
459
+ )
460
+ return self._stubs["batch_embed_contents"]
461
+
462
+ @property
463
+ def count_tokens(
464
+ self,
465
+ ) -> Callable[
466
+ [generative_service.CountTokensRequest],
467
+ Awaitable[generative_service.CountTokensResponse],
468
+ ]:
469
+ r"""Return a callable for the count tokens method over gRPC.
470
+
471
+ Runs a model's tokenizer on input ``Content`` and returns the
472
+ token count. Refer to the `tokens
473
+ guide <https://ai.google.dev/gemini-api/docs/tokens>`__ to learn
474
+ more about tokens.
475
+
476
+ Returns:
477
+ Callable[[~.CountTokensRequest],
478
+ Awaitable[~.CountTokensResponse]]:
479
+ A function that, when called, will call the underlying RPC
480
+ on the server.
481
+ """
482
+ # Generate a "stub function" on-the-fly which will actually make
483
+ # the request.
484
+ # gRPC handles serialization and deserialization, so we just need
485
+ # to pass in the functions for each.
486
+ if "count_tokens" not in self._stubs:
487
+ self._stubs["count_tokens"] = self._logged_channel.unary_unary(
488
+ "/google.ai.generativelanguage.v1.GenerativeService/CountTokens",
489
+ request_serializer=generative_service.CountTokensRequest.serialize,
490
+ response_deserializer=generative_service.CountTokensResponse.deserialize,
491
+ )
492
+ return self._stubs["count_tokens"]
493
+
494
+ def _prep_wrapped_messages(self, client_info):
495
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
496
+ self._wrapped_methods = {
497
+ self.generate_content: self._wrap_method(
498
+ self.generate_content,
499
+ default_retry=retries.AsyncRetry(
500
+ initial=1.0,
501
+ maximum=10.0,
502
+ multiplier=1.3,
503
+ predicate=retries.if_exception_type(
504
+ core_exceptions.ServiceUnavailable,
505
+ ),
506
+ deadline=600.0,
507
+ ),
508
+ default_timeout=600.0,
509
+ client_info=client_info,
510
+ ),
511
+ self.stream_generate_content: self._wrap_method(
512
+ self.stream_generate_content,
513
+ default_retry=retries.AsyncRetry(
514
+ initial=1.0,
515
+ maximum=10.0,
516
+ multiplier=1.3,
517
+ predicate=retries.if_exception_type(
518
+ core_exceptions.ServiceUnavailable,
519
+ ),
520
+ deadline=600.0,
521
+ ),
522
+ default_timeout=600.0,
523
+ client_info=client_info,
524
+ ),
525
+ self.embed_content: self._wrap_method(
526
+ self.embed_content,
527
+ default_retry=retries.AsyncRetry(
528
+ initial=1.0,
529
+ maximum=10.0,
530
+ multiplier=1.3,
531
+ predicate=retries.if_exception_type(
532
+ core_exceptions.ServiceUnavailable,
533
+ ),
534
+ deadline=60.0,
535
+ ),
536
+ default_timeout=60.0,
537
+ client_info=client_info,
538
+ ),
539
+ self.batch_embed_contents: self._wrap_method(
540
+ self.batch_embed_contents,
541
+ default_retry=retries.AsyncRetry(
542
+ initial=1.0,
543
+ maximum=10.0,
544
+ multiplier=1.3,
545
+ predicate=retries.if_exception_type(
546
+ core_exceptions.ServiceUnavailable,
547
+ ),
548
+ deadline=60.0,
549
+ ),
550
+ default_timeout=60.0,
551
+ client_info=client_info,
552
+ ),
553
+ self.count_tokens: self._wrap_method(
554
+ self.count_tokens,
555
+ default_retry=retries.AsyncRetry(
556
+ initial=1.0,
557
+ maximum=10.0,
558
+ multiplier=1.3,
559
+ predicate=retries.if_exception_type(
560
+ core_exceptions.ServiceUnavailable,
561
+ ),
562
+ deadline=60.0,
563
+ ),
564
+ default_timeout=60.0,
565
+ client_info=client_info,
566
+ ),
567
+ self.cancel_operation: self._wrap_method(
568
+ self.cancel_operation,
569
+ default_timeout=None,
570
+ client_info=client_info,
571
+ ),
572
+ self.get_operation: self._wrap_method(
573
+ self.get_operation,
574
+ default_timeout=None,
575
+ client_info=client_info,
576
+ ),
577
+ self.list_operations: self._wrap_method(
578
+ self.list_operations,
579
+ default_timeout=None,
580
+ client_info=client_info,
581
+ ),
582
+ }
583
+
584
+ def _wrap_method(self, func, *args, **kwargs):
585
+ if self._wrap_with_kind: # pragma: NO COVER
586
+ kwargs["kind"] = self.kind
587
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
588
+
589
+ def close(self):
590
+ return self._logged_channel.close()
591
+
592
+ @property
593
+ def kind(self) -> str:
594
+ return "grpc_asyncio"
595
+
596
+ @property
597
+ def cancel_operation(
598
+ self,
599
+ ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
600
+ r"""Return a callable for the cancel_operation method over gRPC."""
601
+ # Generate a "stub function" on-the-fly which will actually make
602
+ # the request.
603
+ # gRPC handles serialization and deserialization, so we just need
604
+ # to pass in the functions for each.
605
+ if "cancel_operation" not in self._stubs:
606
+ self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
607
+ "/google.longrunning.Operations/CancelOperation",
608
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
609
+ response_deserializer=None,
610
+ )
611
+ return self._stubs["cancel_operation"]
612
+
613
+ @property
614
+ def get_operation(
615
+ self,
616
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
617
+ r"""Return a callable for the get_operation method over gRPC."""
618
+ # Generate a "stub function" on-the-fly which will actually make
619
+ # the request.
620
+ # gRPC handles serialization and deserialization, so we just need
621
+ # to pass in the functions for each.
622
+ if "get_operation" not in self._stubs:
623
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
624
+ "/google.longrunning.Operations/GetOperation",
625
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
626
+ response_deserializer=operations_pb2.Operation.FromString,
627
+ )
628
+ return self._stubs["get_operation"]
629
+
630
+ @property
631
+ def list_operations(
632
+ self,
633
+ ) -> Callable[
634
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
635
+ ]:
636
+ r"""Return a callable for the list_operations method over gRPC."""
637
+ # Generate a "stub function" on-the-fly which will actually make
638
+ # the request.
639
+ # gRPC handles serialization and deserialization, so we just need
640
+ # to pass in the functions for each.
641
+ if "list_operations" not in self._stubs:
642
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
643
+ "/google.longrunning.Operations/ListOperations",
644
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
645
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
646
+ )
647
+ return self._stubs["list_operations"]
648
+
649
+
650
+ __all__ = ("GenerativeServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/__init__.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .async_client import ModelServiceAsyncClient
17
+ from .client import ModelServiceClient
18
+
19
+ __all__ = (
20
+ "ModelServiceClient",
21
+ "ModelServiceAsyncClient",
22
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/async_client.py ADDED
@@ -0,0 +1,720 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import re
19
+ from typing import (
20
+ Callable,
21
+ Dict,
22
+ Mapping,
23
+ MutableMapping,
24
+ MutableSequence,
25
+ Optional,
26
+ Sequence,
27
+ Tuple,
28
+ Type,
29
+ Union,
30
+ )
31
+
32
+ from google.api_core import exceptions as core_exceptions
33
+ from google.api_core import gapic_v1
34
+ from google.api_core import retry_async as retries
35
+ from google.api_core.client_options import ClientOptions
36
+ from google.auth import credentials as ga_credentials # type: ignore
37
+ from google.oauth2 import service_account # type: ignore
38
+
39
+ from google.ai.generativelanguage_v1 import gapic_version as package_version
40
+
41
+ try:
42
+ OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None]
43
+ except AttributeError: # pragma: NO COVER
44
+ OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore
45
+
46
+ from google.longrunning import operations_pb2 # type: ignore
47
+
48
+ from google.ai.generativelanguage_v1.services.model_service import pagers
49
+ from google.ai.generativelanguage_v1.types import model, model_service
50
+
51
+ from .client import ModelServiceClient
52
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
53
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
54
+
55
+ try:
56
+ from google.api_core import client_logging # type: ignore
57
+
58
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
59
+ except ImportError: # pragma: NO COVER
60
+ CLIENT_LOGGING_SUPPORTED = False
61
+
62
+ _LOGGER = std_logging.getLogger(__name__)
63
+
64
+
65
+ class ModelServiceAsyncClient:
66
+ """Provides methods for getting metadata information about
67
+ Generative Models.
68
+ """
69
+
70
+ _client: ModelServiceClient
71
+
72
+ # Copy defaults from the synchronous client for use here.
73
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
74
+ DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
75
+ DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
76
+ _DEFAULT_ENDPOINT_TEMPLATE = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE
77
+ _DEFAULT_UNIVERSE = ModelServiceClient._DEFAULT_UNIVERSE
78
+
79
+ model_path = staticmethod(ModelServiceClient.model_path)
80
+ parse_model_path = staticmethod(ModelServiceClient.parse_model_path)
81
+ common_billing_account_path = staticmethod(
82
+ ModelServiceClient.common_billing_account_path
83
+ )
84
+ parse_common_billing_account_path = staticmethod(
85
+ ModelServiceClient.parse_common_billing_account_path
86
+ )
87
+ common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
88
+ parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
89
+ common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
90
+ parse_common_organization_path = staticmethod(
91
+ ModelServiceClient.parse_common_organization_path
92
+ )
93
+ common_project_path = staticmethod(ModelServiceClient.common_project_path)
94
+ parse_common_project_path = staticmethod(
95
+ ModelServiceClient.parse_common_project_path
96
+ )
97
+ common_location_path = staticmethod(ModelServiceClient.common_location_path)
98
+ parse_common_location_path = staticmethod(
99
+ ModelServiceClient.parse_common_location_path
100
+ )
101
+
102
+ @classmethod
103
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
104
+ """Creates an instance of this client using the provided credentials
105
+ info.
106
+
107
+ Args:
108
+ info (dict): The service account private key info.
109
+ args: Additional arguments to pass to the constructor.
110
+ kwargs: Additional arguments to pass to the constructor.
111
+
112
+ Returns:
113
+ ModelServiceAsyncClient: The constructed client.
114
+ """
115
+ return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
116
+
117
+ @classmethod
118
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
119
+ """Creates an instance of this client using the provided credentials
120
+ file.
121
+
122
+ Args:
123
+ filename (str): The path to the service account private key json
124
+ file.
125
+ args: Additional arguments to pass to the constructor.
126
+ kwargs: Additional arguments to pass to the constructor.
127
+
128
+ Returns:
129
+ ModelServiceAsyncClient: The constructed client.
130
+ """
131
+ return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
132
+
133
+ from_service_account_json = from_service_account_file
134
+
135
+ @classmethod
136
+ def get_mtls_endpoint_and_cert_source(
137
+ cls, client_options: Optional[ClientOptions] = None
138
+ ):
139
+ """Return the API endpoint and client cert source for mutual TLS.
140
+
141
+ The client cert source is determined in the following order:
142
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
143
+ client cert source is None.
144
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
145
+ default client cert source exists, use the default one; otherwise the client cert
146
+ source is None.
147
+
148
+ The API endpoint is determined in the following order:
149
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
150
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
151
+ default mTLS endpoint; if the environment variable is "never", use the default API
152
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
153
+ use the default API endpoint.
154
+
155
+ More details can be found at https://google.aip.dev/auth/4114.
156
+
157
+ Args:
158
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
159
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
160
+ in this method.
161
+
162
+ Returns:
163
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
164
+ client cert source to use.
165
+
166
+ Raises:
167
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
168
+ """
169
+ return ModelServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
170
+
171
+ @property
172
+ def transport(self) -> ModelServiceTransport:
173
+ """Returns the transport used by the client instance.
174
+
175
+ Returns:
176
+ ModelServiceTransport: The transport used by the client instance.
177
+ """
178
+ return self._client.transport
179
+
180
+ @property
181
+ def api_endpoint(self):
182
+ """Return the API endpoint used by the client instance.
183
+
184
+ Returns:
185
+ str: The API endpoint used by the client instance.
186
+ """
187
+ return self._client._api_endpoint
188
+
189
+ @property
190
+ def universe_domain(self) -> str:
191
+ """Return the universe domain used by the client instance.
192
+
193
+ Returns:
194
+ str: The universe domain used
195
+ by the client instance.
196
+ """
197
+ return self._client._universe_domain
198
+
199
+ get_transport_class = ModelServiceClient.get_transport_class
200
+
201
+ def __init__(
202
+ self,
203
+ *,
204
+ credentials: Optional[ga_credentials.Credentials] = None,
205
+ transport: Optional[
206
+ Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
207
+ ] = "grpc_asyncio",
208
+ client_options: Optional[ClientOptions] = None,
209
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
210
+ ) -> None:
211
+ """Instantiates the model service async client.
212
+
213
+ Args:
214
+ credentials (Optional[google.auth.credentials.Credentials]): The
215
+ authorization credentials to attach to requests. These
216
+ credentials identify the application to the service; if none
217
+ are specified, the client will attempt to ascertain the
218
+ credentials from the environment.
219
+ transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
220
+ The transport to use, or a Callable that constructs and returns a new transport to use.
221
+ If a Callable is given, it will be called with the same set of initialization
222
+ arguments as used in the ModelServiceTransport constructor.
223
+ If set to None, a transport is chosen automatically.
224
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
225
+ Custom options for the client.
226
+
227
+ 1. The ``api_endpoint`` property can be used to override the
228
+ default endpoint provided by the client when ``transport`` is
229
+ not explicitly provided. Only if this property is not set and
230
+ ``transport`` was not explicitly provided, the endpoint is
231
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
232
+ variable, which have one of the following values:
233
+ "always" (always use the default mTLS endpoint), "never" (always
234
+ use the default regular endpoint) and "auto" (auto-switch to the
235
+ default mTLS endpoint if client certificate is present; this is
236
+ the default value).
237
+
238
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
239
+ is "true", then the ``client_cert_source`` property can be used
240
+ to provide a client certificate for mTLS transport. If
241
+ not provided, the default SSL client certificate will be used if
242
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
243
+ set, no client certificate will be used.
244
+
245
+ 3. The ``universe_domain`` property can be used to override the
246
+ default "googleapis.com" universe. Note that ``api_endpoint``
247
+ property still takes precedence; and ``universe_domain`` is
248
+ currently not supported for mTLS.
249
+
250
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
251
+ The client info used to send a user-agent string along with
252
+ API requests. If ``None``, then default info will be used.
253
+ Generally, you only need to set this if you're developing
254
+ your own client library.
255
+
256
+ Raises:
257
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
258
+ creation failed for any reason.
259
+ """
260
+ self._client = ModelServiceClient(
261
+ credentials=credentials,
262
+ transport=transport,
263
+ client_options=client_options,
264
+ client_info=client_info,
265
+ )
266
+
267
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
268
+ std_logging.DEBUG
269
+ ): # pragma: NO COVER
270
+ _LOGGER.debug(
271
+ "Created client `google.ai.generativelanguage_v1.ModelServiceAsyncClient`.",
272
+ extra={
273
+ "serviceName": "google.ai.generativelanguage.v1.ModelService",
274
+ "universeDomain": getattr(
275
+ self._client._transport._credentials, "universe_domain", ""
276
+ ),
277
+ "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}",
278
+ "credentialsInfo": getattr(
279
+ self.transport._credentials, "get_cred_info", lambda: None
280
+ )(),
281
+ }
282
+ if hasattr(self._client._transport, "_credentials")
283
+ else {
284
+ "serviceName": "google.ai.generativelanguage.v1.ModelService",
285
+ "credentialsType": None,
286
+ },
287
+ )
288
+
289
+ async def get_model(
290
+ self,
291
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
292
+ *,
293
+ name: Optional[str] = None,
294
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
295
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
296
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
297
+ ) -> model.Model:
298
+ r"""Gets information about a specific ``Model`` such as its version
299
+ number, token limits,
300
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
301
+ and other metadata. Refer to the `Gemini models
302
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
303
+ for detailed model information.
304
+
305
+ .. code-block:: python
306
+
307
+ # This snippet has been automatically generated and should be regarded as a
308
+ # code template only.
309
+ # It will require modifications to work:
310
+ # - It may require correct/in-range values for request initialization.
311
+ # - It may require specifying regional endpoints when creating the service
312
+ # client as shown in:
313
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
314
+ from google.ai import generativelanguage_v1
315
+
316
+ async def sample_get_model():
317
+ # Create a client
318
+ client = generativelanguage_v1.ModelServiceAsyncClient()
319
+
320
+ # Initialize request argument(s)
321
+ request = generativelanguage_v1.GetModelRequest(
322
+ name="name_value",
323
+ )
324
+
325
+ # Make the request
326
+ response = await client.get_model(request=request)
327
+
328
+ # Handle the response
329
+ print(response)
330
+
331
+ Args:
332
+ request (Optional[Union[google.ai.generativelanguage_v1.types.GetModelRequest, dict]]):
333
+ The request object. Request for getting information about
334
+ a specific Model.
335
+ name (:class:`str`):
336
+ Required. The resource name of the model.
337
+
338
+ This name should match a model name returned by the
339
+ ``ListModels`` method.
340
+
341
+ Format: ``models/{model}``
342
+
343
+ This corresponds to the ``name`` field
344
+ on the ``request`` instance; if ``request`` is provided, this
345
+ should not be set.
346
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
347
+ should be retried.
348
+ timeout (float): The timeout for this request.
349
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
350
+ sent along with the request as metadata. Normally, each value must be of type `str`,
351
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
352
+ be of type `bytes`.
353
+
354
+ Returns:
355
+ google.ai.generativelanguage_v1.types.Model:
356
+ Information about a Generative
357
+ Language Model.
358
+
359
+ """
360
+ # Create or coerce a protobuf request object.
361
+ # - Quick check: If we got a request object, we should *not* have
362
+ # gotten any keyword arguments that map to the request.
363
+ has_flattened_params = any([name])
364
+ if request is not None and has_flattened_params:
365
+ raise ValueError(
366
+ "If the `request` argument is set, then none of "
367
+ "the individual field arguments should be set."
368
+ )
369
+
370
+ # - Use the request object if provided (there's no risk of modifying the input as
371
+ # there are no flattened fields), or create one.
372
+ if not isinstance(request, model_service.GetModelRequest):
373
+ request = model_service.GetModelRequest(request)
374
+
375
+ # If we have keyword arguments corresponding to fields on the
376
+ # request, apply these.
377
+ if name is not None:
378
+ request.name = name
379
+
380
+ # Wrap the RPC method; this adds retry and timeout information,
381
+ # and friendly error handling.
382
+ rpc = self._client._transport._wrapped_methods[
383
+ self._client._transport.get_model
384
+ ]
385
+
386
+ # Certain fields should be provided within the metadata header;
387
+ # add these here.
388
+ metadata = tuple(metadata) + (
389
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
390
+ )
391
+
392
+ # Validate the universe domain.
393
+ self._client._validate_universe_domain()
394
+
395
+ # Send the request.
396
+ response = await rpc(
397
+ request,
398
+ retry=retry,
399
+ timeout=timeout,
400
+ metadata=metadata,
401
+ )
402
+
403
+ # Done; return the response.
404
+ return response
405
+
406
+ async def list_models(
407
+ self,
408
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
409
+ *,
410
+ page_size: Optional[int] = None,
411
+ page_token: Optional[str] = None,
412
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
413
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
414
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
415
+ ) -> pagers.ListModelsAsyncPager:
416
+ r"""Lists the
417
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
418
+ available through the Gemini API.
419
+
420
+ .. code-block:: python
421
+
422
+ # This snippet has been automatically generated and should be regarded as a
423
+ # code template only.
424
+ # It will require modifications to work:
425
+ # - It may require correct/in-range values for request initialization.
426
+ # - It may require specifying regional endpoints when creating the service
427
+ # client as shown in:
428
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
429
+ from google.ai import generativelanguage_v1
430
+
431
+ async def sample_list_models():
432
+ # Create a client
433
+ client = generativelanguage_v1.ModelServiceAsyncClient()
434
+
435
+ # Initialize request argument(s)
436
+ request = generativelanguage_v1.ListModelsRequest(
437
+ )
438
+
439
+ # Make the request
440
+ page_result = client.list_models(request=request)
441
+
442
+ # Handle the response
443
+ async for response in page_result:
444
+ print(response)
445
+
446
+ Args:
447
+ request (Optional[Union[google.ai.generativelanguage_v1.types.ListModelsRequest, dict]]):
448
+ The request object. Request for listing all Models.
449
+ page_size (:class:`int`):
450
+ The maximum number of ``Models`` to return (per page).
451
+
452
+ If unspecified, 50 models will be returned per page.
453
+ This method returns at most 1000 models per page, even
454
+ if you pass a larger page_size.
455
+
456
+ This corresponds to the ``page_size`` field
457
+ on the ``request`` instance; if ``request`` is provided, this
458
+ should not be set.
459
+ page_token (:class:`str`):
460
+ A page token, received from a previous ``ListModels``
461
+ call.
462
+
463
+ Provide the ``page_token`` returned by one request as an
464
+ argument to the next request to retrieve the next page.
465
+
466
+ When paginating, all other parameters provided to
467
+ ``ListModels`` must match the call that provided the
468
+ page token.
469
+
470
+ This corresponds to the ``page_token`` field
471
+ on the ``request`` instance; if ``request`` is provided, this
472
+ should not be set.
473
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
474
+ should be retried.
475
+ timeout (float): The timeout for this request.
476
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
477
+ sent along with the request as metadata. Normally, each value must be of type `str`,
478
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
479
+ be of type `bytes`.
480
+
481
+ Returns:
482
+ google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsAsyncPager:
483
+ Response from ListModel containing a paginated list of
484
+ Models.
485
+
486
+ Iterating over this object will yield results and
487
+ resolve additional pages automatically.
488
+
489
+ """
490
+ # Create or coerce a protobuf request object.
491
+ # - Quick check: If we got a request object, we should *not* have
492
+ # gotten any keyword arguments that map to the request.
493
+ has_flattened_params = any([page_size, page_token])
494
+ if request is not None and has_flattened_params:
495
+ raise ValueError(
496
+ "If the `request` argument is set, then none of "
497
+ "the individual field arguments should be set."
498
+ )
499
+
500
+ # - Use the request object if provided (there's no risk of modifying the input as
501
+ # there are no flattened fields), or create one.
502
+ if not isinstance(request, model_service.ListModelsRequest):
503
+ request = model_service.ListModelsRequest(request)
504
+
505
+ # If we have keyword arguments corresponding to fields on the
506
+ # request, apply these.
507
+ if page_size is not None:
508
+ request.page_size = page_size
509
+ if page_token is not None:
510
+ request.page_token = page_token
511
+
512
+ # Wrap the RPC method; this adds retry and timeout information,
513
+ # and friendly error handling.
514
+ rpc = self._client._transport._wrapped_methods[
515
+ self._client._transport.list_models
516
+ ]
517
+
518
+ # Validate the universe domain.
519
+ self._client._validate_universe_domain()
520
+
521
+ # Send the request.
522
+ response = await rpc(
523
+ request,
524
+ retry=retry,
525
+ timeout=timeout,
526
+ metadata=metadata,
527
+ )
528
+
529
+ # This method is paged; wrap the response in a pager, which provides
530
+ # an `__aiter__` convenience method.
531
+ response = pagers.ListModelsAsyncPager(
532
+ method=rpc,
533
+ request=request,
534
+ response=response,
535
+ retry=retry,
536
+ timeout=timeout,
537
+ metadata=metadata,
538
+ )
539
+
540
+ # Done; return the response.
541
+ return response
542
+
543
+ async def list_operations(
544
+ self,
545
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
546
+ *,
547
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
548
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
549
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
550
+ ) -> operations_pb2.ListOperationsResponse:
551
+ r"""Lists operations that match the specified filter in the request.
552
+
553
+ Args:
554
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
555
+ The request object. Request message for
556
+ `ListOperations` method.
557
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
558
+ if any, should be retried.
559
+ timeout (float): The timeout for this request.
560
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
561
+ sent along with the request as metadata. Normally, each value must be of type `str`,
562
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
563
+ be of type `bytes`.
564
+ Returns:
565
+ ~.operations_pb2.ListOperationsResponse:
566
+ Response message for ``ListOperations`` method.
567
+ """
568
+ # Create or coerce a protobuf request object.
569
+ # The request isn't a proto-plus wrapped type,
570
+ # so it must be constructed via keyword expansion.
571
+ if isinstance(request, dict):
572
+ request = operations_pb2.ListOperationsRequest(**request)
573
+
574
+ # Wrap the RPC method; this adds retry and timeout information,
575
+ # and friendly error handling.
576
+ rpc = self.transport._wrapped_methods[self._client._transport.list_operations]
577
+
578
+ # Certain fields should be provided within the metadata header;
579
+ # add these here.
580
+ metadata = tuple(metadata) + (
581
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
582
+ )
583
+
584
+ # Validate the universe domain.
585
+ self._client._validate_universe_domain()
586
+
587
+ # Send the request.
588
+ response = await rpc(
589
+ request,
590
+ retry=retry,
591
+ timeout=timeout,
592
+ metadata=metadata,
593
+ )
594
+
595
+ # Done; return the response.
596
+ return response
597
+
598
+ async def get_operation(
599
+ self,
600
+ request: Optional[operations_pb2.GetOperationRequest] = None,
601
+ *,
602
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
603
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
604
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
605
+ ) -> operations_pb2.Operation:
606
+ r"""Gets the latest state of a long-running operation.
607
+
608
+ Args:
609
+ request (:class:`~.operations_pb2.GetOperationRequest`):
610
+ The request object. Request message for
611
+ `GetOperation` method.
612
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
613
+ if any, should be retried.
614
+ timeout (float): The timeout for this request.
615
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
616
+ sent along with the request as metadata. Normally, each value must be of type `str`,
617
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
618
+ be of type `bytes`.
619
+ Returns:
620
+ ~.operations_pb2.Operation:
621
+ An ``Operation`` object.
622
+ """
623
+ # Create or coerce a protobuf request object.
624
+ # The request isn't a proto-plus wrapped type,
625
+ # so it must be constructed via keyword expansion.
626
+ if isinstance(request, dict):
627
+ request = operations_pb2.GetOperationRequest(**request)
628
+
629
+ # Wrap the RPC method; this adds retry and timeout information,
630
+ # and friendly error handling.
631
+ rpc = self.transport._wrapped_methods[self._client._transport.get_operation]
632
+
633
+ # Certain fields should be provided within the metadata header;
634
+ # add these here.
635
+ metadata = tuple(metadata) + (
636
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
637
+ )
638
+
639
+ # Validate the universe domain.
640
+ self._client._validate_universe_domain()
641
+
642
+ # Send the request.
643
+ response = await rpc(
644
+ request,
645
+ retry=retry,
646
+ timeout=timeout,
647
+ metadata=metadata,
648
+ )
649
+
650
+ # Done; return the response.
651
+ return response
652
+
653
+ async def cancel_operation(
654
+ self,
655
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
656
+ *,
657
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
658
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
659
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
660
+ ) -> None:
661
+ r"""Starts asynchronous cancellation on a long-running operation.
662
+
663
+ The server makes a best effort to cancel the operation, but success
664
+ is not guaranteed. If the server doesn't support this method, it returns
665
+ `google.rpc.Code.UNIMPLEMENTED`.
666
+
667
+ Args:
668
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
669
+ The request object. Request message for
670
+ `CancelOperation` method.
671
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors,
672
+ if any, should be retried.
673
+ timeout (float): The timeout for this request.
674
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
675
+ sent along with the request as metadata. Normally, each value must be of type `str`,
676
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
677
+ be of type `bytes`.
678
+ Returns:
679
+ None
680
+ """
681
+ # Create or coerce a protobuf request object.
682
+ # The request isn't a proto-plus wrapped type,
683
+ # so it must be constructed via keyword expansion.
684
+ if isinstance(request, dict):
685
+ request = operations_pb2.CancelOperationRequest(**request)
686
+
687
+ # Wrap the RPC method; this adds retry and timeout information,
688
+ # and friendly error handling.
689
+ rpc = self.transport._wrapped_methods[self._client._transport.cancel_operation]
690
+
691
+ # Certain fields should be provided within the metadata header;
692
+ # add these here.
693
+ metadata = tuple(metadata) + (
694
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
695
+ )
696
+
697
+ # Validate the universe domain.
698
+ self._client._validate_universe_domain()
699
+
700
+ # Send the request.
701
+ await rpc(
702
+ request,
703
+ retry=retry,
704
+ timeout=timeout,
705
+ metadata=metadata,
706
+ )
707
+
708
+ async def __aenter__(self) -> "ModelServiceAsyncClient":
709
+ return self
710
+
711
+ async def __aexit__(self, exc_type, exc, tb):
712
+ await self.transport.close()
713
+
714
+
715
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
716
+ gapic_version=package_version.__version__
717
+ )
718
+
719
+
720
+ __all__ = ("ModelServiceAsyncClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/client.py ADDED
@@ -0,0 +1,1107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ import logging as std_logging
18
+ import os
19
+ import re
20
+ from typing import (
21
+ Callable,
22
+ Dict,
23
+ Mapping,
24
+ MutableMapping,
25
+ MutableSequence,
26
+ Optional,
27
+ Sequence,
28
+ Tuple,
29
+ Type,
30
+ Union,
31
+ cast,
32
+ )
33
+ import warnings
34
+
35
+ from google.api_core import client_options as client_options_lib
36
+ from google.api_core import exceptions as core_exceptions
37
+ from google.api_core import gapic_v1
38
+ from google.api_core import retry as retries
39
+ from google.auth import credentials as ga_credentials # type: ignore
40
+ from google.auth.exceptions import MutualTLSChannelError # type: ignore
41
+ from google.auth.transport import mtls # type: ignore
42
+ from google.auth.transport.grpc import SslCredentials # type: ignore
43
+ from google.oauth2 import service_account # type: ignore
44
+
45
+ from google.ai.generativelanguage_v1 import gapic_version as package_version
46
+
47
+ try:
48
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
49
+ except AttributeError: # pragma: NO COVER
50
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
51
+
52
+ try:
53
+ from google.api_core import client_logging # type: ignore
54
+
55
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
56
+ except ImportError: # pragma: NO COVER
57
+ CLIENT_LOGGING_SUPPORTED = False
58
+
59
+ _LOGGER = std_logging.getLogger(__name__)
60
+
61
+ from google.longrunning import operations_pb2 # type: ignore
62
+
63
+ from google.ai.generativelanguage_v1.services.model_service import pagers
64
+ from google.ai.generativelanguage_v1.types import model, model_service
65
+
66
+ from .transports.base import DEFAULT_CLIENT_INFO, ModelServiceTransport
67
+ from .transports.grpc import ModelServiceGrpcTransport
68
+ from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
69
+ from .transports.rest import ModelServiceRestTransport
70
+
71
+
72
+ class ModelServiceClientMeta(type):
73
+ """Metaclass for the ModelService client.
74
+
75
+ This provides class-level methods for building and retrieving
76
+ support objects (e.g. transport) without polluting the client instance
77
+ objects.
78
+ """
79
+
80
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
81
+ _transport_registry["grpc"] = ModelServiceGrpcTransport
82
+ _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
83
+ _transport_registry["rest"] = ModelServiceRestTransport
84
+
85
+ def get_transport_class(
86
+ cls,
87
+ label: Optional[str] = None,
88
+ ) -> Type[ModelServiceTransport]:
89
+ """Returns an appropriate transport class.
90
+
91
+ Args:
92
+ label: The name of the desired transport. If none is
93
+ provided, then the first transport in the registry is used.
94
+
95
+ Returns:
96
+ The transport class to use.
97
+ """
98
+ # If a specific transport is requested, return that one.
99
+ if label:
100
+ return cls._transport_registry[label]
101
+
102
+ # No transport is requested; return the default (that is, the first one
103
+ # in the dictionary).
104
+ return next(iter(cls._transport_registry.values()))
105
+
106
+
107
+ class ModelServiceClient(metaclass=ModelServiceClientMeta):
108
+ """Provides methods for getting metadata information about
109
+ Generative Models.
110
+ """
111
+
112
+ @staticmethod
113
+ def _get_default_mtls_endpoint(api_endpoint):
114
+ """Converts api endpoint to mTLS endpoint.
115
+
116
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
117
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
118
+ Args:
119
+ api_endpoint (Optional[str]): the api endpoint to convert.
120
+ Returns:
121
+ str: converted mTLS api endpoint.
122
+ """
123
+ if not api_endpoint:
124
+ return api_endpoint
125
+
126
+ mtls_endpoint_re = re.compile(
127
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
128
+ )
129
+
130
+ m = mtls_endpoint_re.match(api_endpoint)
131
+ name, mtls, sandbox, googledomain = m.groups()
132
+ if mtls or not googledomain:
133
+ return api_endpoint
134
+
135
+ if sandbox:
136
+ return api_endpoint.replace(
137
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
138
+ )
139
+
140
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
141
+
142
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
143
+ DEFAULT_ENDPOINT = "generativelanguage.googleapis.com"
144
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
145
+ DEFAULT_ENDPOINT
146
+ )
147
+
148
+ _DEFAULT_ENDPOINT_TEMPLATE = "generativelanguage.{UNIVERSE_DOMAIN}"
149
+ _DEFAULT_UNIVERSE = "googleapis.com"
150
+
151
+ @classmethod
152
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
153
+ """Creates an instance of this client using the provided credentials
154
+ info.
155
+
156
+ Args:
157
+ info (dict): The service account private key info.
158
+ args: Additional arguments to pass to the constructor.
159
+ kwargs: Additional arguments to pass to the constructor.
160
+
161
+ Returns:
162
+ ModelServiceClient: The constructed client.
163
+ """
164
+ credentials = service_account.Credentials.from_service_account_info(info)
165
+ kwargs["credentials"] = credentials
166
+ return cls(*args, **kwargs)
167
+
168
+ @classmethod
169
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
170
+ """Creates an instance of this client using the provided credentials
171
+ file.
172
+
173
+ Args:
174
+ filename (str): The path to the service account private key json
175
+ file.
176
+ args: Additional arguments to pass to the constructor.
177
+ kwargs: Additional arguments to pass to the constructor.
178
+
179
+ Returns:
180
+ ModelServiceClient: The constructed client.
181
+ """
182
+ credentials = service_account.Credentials.from_service_account_file(filename)
183
+ kwargs["credentials"] = credentials
184
+ return cls(*args, **kwargs)
185
+
186
+ from_service_account_json = from_service_account_file
187
+
188
+ @property
189
+ def transport(self) -> ModelServiceTransport:
190
+ """Returns the transport used by the client instance.
191
+
192
+ Returns:
193
+ ModelServiceTransport: The transport used by the client
194
+ instance.
195
+ """
196
+ return self._transport
197
+
198
+ @staticmethod
199
+ def model_path(
200
+ model: str,
201
+ ) -> str:
202
+ """Returns a fully-qualified model string."""
203
+ return "models/{model}".format(
204
+ model=model,
205
+ )
206
+
207
+ @staticmethod
208
+ def parse_model_path(path: str) -> Dict[str, str]:
209
+ """Parses a model path into its component segments."""
210
+ m = re.match(r"^models/(?P<model>.+?)$", path)
211
+ return m.groupdict() if m else {}
212
+
213
+ @staticmethod
214
+ def common_billing_account_path(
215
+ billing_account: str,
216
+ ) -> str:
217
+ """Returns a fully-qualified billing_account string."""
218
+ return "billingAccounts/{billing_account}".format(
219
+ billing_account=billing_account,
220
+ )
221
+
222
+ @staticmethod
223
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
224
+ """Parse a billing_account path into its component segments."""
225
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
226
+ return m.groupdict() if m else {}
227
+
228
+ @staticmethod
229
+ def common_folder_path(
230
+ folder: str,
231
+ ) -> str:
232
+ """Returns a fully-qualified folder string."""
233
+ return "folders/{folder}".format(
234
+ folder=folder,
235
+ )
236
+
237
+ @staticmethod
238
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
239
+ """Parse a folder path into its component segments."""
240
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
241
+ return m.groupdict() if m else {}
242
+
243
+ @staticmethod
244
+ def common_organization_path(
245
+ organization: str,
246
+ ) -> str:
247
+ """Returns a fully-qualified organization string."""
248
+ return "organizations/{organization}".format(
249
+ organization=organization,
250
+ )
251
+
252
+ @staticmethod
253
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
254
+ """Parse a organization path into its component segments."""
255
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
256
+ return m.groupdict() if m else {}
257
+
258
+ @staticmethod
259
+ def common_project_path(
260
+ project: str,
261
+ ) -> str:
262
+ """Returns a fully-qualified project string."""
263
+ return "projects/{project}".format(
264
+ project=project,
265
+ )
266
+
267
+ @staticmethod
268
+ def parse_common_project_path(path: str) -> Dict[str, str]:
269
+ """Parse a project path into its component segments."""
270
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
271
+ return m.groupdict() if m else {}
272
+
273
+ @staticmethod
274
+ def common_location_path(
275
+ project: str,
276
+ location: str,
277
+ ) -> str:
278
+ """Returns a fully-qualified location string."""
279
+ return "projects/{project}/locations/{location}".format(
280
+ project=project,
281
+ location=location,
282
+ )
283
+
284
+ @staticmethod
285
+ def parse_common_location_path(path: str) -> Dict[str, str]:
286
+ """Parse a location path into its component segments."""
287
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
288
+ return m.groupdict() if m else {}
289
+
290
+ @classmethod
291
+ def get_mtls_endpoint_and_cert_source(
292
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
293
+ ):
294
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
295
+
296
+ The client cert source is determined in the following order:
297
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
298
+ client cert source is None.
299
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
300
+ default client cert source exists, use the default one; otherwise the client cert
301
+ source is None.
302
+
303
+ The API endpoint is determined in the following order:
304
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
305
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
306
+ default mTLS endpoint; if the environment variable is "never", use the default API
307
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
308
+ use the default API endpoint.
309
+
310
+ More details can be found at https://google.aip.dev/auth/4114.
311
+
312
+ Args:
313
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
314
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
315
+ in this method.
316
+
317
+ Returns:
318
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
319
+ client cert source to use.
320
+
321
+ Raises:
322
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen.
323
+ """
324
+
325
+ warnings.warn(
326
+ "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.",
327
+ DeprecationWarning,
328
+ )
329
+ if client_options is None:
330
+ client_options = client_options_lib.ClientOptions()
331
+ use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
332
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
333
+ if use_client_cert not in ("true", "false"):
334
+ raise ValueError(
335
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
336
+ )
337
+ if use_mtls_endpoint not in ("auto", "never", "always"):
338
+ raise MutualTLSChannelError(
339
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
340
+ )
341
+
342
+ # Figure out the client cert source to use.
343
+ client_cert_source = None
344
+ if use_client_cert == "true":
345
+ if client_options.client_cert_source:
346
+ client_cert_source = client_options.client_cert_source
347
+ elif mtls.has_default_client_cert_source():
348
+ client_cert_source = mtls.default_client_cert_source()
349
+
350
+ # Figure out which api endpoint to use.
351
+ if client_options.api_endpoint is not None:
352
+ api_endpoint = client_options.api_endpoint
353
+ elif use_mtls_endpoint == "always" or (
354
+ use_mtls_endpoint == "auto" and client_cert_source
355
+ ):
356
+ api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
357
+ else:
358
+ api_endpoint = cls.DEFAULT_ENDPOINT
359
+
360
+ return api_endpoint, client_cert_source
361
+
362
+ @staticmethod
363
+ def _read_environment_variables():
364
+ """Returns the environment variables used by the client.
365
+
366
+ Returns:
367
+ Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE,
368
+ GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables.
369
+
370
+ Raises:
371
+ ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not
372
+ any of ["true", "false"].
373
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT
374
+ is not any of ["auto", "never", "always"].
375
+ """
376
+ use_client_cert = os.getenv(
377
+ "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"
378
+ ).lower()
379
+ use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower()
380
+ universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN")
381
+ if use_client_cert not in ("true", "false"):
382
+ raise ValueError(
383
+ "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
384
+ )
385
+ if use_mtls_endpoint not in ("auto", "never", "always"):
386
+ raise MutualTLSChannelError(
387
+ "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
388
+ )
389
+ return use_client_cert == "true", use_mtls_endpoint, universe_domain_env
390
+
391
+ @staticmethod
392
+ def _get_client_cert_source(provided_cert_source, use_cert_flag):
393
+ """Return the client cert source to be used by the client.
394
+
395
+ Args:
396
+ provided_cert_source (bytes): The client certificate source provided.
397
+ use_cert_flag (bool): A flag indicating whether to use the client certificate.
398
+
399
+ Returns:
400
+ bytes or None: The client cert source to be used by the client.
401
+ """
402
+ client_cert_source = None
403
+ if use_cert_flag:
404
+ if provided_cert_source:
405
+ client_cert_source = provided_cert_source
406
+ elif mtls.has_default_client_cert_source():
407
+ client_cert_source = mtls.default_client_cert_source()
408
+ return client_cert_source
409
+
410
+ @staticmethod
411
+ def _get_api_endpoint(
412
+ api_override, client_cert_source, universe_domain, use_mtls_endpoint
413
+ ):
414
+ """Return the API endpoint used by the client.
415
+
416
+ Args:
417
+ api_override (str): The API endpoint override. If specified, this is always
418
+ the return value of this function and the other arguments are not used.
419
+ client_cert_source (bytes): The client certificate source used by the client.
420
+ universe_domain (str): The universe domain used by the client.
421
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters.
422
+ Possible values are "always", "auto", or "never".
423
+
424
+ Returns:
425
+ str: The API endpoint to be used by the client.
426
+ """
427
+ if api_override is not None:
428
+ api_endpoint = api_override
429
+ elif use_mtls_endpoint == "always" or (
430
+ use_mtls_endpoint == "auto" and client_cert_source
431
+ ):
432
+ _default_universe = ModelServiceClient._DEFAULT_UNIVERSE
433
+ if universe_domain != _default_universe:
434
+ raise MutualTLSChannelError(
435
+ f"mTLS is not supported in any universe other than {_default_universe}."
436
+ )
437
+ api_endpoint = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
438
+ else:
439
+ api_endpoint = ModelServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
440
+ UNIVERSE_DOMAIN=universe_domain
441
+ )
442
+ return api_endpoint
443
+
444
+ @staticmethod
445
+ def _get_universe_domain(
446
+ client_universe_domain: Optional[str], universe_domain_env: Optional[str]
447
+ ) -> str:
448
+ """Return the universe domain used by the client.
449
+
450
+ Args:
451
+ client_universe_domain (Optional[str]): The universe domain configured via the client options.
452
+ universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable.
453
+
454
+ Returns:
455
+ str: The universe domain to be used by the client.
456
+
457
+ Raises:
458
+ ValueError: If the universe domain is an empty string.
459
+ """
460
+ universe_domain = ModelServiceClient._DEFAULT_UNIVERSE
461
+ if client_universe_domain is not None:
462
+ universe_domain = client_universe_domain
463
+ elif universe_domain_env is not None:
464
+ universe_domain = universe_domain_env
465
+ if len(universe_domain.strip()) == 0:
466
+ raise ValueError("Universe Domain cannot be an empty string.")
467
+ return universe_domain
468
+
469
+ def _validate_universe_domain(self):
470
+ """Validates client's and credentials' universe domains are consistent.
471
+
472
+ Returns:
473
+ bool: True iff the configured universe domain is valid.
474
+
475
+ Raises:
476
+ ValueError: If the configured universe domain is not valid.
477
+ """
478
+
479
+ # NOTE (b/349488459): universe validation is disabled until further notice.
480
+ return True
481
+
482
+ @property
483
+ def api_endpoint(self):
484
+ """Return the API endpoint used by the client instance.
485
+
486
+ Returns:
487
+ str: The API endpoint used by the client instance.
488
+ """
489
+ return self._api_endpoint
490
+
491
+ @property
492
+ def universe_domain(self) -> str:
493
+ """Return the universe domain used by the client instance.
494
+
495
+ Returns:
496
+ str: The universe domain used by the client instance.
497
+ """
498
+ return self._universe_domain
499
+
500
+ def __init__(
501
+ self,
502
+ *,
503
+ credentials: Optional[ga_credentials.Credentials] = None,
504
+ transport: Optional[
505
+ Union[str, ModelServiceTransport, Callable[..., ModelServiceTransport]]
506
+ ] = None,
507
+ client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
508
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
509
+ ) -> None:
510
+ """Instantiates the model service client.
511
+
512
+ Args:
513
+ credentials (Optional[google.auth.credentials.Credentials]): The
514
+ authorization credentials to attach to requests. These
515
+ credentials identify the application to the service; if none
516
+ are specified, the client will attempt to ascertain the
517
+ credentials from the environment.
518
+ transport (Optional[Union[str,ModelServiceTransport,Callable[..., ModelServiceTransport]]]):
519
+ The transport to use, or a Callable that constructs and returns a new transport.
520
+ If a Callable is given, it will be called with the same set of initialization
521
+ arguments as used in the ModelServiceTransport constructor.
522
+ If set to None, a transport is chosen automatically.
523
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
524
+ Custom options for the client.
525
+
526
+ 1. The ``api_endpoint`` property can be used to override the
527
+ default endpoint provided by the client when ``transport`` is
528
+ not explicitly provided. Only if this property is not set and
529
+ ``transport`` was not explicitly provided, the endpoint is
530
+ determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
531
+ variable, which have one of the following values:
532
+ "always" (always use the default mTLS endpoint), "never" (always
533
+ use the default regular endpoint) and "auto" (auto-switch to the
534
+ default mTLS endpoint if client certificate is present; this is
535
+ the default value).
536
+
537
+ 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
538
+ is "true", then the ``client_cert_source`` property can be used
539
+ to provide a client certificate for mTLS transport. If
540
+ not provided, the default SSL client certificate will be used if
541
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
542
+ set, no client certificate will be used.
543
+
544
+ 3. The ``universe_domain`` property can be used to override the
545
+ default "googleapis.com" universe. Note that the ``api_endpoint``
546
+ property still takes precedence; and ``universe_domain`` is
547
+ currently not supported for mTLS.
548
+
549
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
550
+ The client info used to send a user-agent string along with
551
+ API requests. If ``None``, then default info will be used.
552
+ Generally, you only need to set this if you're developing
553
+ your own client library.
554
+
555
+ Raises:
556
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
557
+ creation failed for any reason.
558
+ """
559
+ self._client_options = client_options
560
+ if isinstance(self._client_options, dict):
561
+ self._client_options = client_options_lib.from_dict(self._client_options)
562
+ if self._client_options is None:
563
+ self._client_options = client_options_lib.ClientOptions()
564
+ self._client_options = cast(
565
+ client_options_lib.ClientOptions, self._client_options
566
+ )
567
+
568
+ universe_domain_opt = getattr(self._client_options, "universe_domain", None)
569
+
570
+ (
571
+ self._use_client_cert,
572
+ self._use_mtls_endpoint,
573
+ self._universe_domain_env,
574
+ ) = ModelServiceClient._read_environment_variables()
575
+ self._client_cert_source = ModelServiceClient._get_client_cert_source(
576
+ self._client_options.client_cert_source, self._use_client_cert
577
+ )
578
+ self._universe_domain = ModelServiceClient._get_universe_domain(
579
+ universe_domain_opt, self._universe_domain_env
580
+ )
581
+ self._api_endpoint = None # updated below, depending on `transport`
582
+
583
+ # Initialize the universe domain validation.
584
+ self._is_universe_domain_valid = False
585
+
586
+ if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER
587
+ # Setup logging.
588
+ client_logging.initialize_logging()
589
+
590
+ api_key_value = getattr(self._client_options, "api_key", None)
591
+ if api_key_value and credentials:
592
+ raise ValueError(
593
+ "client_options.api_key and credentials are mutually exclusive"
594
+ )
595
+
596
+ # Save or instantiate the transport.
597
+ # Ordinarily, we provide the transport, but allowing a custom transport
598
+ # instance provides an extensibility point for unusual situations.
599
+ transport_provided = isinstance(transport, ModelServiceTransport)
600
+ if transport_provided:
601
+ # transport is a ModelServiceTransport instance.
602
+ if credentials or self._client_options.credentials_file or api_key_value:
603
+ raise ValueError(
604
+ "When providing a transport instance, "
605
+ "provide its credentials directly."
606
+ )
607
+ if self._client_options.scopes:
608
+ raise ValueError(
609
+ "When providing a transport instance, provide its scopes "
610
+ "directly."
611
+ )
612
+ self._transport = cast(ModelServiceTransport, transport)
613
+ self._api_endpoint = self._transport.host
614
+
615
+ self._api_endpoint = self._api_endpoint or ModelServiceClient._get_api_endpoint(
616
+ self._client_options.api_endpoint,
617
+ self._client_cert_source,
618
+ self._universe_domain,
619
+ self._use_mtls_endpoint,
620
+ )
621
+
622
+ if not transport_provided:
623
+ import google.auth._default # type: ignore
624
+
625
+ if api_key_value and hasattr(
626
+ google.auth._default, "get_api_key_credentials"
627
+ ):
628
+ credentials = google.auth._default.get_api_key_credentials(
629
+ api_key_value
630
+ )
631
+
632
+ transport_init: Union[
633
+ Type[ModelServiceTransport], Callable[..., ModelServiceTransport]
634
+ ] = (
635
+ ModelServiceClient.get_transport_class(transport)
636
+ if isinstance(transport, str) or transport is None
637
+ else cast(Callable[..., ModelServiceTransport], transport)
638
+ )
639
+ # initialize with the provided callable or the passed in class
640
+ self._transport = transport_init(
641
+ credentials=credentials,
642
+ credentials_file=self._client_options.credentials_file,
643
+ host=self._api_endpoint,
644
+ scopes=self._client_options.scopes,
645
+ client_cert_source_for_mtls=self._client_cert_source,
646
+ quota_project_id=self._client_options.quota_project_id,
647
+ client_info=client_info,
648
+ always_use_jwt_access=True,
649
+ api_audience=self._client_options.api_audience,
650
+ )
651
+
652
+ if "async" not in str(self._transport):
653
+ if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
654
+ std_logging.DEBUG
655
+ ): # pragma: NO COVER
656
+ _LOGGER.debug(
657
+ "Created client `google.ai.generativelanguage_v1.ModelServiceClient`.",
658
+ extra={
659
+ "serviceName": "google.ai.generativelanguage.v1.ModelService",
660
+ "universeDomain": getattr(
661
+ self._transport._credentials, "universe_domain", ""
662
+ ),
663
+ "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}",
664
+ "credentialsInfo": getattr(
665
+ self.transport._credentials, "get_cred_info", lambda: None
666
+ )(),
667
+ }
668
+ if hasattr(self._transport, "_credentials")
669
+ else {
670
+ "serviceName": "google.ai.generativelanguage.v1.ModelService",
671
+ "credentialsType": None,
672
+ },
673
+ )
674
+
675
+ def get_model(
676
+ self,
677
+ request: Optional[Union[model_service.GetModelRequest, dict]] = None,
678
+ *,
679
+ name: Optional[str] = None,
680
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
681
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
682
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
683
+ ) -> model.Model:
684
+ r"""Gets information about a specific ``Model`` such as its version
685
+ number, token limits,
686
+ `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
687
+ and other metadata. Refer to the `Gemini models
688
+ guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
689
+ for detailed model information.
690
+
691
+ .. code-block:: python
692
+
693
+ # This snippet has been automatically generated and should be regarded as a
694
+ # code template only.
695
+ # It will require modifications to work:
696
+ # - It may require correct/in-range values for request initialization.
697
+ # - It may require specifying regional endpoints when creating the service
698
+ # client as shown in:
699
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
700
+ from google.ai import generativelanguage_v1
701
+
702
+ def sample_get_model():
703
+ # Create a client
704
+ client = generativelanguage_v1.ModelServiceClient()
705
+
706
+ # Initialize request argument(s)
707
+ request = generativelanguage_v1.GetModelRequest(
708
+ name="name_value",
709
+ )
710
+
711
+ # Make the request
712
+ response = client.get_model(request=request)
713
+
714
+ # Handle the response
715
+ print(response)
716
+
717
+ Args:
718
+ request (Union[google.ai.generativelanguage_v1.types.GetModelRequest, dict]):
719
+ The request object. Request for getting information about
720
+ a specific Model.
721
+ name (str):
722
+ Required. The resource name of the model.
723
+
724
+ This name should match a model name returned by the
725
+ ``ListModels`` method.
726
+
727
+ Format: ``models/{model}``
728
+
729
+ This corresponds to the ``name`` field
730
+ on the ``request`` instance; if ``request`` is provided, this
731
+ should not be set.
732
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
733
+ should be retried.
734
+ timeout (float): The timeout for this request.
735
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
736
+ sent along with the request as metadata. Normally, each value must be of type `str`,
737
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
738
+ be of type `bytes`.
739
+
740
+ Returns:
741
+ google.ai.generativelanguage_v1.types.Model:
742
+ Information about a Generative
743
+ Language Model.
744
+
745
+ """
746
+ # Create or coerce a protobuf request object.
747
+ # - Quick check: If we got a request object, we should *not* have
748
+ # gotten any keyword arguments that map to the request.
749
+ has_flattened_params = any([name])
750
+ if request is not None and has_flattened_params:
751
+ raise ValueError(
752
+ "If the `request` argument is set, then none of "
753
+ "the individual field arguments should be set."
754
+ )
755
+
756
+ # - Use the request object if provided (there's no risk of modifying the input as
757
+ # there are no flattened fields), or create one.
758
+ if not isinstance(request, model_service.GetModelRequest):
759
+ request = model_service.GetModelRequest(request)
760
+ # If we have keyword arguments corresponding to fields on the
761
+ # request, apply these.
762
+ if name is not None:
763
+ request.name = name
764
+
765
+ # Wrap the RPC method; this adds retry and timeout information,
766
+ # and friendly error handling.
767
+ rpc = self._transport._wrapped_methods[self._transport.get_model]
768
+
769
+ # Certain fields should be provided within the metadata header;
770
+ # add these here.
771
+ metadata = tuple(metadata) + (
772
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
773
+ )
774
+
775
+ # Validate the universe domain.
776
+ self._validate_universe_domain()
777
+
778
+ # Send the request.
779
+ response = rpc(
780
+ request,
781
+ retry=retry,
782
+ timeout=timeout,
783
+ metadata=metadata,
784
+ )
785
+
786
+ # Done; return the response.
787
+ return response
788
+
789
+ def list_models(
790
+ self,
791
+ request: Optional[Union[model_service.ListModelsRequest, dict]] = None,
792
+ *,
793
+ page_size: Optional[int] = None,
794
+ page_token: Optional[str] = None,
795
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
796
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
797
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
798
+ ) -> pagers.ListModelsPager:
799
+ r"""Lists the
800
+ ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
801
+ available through the Gemini API.
802
+
803
+ .. code-block:: python
804
+
805
+ # This snippet has been automatically generated and should be regarded as a
806
+ # code template only.
807
+ # It will require modifications to work:
808
+ # - It may require correct/in-range values for request initialization.
809
+ # - It may require specifying regional endpoints when creating the service
810
+ # client as shown in:
811
+ # https://googleapis.dev/python/google-api-core/latest/client_options.html
812
+ from google.ai import generativelanguage_v1
813
+
814
+ def sample_list_models():
815
+ # Create a client
816
+ client = generativelanguage_v1.ModelServiceClient()
817
+
818
+ # Initialize request argument(s)
819
+ request = generativelanguage_v1.ListModelsRequest(
820
+ )
821
+
822
+ # Make the request
823
+ page_result = client.list_models(request=request)
824
+
825
+ # Handle the response
826
+ for response in page_result:
827
+ print(response)
828
+
829
+ Args:
830
+ request (Union[google.ai.generativelanguage_v1.types.ListModelsRequest, dict]):
831
+ The request object. Request for listing all Models.
832
+ page_size (int):
833
+ The maximum number of ``Models`` to return (per page).
834
+
835
+ If unspecified, 50 models will be returned per page.
836
+ This method returns at most 1000 models per page, even
837
+ if you pass a larger page_size.
838
+
839
+ This corresponds to the ``page_size`` field
840
+ on the ``request`` instance; if ``request`` is provided, this
841
+ should not be set.
842
+ page_token (str):
843
+ A page token, received from a previous ``ListModels``
844
+ call.
845
+
846
+ Provide the ``page_token`` returned by one request as an
847
+ argument to the next request to retrieve the next page.
848
+
849
+ When paginating, all other parameters provided to
850
+ ``ListModels`` must match the call that provided the
851
+ page token.
852
+
853
+ This corresponds to the ``page_token`` field
854
+ on the ``request`` instance; if ``request`` is provided, this
855
+ should not be set.
856
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
857
+ should be retried.
858
+ timeout (float): The timeout for this request.
859
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
860
+ sent along with the request as metadata. Normally, each value must be of type `str`,
861
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
862
+ be of type `bytes`.
863
+
864
+ Returns:
865
+ google.ai.generativelanguage_v1.services.model_service.pagers.ListModelsPager:
866
+ Response from ListModel containing a paginated list of
867
+ Models.
868
+
869
+ Iterating over this object will yield results and
870
+ resolve additional pages automatically.
871
+
872
+ """
873
+ # Create or coerce a protobuf request object.
874
+ # - Quick check: If we got a request object, we should *not* have
875
+ # gotten any keyword arguments that map to the request.
876
+ has_flattened_params = any([page_size, page_token])
877
+ if request is not None and has_flattened_params:
878
+ raise ValueError(
879
+ "If the `request` argument is set, then none of "
880
+ "the individual field arguments should be set."
881
+ )
882
+
883
+ # - Use the request object if provided (there's no risk of modifying the input as
884
+ # there are no flattened fields), or create one.
885
+ if not isinstance(request, model_service.ListModelsRequest):
886
+ request = model_service.ListModelsRequest(request)
887
+ # If we have keyword arguments corresponding to fields on the
888
+ # request, apply these.
889
+ if page_size is not None:
890
+ request.page_size = page_size
891
+ if page_token is not None:
892
+ request.page_token = page_token
893
+
894
+ # Wrap the RPC method; this adds retry and timeout information,
895
+ # and friendly error handling.
896
+ rpc = self._transport._wrapped_methods[self._transport.list_models]
897
+
898
+ # Validate the universe domain.
899
+ self._validate_universe_domain()
900
+
901
+ # Send the request.
902
+ response = rpc(
903
+ request,
904
+ retry=retry,
905
+ timeout=timeout,
906
+ metadata=metadata,
907
+ )
908
+
909
+ # This method is paged; wrap the response in a pager, which provides
910
+ # an `__iter__` convenience method.
911
+ response = pagers.ListModelsPager(
912
+ method=rpc,
913
+ request=request,
914
+ response=response,
915
+ retry=retry,
916
+ timeout=timeout,
917
+ metadata=metadata,
918
+ )
919
+
920
+ # Done; return the response.
921
+ return response
922
+
923
+ def __enter__(self) -> "ModelServiceClient":
924
+ return self
925
+
926
+ def __exit__(self, type, value, traceback):
927
+ """Releases underlying transport's resources.
928
+
929
+ .. warning::
930
+ ONLY use as a context manager if the transport is NOT shared
931
+ with other clients! Exiting the with block will CLOSE the transport
932
+ and may cause errors in other clients!
933
+ """
934
+ self.transport.close()
935
+
936
+ def list_operations(
937
+ self,
938
+ request: Optional[operations_pb2.ListOperationsRequest] = None,
939
+ *,
940
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
941
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
942
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
943
+ ) -> operations_pb2.ListOperationsResponse:
944
+ r"""Lists operations that match the specified filter in the request.
945
+
946
+ Args:
947
+ request (:class:`~.operations_pb2.ListOperationsRequest`):
948
+ The request object. Request message for
949
+ `ListOperations` method.
950
+ retry (google.api_core.retry.Retry): Designation of what errors,
951
+ if any, should be retried.
952
+ timeout (float): The timeout for this request.
953
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
954
+ sent along with the request as metadata. Normally, each value must be of type `str`,
955
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
956
+ be of type `bytes`.
957
+ Returns:
958
+ ~.operations_pb2.ListOperationsResponse:
959
+ Response message for ``ListOperations`` method.
960
+ """
961
+ # Create or coerce a protobuf request object.
962
+ # The request isn't a proto-plus wrapped type,
963
+ # so it must be constructed via keyword expansion.
964
+ if isinstance(request, dict):
965
+ request = operations_pb2.ListOperationsRequest(**request)
966
+
967
+ # Wrap the RPC method; this adds retry and timeout information,
968
+ # and friendly error handling.
969
+ rpc = self._transport._wrapped_methods[self._transport.list_operations]
970
+
971
+ # Certain fields should be provided within the metadata header;
972
+ # add these here.
973
+ metadata = tuple(metadata) + (
974
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
975
+ )
976
+
977
+ # Validate the universe domain.
978
+ self._validate_universe_domain()
979
+
980
+ # Send the request.
981
+ response = rpc(
982
+ request,
983
+ retry=retry,
984
+ timeout=timeout,
985
+ metadata=metadata,
986
+ )
987
+
988
+ # Done; return the response.
989
+ return response
990
+
991
+ def get_operation(
992
+ self,
993
+ request: Optional[operations_pb2.GetOperationRequest] = None,
994
+ *,
995
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
996
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
997
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
998
+ ) -> operations_pb2.Operation:
999
+ r"""Gets the latest state of a long-running operation.
1000
+
1001
+ Args:
1002
+ request (:class:`~.operations_pb2.GetOperationRequest`):
1003
+ The request object. Request message for
1004
+ `GetOperation` method.
1005
+ retry (google.api_core.retry.Retry): Designation of what errors,
1006
+ if any, should be retried.
1007
+ timeout (float): The timeout for this request.
1008
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1009
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1010
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1011
+ be of type `bytes`.
1012
+ Returns:
1013
+ ~.operations_pb2.Operation:
1014
+ An ``Operation`` object.
1015
+ """
1016
+ # Create or coerce a protobuf request object.
1017
+ # The request isn't a proto-plus wrapped type,
1018
+ # so it must be constructed via keyword expansion.
1019
+ if isinstance(request, dict):
1020
+ request = operations_pb2.GetOperationRequest(**request)
1021
+
1022
+ # Wrap the RPC method; this adds retry and timeout information,
1023
+ # and friendly error handling.
1024
+ rpc = self._transport._wrapped_methods[self._transport.get_operation]
1025
+
1026
+ # Certain fields should be provided within the metadata header;
1027
+ # add these here.
1028
+ metadata = tuple(metadata) + (
1029
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1030
+ )
1031
+
1032
+ # Validate the universe domain.
1033
+ self._validate_universe_domain()
1034
+
1035
+ # Send the request.
1036
+ response = rpc(
1037
+ request,
1038
+ retry=retry,
1039
+ timeout=timeout,
1040
+ metadata=metadata,
1041
+ )
1042
+
1043
+ # Done; return the response.
1044
+ return response
1045
+
1046
+ def cancel_operation(
1047
+ self,
1048
+ request: Optional[operations_pb2.CancelOperationRequest] = None,
1049
+ *,
1050
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
1051
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
1052
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
1053
+ ) -> None:
1054
+ r"""Starts asynchronous cancellation on a long-running operation.
1055
+
1056
+ The server makes a best effort to cancel the operation, but success
1057
+ is not guaranteed. If the server doesn't support this method, it returns
1058
+ `google.rpc.Code.UNIMPLEMENTED`.
1059
+
1060
+ Args:
1061
+ request (:class:`~.operations_pb2.CancelOperationRequest`):
1062
+ The request object. Request message for
1063
+ `CancelOperation` method.
1064
+ retry (google.api_core.retry.Retry): Designation of what errors,
1065
+ if any, should be retried.
1066
+ timeout (float): The timeout for this request.
1067
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
1068
+ sent along with the request as metadata. Normally, each value must be of type `str`,
1069
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
1070
+ be of type `bytes`.
1071
+ Returns:
1072
+ None
1073
+ """
1074
+ # Create or coerce a protobuf request object.
1075
+ # The request isn't a proto-plus wrapped type,
1076
+ # so it must be constructed via keyword expansion.
1077
+ if isinstance(request, dict):
1078
+ request = operations_pb2.CancelOperationRequest(**request)
1079
+
1080
+ # Wrap the RPC method; this adds retry and timeout information,
1081
+ # and friendly error handling.
1082
+ rpc = self._transport._wrapped_methods[self._transport.cancel_operation]
1083
+
1084
+ # Certain fields should be provided within the metadata header;
1085
+ # add these here.
1086
+ metadata = tuple(metadata) + (
1087
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
1088
+ )
1089
+
1090
+ # Validate the universe domain.
1091
+ self._validate_universe_domain()
1092
+
1093
+ # Send the request.
1094
+ rpc(
1095
+ request,
1096
+ retry=retry,
1097
+ timeout=timeout,
1098
+ metadata=metadata,
1099
+ )
1100
+
1101
+
1102
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
1103
+ gapic_version=package_version.__version__
1104
+ )
1105
+
1106
+
1107
+ __all__ = ("ModelServiceClient",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/pagers.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from typing import (
17
+ Any,
18
+ AsyncIterator,
19
+ Awaitable,
20
+ Callable,
21
+ Iterator,
22
+ Optional,
23
+ Sequence,
24
+ Tuple,
25
+ Union,
26
+ )
27
+
28
+ from google.api_core import gapic_v1
29
+ from google.api_core import retry as retries
30
+ from google.api_core import retry_async as retries_async
31
+
32
+ try:
33
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
34
+ OptionalAsyncRetry = Union[
35
+ retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None
36
+ ]
37
+ except AttributeError: # pragma: NO COVER
38
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
39
+ OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore
40
+
41
+ from google.ai.generativelanguage_v1.types import model, model_service
42
+
43
+
44
+ class ListModelsPager:
45
+ """A pager for iterating through ``list_models`` requests.
46
+
47
+ This class thinly wraps an initial
48
+ :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` object, and
49
+ provides an ``__iter__`` method to iterate through its
50
+ ``models`` field.
51
+
52
+ If there are more pages, the ``__iter__`` method will make additional
53
+ ``ListModels`` requests and continue to iterate
54
+ through the ``models`` field on the
55
+ corresponding responses.
56
+
57
+ All the usual :class:`google.ai.generativelanguage_v1.types.ListModelsResponse`
58
+ attributes are available on the pager. If multiple requests are made, only
59
+ the most recent response is retained, and thus used for attribute lookup.
60
+ """
61
+
62
+ def __init__(
63
+ self,
64
+ method: Callable[..., model_service.ListModelsResponse],
65
+ request: model_service.ListModelsRequest,
66
+ response: model_service.ListModelsResponse,
67
+ *,
68
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
69
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
70
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
71
+ ):
72
+ """Instantiate the pager.
73
+
74
+ Args:
75
+ method (Callable): The method that was originally called, and
76
+ which instantiated this pager.
77
+ request (google.ai.generativelanguage_v1.types.ListModelsRequest):
78
+ The initial request object.
79
+ response (google.ai.generativelanguage_v1.types.ListModelsResponse):
80
+ The initial response object.
81
+ retry (google.api_core.retry.Retry): Designation of what errors,
82
+ if any, should be retried.
83
+ timeout (float): The timeout for this request.
84
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
85
+ sent along with the request as metadata. Normally, each value must be of type `str`,
86
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
87
+ be of type `bytes`.
88
+ """
89
+ self._method = method
90
+ self._request = model_service.ListModelsRequest(request)
91
+ self._response = response
92
+ self._retry = retry
93
+ self._timeout = timeout
94
+ self._metadata = metadata
95
+
96
+ def __getattr__(self, name: str) -> Any:
97
+ return getattr(self._response, name)
98
+
99
+ @property
100
+ def pages(self) -> Iterator[model_service.ListModelsResponse]:
101
+ yield self._response
102
+ while self._response.next_page_token:
103
+ self._request.page_token = self._response.next_page_token
104
+ self._response = self._method(
105
+ self._request,
106
+ retry=self._retry,
107
+ timeout=self._timeout,
108
+ metadata=self._metadata,
109
+ )
110
+ yield self._response
111
+
112
+ def __iter__(self) -> Iterator[model.Model]:
113
+ for page in self.pages:
114
+ yield from page.models
115
+
116
+ def __repr__(self) -> str:
117
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
118
+
119
+
120
+ class ListModelsAsyncPager:
121
+ """A pager for iterating through ``list_models`` requests.
122
+
123
+ This class thinly wraps an initial
124
+ :class:`google.ai.generativelanguage_v1.types.ListModelsResponse` object, and
125
+ provides an ``__aiter__`` method to iterate through its
126
+ ``models`` field.
127
+
128
+ If there are more pages, the ``__aiter__`` method will make additional
129
+ ``ListModels`` requests and continue to iterate
130
+ through the ``models`` field on the
131
+ corresponding responses.
132
+
133
+ All the usual :class:`google.ai.generativelanguage_v1.types.ListModelsResponse`
134
+ attributes are available on the pager. If multiple requests are made, only
135
+ the most recent response is retained, and thus used for attribute lookup.
136
+ """
137
+
138
+ def __init__(
139
+ self,
140
+ method: Callable[..., Awaitable[model_service.ListModelsResponse]],
141
+ request: model_service.ListModelsRequest,
142
+ response: model_service.ListModelsResponse,
143
+ *,
144
+ retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT,
145
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
146
+ metadata: Sequence[Tuple[str, Union[str, bytes]]] = ()
147
+ ):
148
+ """Instantiates the pager.
149
+
150
+ Args:
151
+ method (Callable): The method that was originally called, and
152
+ which instantiated this pager.
153
+ request (google.ai.generativelanguage_v1.types.ListModelsRequest):
154
+ The initial request object.
155
+ response (google.ai.generativelanguage_v1.types.ListModelsResponse):
156
+ The initial response object.
157
+ retry (google.api_core.retry.AsyncRetry): Designation of what errors,
158
+ if any, should be retried.
159
+ timeout (float): The timeout for this request.
160
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
161
+ sent along with the request as metadata. Normally, each value must be of type `str`,
162
+ but for metadata keys ending with the suffix `-bin`, the corresponding values must
163
+ be of type `bytes`.
164
+ """
165
+ self._method = method
166
+ self._request = model_service.ListModelsRequest(request)
167
+ self._response = response
168
+ self._retry = retry
169
+ self._timeout = timeout
170
+ self._metadata = metadata
171
+
172
+ def __getattr__(self, name: str) -> Any:
173
+ return getattr(self._response, name)
174
+
175
+ @property
176
+ async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]:
177
+ yield self._response
178
+ while self._response.next_page_token:
179
+ self._request.page_token = self._response.next_page_token
180
+ self._response = await self._method(
181
+ self._request,
182
+ retry=self._retry,
183
+ timeout=self._timeout,
184
+ metadata=self._metadata,
185
+ )
186
+ yield self._response
187
+
188
+ def __aiter__(self) -> AsyncIterator[model.Model]:
189
+ async def async_generator():
190
+ async for page in self.pages:
191
+ for response in page.models:
192
+ yield response
193
+
194
+ return async_generator()
195
+
196
+ def __repr__(self) -> str:
197
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ from typing import Dict, Type
18
+
19
+ from .base import ModelServiceTransport
20
+ from .grpc import ModelServiceGrpcTransport
21
+ from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport
22
+ from .rest import ModelServiceRestInterceptor, ModelServiceRestTransport
23
+
24
+ # Compile a registry of transports.
25
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]]
26
+ _transport_registry["grpc"] = ModelServiceGrpcTransport
27
+ _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
28
+ _transport_registry["rest"] = ModelServiceRestTransport
29
+
30
+ __all__ = (
31
+ "ModelServiceTransport",
32
+ "ModelServiceGrpcTransport",
33
+ "ModelServiceGrpcAsyncIOTransport",
34
+ "ModelServiceRestTransport",
35
+ "ModelServiceRestInterceptor",
36
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (891 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (8.88 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (20 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (22.2 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (40.1 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (12.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/base.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+
28
+ from google.ai.generativelanguage_v1 import gapic_version as package_version
29
+ from google.ai.generativelanguage_v1.types import model, model_service
30
+
31
# Default client metadata sent as the user-agent with every request.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class ModelServiceTransport(abc.ABC):
    """Abstract transport class for ModelService."""

    # No OAuth scopes are required by this service.
    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): The audience applied to GDC-H
                credentials (via ``with_gdch_audience``); falls back to
                ``host`` when not set.
        """

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes
        # Subclasses (e.g. the gRPC transport, when given a pre-built
        # channel) may set _ignore_credentials before calling super().
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        """Return the hostname (including port) this transport connects to."""
        return self._host

    def _prep_wrapped_messages(self, client_info):
        """Precompute the gapic-wrapped versions of each RPC method."""
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.get_model: gapic_v1.method.wrap_method(
                self.get_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_models: gapic_v1.method.wrap_method(
                self.list_models,
                default_timeout=None,
                client_info=client_info,
            ),
            self.cancel_operation: gapic_v1.method.wrap_method(
                self.cancel_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_operation: gapic_v1.method.wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: gapic_v1.method.wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get_model(
        self,
    ) -> Callable[
        [model_service.GetModelRequest], Union[model.Model, Awaitable[model.Model]]
    ]:
        # Subclasses return the (sync or async) callable for the GetModel RPC.
        raise NotImplementedError()

    @property
    def list_models(
        self,
    ) -> Callable[
        [model_service.ListModelsRequest],
        Union[
            model_service.ListModelsResponse,
            Awaitable[model_service.ListModelsResponse],
        ],
    ]:
        # Subclasses return the (sync or async) callable for the ListModels RPC.
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        # Subclasses return the callable for the long-running ListOperations RPC.
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        # Subclasses return the callable for the long-running GetOperation RPC.
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None,]:
        # Subclasses return the callable for the long-running CancelOperation RPC.
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        """Return a short label identifying the transport kind (e.g. "grpc")."""
        raise NotImplementedError()


__all__ = ("ModelServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/grpc.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json
17
+ import logging as std_logging
18
+ import pickle
19
+ from typing import Callable, Dict, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import gapic_v1, grpc_helpers
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.auth.transport.grpc import SslCredentials # type: ignore
26
+ from google.longrunning import operations_pb2 # type: ignore
27
+ from google.protobuf.json_format import MessageToJson
28
+ import google.protobuf.message
29
+ import grpc # type: ignore
30
+ import proto # type: ignore
31
+
32
+ from google.ai.generativelanguage_v1.types import model, model_service
33
+
34
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
35
+
36
# Structured client-side logging is only available in newer versions of
# google-api-core; degrade gracefully when the module is absent.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = std_logging.getLogger(__name__)


class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor):  # pragma: NO COVER
    """Client interceptor that logs unary-unary requests/responses at DEBUG.

    Logging only happens when client logging is supported and the module
    logger is enabled for DEBUG; otherwise calls pass through untouched.
    """

    def intercept_unary_unary(self, continuation, client_call_details, request):
        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        )
        if logging_enabled:  # pragma: NO COVER
            request_metadata = client_call_details.metadata
            # Serialize the request payload: proto-plus messages, raw protobuf
            # messages, and (as a last resort) a pickled repr of anything else.
            if isinstance(request, proto.Message):
                request_payload = type(request).to_json(request)
            elif isinstance(request, google.protobuf.message.Message):
                request_payload = MessageToJson(request)
            else:
                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"

            request_metadata = {
                key: value.decode("utf-8") if isinstance(value, bytes) else value
                for key, value in request_metadata
            }
            grpc_request = {
                "payload": request_payload,
                "requestMethod": "grpc",
                "metadata": dict(request_metadata),
            }
            _LOGGER.debug(
                f"Sending request for {client_call_details.method}",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.ModelService",
                    "rpcName": client_call_details.method,
                    "request": grpc_request,
                    "metadata": grpc_request["metadata"],
                },
            )

        response = continuation(client_call_details, request)
        if logging_enabled:  # pragma: NO COVER
            response_metadata = response.trailing_metadata()
            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
            metadata = (
                dict([(k, str(v)) for k, v in response_metadata])
                if response_metadata
                else None
            )
            result = response.result()
            if isinstance(result, proto.Message):
                response_payload = type(result).to_json(result)
            elif isinstance(result, google.protobuf.message.Message):
                response_payload = MessageToJson(result)
            else:
                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
            grpc_response = {
                "payload": response_payload,
                "metadata": metadata,
                "status": "OK",
            }
            _LOGGER.debug(
                f"Received response for {client_call_details.method}.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.ModelService",
                    "rpcName": client_call_details.method,
                    "response": grpc_response,
                    "metadata": grpc_response["metadata"],
                },
            )
        return response
110
+
111
+
112
class ModelServiceGrpcTransport(ModelServiceTransport):
    """gRPC backend transport for ModelService.

    Provides methods for getting metadata information about
    Generative Models.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of lazily-created stub callables, keyed by method name.
    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if a ``channel`` instance is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if a ``channel`` instance is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if a ``channel`` instance is provided.
            channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]):
                A ``Channel`` instance through which to make calls, or a Callable
                that constructs and returns one. If set to None, ``self.create_channel``
                is used to create the channel. If a Callable is given, it will be called
                with the same arguments as used in ``self.create_channel``.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if a ``channel`` instance is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Passed through to the base transport
                to override the credential audience.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if isinstance(channel, grpc.Channel):
            # Ignore credentials if a channel was passed.
            credentials = None
            self._ignore_credentials = True
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None

        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            # initialize with the provided callable or the default channel
            channel_init = channel or type(self).create_channel
            self._grpc_channel = channel_init(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # All RPCs go through the logging interceptor so DEBUG-level
        # request/response logging can be enabled without code changes.
        self._interceptor = _LoggingClientInterceptor()
        self._logged_channel = grpc.intercept_channel(
            self._grpc_channel, self._interceptor
        )

        # Wrap messages. This must be done after self._logged_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(
        cls,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """

        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel

    @property
    def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
        r"""Return a callable for the get model method over gRPC.

        Gets information about a specific ``Model`` such as its version
        number, token limits,
        `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
        and other metadata. Refer to the `Gemini models
        guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
        for detailed model information.

        Returns:
            Callable[[~.GetModelRequest],
                    ~.Model]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_model" not in self._stubs:
            self._stubs["get_model"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1.ModelService/GetModel",
                request_serializer=model_service.GetModelRequest.serialize,
                response_deserializer=model.Model.deserialize,
            )
        return self._stubs["get_model"]

    @property
    def list_models(
        self,
    ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
        r"""Return a callable for the list models method over gRPC.

        Lists the
        ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
        available through the Gemini API.

        Returns:
            Callable[[~.ListModelsRequest],
                    ~.ListModelsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_models" not in self._stubs:
            self._stubs["list_models"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1.ModelService/ListModels",
                request_serializer=model_service.ListModelsRequest.serialize,
                response_deserializer=model_service.ListModelsResponse.deserialize,
            )
        return self._stubs["list_models"]

    def close(self):
        """Close the logged (intercepted) channel used by this transport."""
        self._logged_channel.close()

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        r"""Return a callable for the cancel_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "cancel_operation" not in self._stubs:
            self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/CancelOperation",
                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["cancel_operation"]

    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]

    @property
    def kind(self) -> str:
        """Return the transport kind label ("grpc")."""
        return "grpc"


__all__ = ("ModelServiceGrpcTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf.json_format import MessageToJson
30
+ import google.protobuf.message
31
+ import grpc # type: ignore
32
+ from grpc.experimental import aio # type: ignore
33
+ import proto # type: ignore
34
+
35
+ from google.ai.generativelanguage_v1.types import model, model_service
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
38
+ from .grpc import ModelServiceGrpcTransport
39
+
40
# Structured client-side logging is only available in newer versions of
# google-api-core; degrade gracefully when the module is absent.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = std_logging.getLogger(__name__)


class _LoggingClientAIOInterceptor(
    grpc.aio.UnaryUnaryClientInterceptor
):  # pragma: NO COVER
    """Async client interceptor that logs unary-unary RPCs at DEBUG.

    AsyncIO counterpart of the sync logging interceptor; logging is skipped
    entirely unless client logging is supported and DEBUG is enabled.
    """

    async def intercept_unary_unary(self, continuation, client_call_details, request):
        logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        )
        if logging_enabled:  # pragma: NO COVER
            request_metadata = client_call_details.metadata
            # Serialize the request payload: proto-plus messages, raw protobuf
            # messages, and (as a last resort) a pickled repr of anything else.
            if isinstance(request, proto.Message):
                request_payload = type(request).to_json(request)
            elif isinstance(request, google.protobuf.message.Message):
                request_payload = MessageToJson(request)
            else:
                request_payload = f"{type(request).__name__}: {pickle.dumps(request)}"

            request_metadata = {
                key: value.decode("utf-8") if isinstance(value, bytes) else value
                for key, value in request_metadata
            }
            grpc_request = {
                "payload": request_payload,
                "requestMethod": "grpc",
                "metadata": dict(request_metadata),
            }
            _LOGGER.debug(
                f"Sending request for {client_call_details.method}",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.ModelService",
                    "rpcName": str(client_call_details.method),
                    "request": grpc_request,
                    "metadata": grpc_request["metadata"],
                },
            )
        response = await continuation(client_call_details, request)
        if logging_enabled:  # pragma: NO COVER
            response_metadata = await response.trailing_metadata()
            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples
            metadata = (
                dict([(k, str(v)) for k, v in response_metadata])
                if response_metadata
                else None
            )
            result = await response
            if isinstance(result, proto.Message):
                response_payload = type(result).to_json(result)
            elif isinstance(result, google.protobuf.message.Message):
                response_payload = MessageToJson(result)
            else:
                response_payload = f"{type(result).__name__}: {pickle.dumps(result)}"
            grpc_response = {
                "payload": response_payload,
                "metadata": metadata,
                "status": "OK",
            }
            _LOGGER.debug(
                f"Received response to rpc {client_call_details.method}.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1.ModelService",
                    "rpcName": str(client_call_details.method),
                    "response": grpc_response,
                    "metadata": grpc_response["metadata"],
                },
            )
        return response
115
+
116
+
117
class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport):
    """gRPC AsyncIO backend transport for ModelService.

    Provides methods for getting metadata information about
    Generative Models.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Underlying asyncio gRPC channel; created lazily in __init__ unless one
    # is injected by the caller.
    _grpc_channel: aio.Channel
    # Cache of RPC-name -> stub callable, populated on first property access.
    _stubs: Dict[str, Callable] = {}

    @classmethod
    def create_channel(
        cls,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """

        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if a ``channel`` instance is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if a ``channel`` instance is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
                A ``Channel`` instance through which to make calls, or a Callable
                that constructs and returns one. If set to None, ``self.create_channel``
                is used to create the channel. If a Callable is given, it will be called
                with the same arguments as used in ``self.create_channel``.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if a ``channel`` instance is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        if isinstance(channel, aio.Channel):
            # Ignore credentials if a channel was passed.
            credentials = None
            self._ignore_credentials = True
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No ready-made channel: resolve which SSL credentials to use
            # before the base class validates host/credentials.
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials

            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            # initialize with the provided callable or the default channel
            channel_init = channel or type(self).create_channel
            self._grpc_channel = channel_init(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Attach the debug-logging interceptor to the channel's private
        # interceptor list so all unary-unary RPCs flow through it.
        self._interceptor = _LoggingClientAIOInterceptor()
        self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
        self._logged_channel = self._grpc_channel
        # Older api-core versions lack the ``kind`` parameter on
        # wrap_method; detect support once so _wrap_method can adapt.
        self._wrap_with_kind = (
            "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
        )
        # Wrap messages. This must be done after self._logged_channel exists
        self._prep_wrapped_messages(client_info)

    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel

    @property
    def get_model(
        self,
    ) -> Callable[[model_service.GetModelRequest], Awaitable[model.Model]]:
        r"""Return a callable for the get model method over gRPC.

        Gets information about a specific ``Model`` such as its version
        number, token limits,
        `parameters <https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters>`__
        and other metadata. Refer to the `Gemini models
        guide <https://ai.google.dev/gemini-api/docs/models/gemini>`__
        for detailed model information.

        Returns:
            Callable[[~.GetModelRequest],
                    Awaitable[~.Model]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_model" not in self._stubs:
            self._stubs["get_model"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1.ModelService/GetModel",
                request_serializer=model_service.GetModelRequest.serialize,
                response_deserializer=model.Model.deserialize,
            )
        return self._stubs["get_model"]

    @property
    def list_models(
        self,
    ) -> Callable[
        [model_service.ListModelsRequest], Awaitable[model_service.ListModelsResponse]
    ]:
        r"""Return a callable for the list models method over gRPC.

        Lists the
        ```Model``\ s <https://ai.google.dev/gemini-api/docs/models/gemini>`__
        available through the Gemini API.

        Returns:
            Callable[[~.ListModelsRequest],
                    Awaitable[~.ListModelsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_models" not in self._stubs:
            self._stubs["list_models"] = self._logged_channel.unary_unary(
                "/google.ai.generativelanguage.v1.ModelService/ListModels",
                request_serializer=model_service.ListModelsRequest.serialize,
                response_deserializer=model_service.ListModelsResponse.deserialize,
            )
        return self._stubs["list_models"]

    def _prep_wrapped_messages(self, client_info):
        """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
        self._wrapped_methods = {
            self.get_model: self._wrap_method(
                self.get_model,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_models: self._wrap_method(
                self.list_models,
                default_timeout=None,
                client_info=client_info,
            ),
            self.cancel_operation: self._wrap_method(
                self.cancel_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_operation: self._wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: self._wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def _wrap_method(self, func, *args, **kwargs):
        # Pass ``kind`` only when the installed api-core supports it
        # (detected once in __init__).
        if self._wrap_with_kind:  # pragma: NO COVER
            kwargs["kind"] = self.kind
        return gapic_v1.method_async.wrap_method(func, *args, **kwargs)

    def close(self):
        """Delegate to the underlying channel's ``close()``."""
        return self._logged_channel.close()

    @property
    def kind(self) -> str:
        # Transport discriminator used by the client factory and wrappers.
        return "grpc_asyncio"

    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        r"""Return a callable for the cancel_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "cancel_operation" not in self._stubs:
            self._stubs["cancel_operation"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/CancelOperation",
                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["cancel_operation"]

    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self._logged_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]
487
+
488
+
489
+ __all__ = ("ModelServiceGrpcAsyncIOTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/rest.py ADDED
@@ -0,0 +1,1018 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import dataclasses
17
+ import json # type: ignore
18
+ import logging
19
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
20
+ import warnings
21
+
22
+ from google.api_core import exceptions as core_exceptions
23
+ from google.api_core import gapic_v1, rest_helpers, rest_streaming
24
+ from google.api_core import retry as retries
25
+ from google.auth import credentials as ga_credentials # type: ignore
26
+ from google.auth.transport.requests import AuthorizedSession # type: ignore
27
+ from google.longrunning import operations_pb2 # type: ignore
28
+ from google.protobuf import json_format
29
+ from requests import __version__ as requests_version
30
+
31
+ from google.ai.generativelanguage_v1.types import model, model_service
32
+
33
+ from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
34
+ from .rest_base import _BaseModelServiceRestTransport
35
+
36
+ try:
37
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None]
38
+ except AttributeError: # pragma: NO COVER
39
+ OptionalRetry = Union[retries.Retry, object, None] # type: ignore
40
+
41
+ try:
42
+ from google.api_core import client_logging # type: ignore
43
+
44
+ CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER
45
+ except ImportError: # pragma: NO COVER
46
+ CLIENT_LOGGING_SUPPORTED = False
47
+
48
+ _LOGGER = logging.getLogger(__name__)
49
+
50
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
51
+ gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
52
+ grpc_version=None,
53
+ rest_version=f"requests@{requests_version}",
54
+ )
55
+
56
+
57
class ModelServiceRestInterceptor:
    """Hook point for customizing ModelService REST traffic.

    An interceptor observes (and may rewrite) every outgoing request with
    its metadata, and every incoming response, as they pass through the
    transport. Common applications:
    * Logging traffic
    * Enforcing service- or application-specific request invariants
    * Redacting or trimming fields from responses

    To activate, subclass this type, override only the hooks you care
    about, and hand an instance to the ModelServiceRestTransport.

    .. code-block:: python
        class MyCustomModelServiceInterceptor(ModelServiceRestInterceptor):
            def pre_get_model(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_get_model(self, response):
                logging.log(f"Received response: {response}")
                return response

            def pre_list_models(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_list_models(self, response):
                logging.log(f"Received response: {response}")
                return response

        transport = ModelServiceRestTransport(interceptor=MyCustomModelServiceInterceptor())
        client = ModelServiceClient(transport=transport)


    """

    def pre_get_model(
        self,
        request: model_service.GetModelRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[model_service.GetModelRequest, Sequence[Tuple[str, Union[str, bytes]]]]:
        """Hook called before get_model is sent to the ModelService server.

        Subclasses may return a modified request/metadata pair; the default
        implementation passes both through unchanged.
        """
        return request, metadata

    def post_get_model(self, response: model.Model) -> model.Model:
        """Hook called after the ModelService server answers get_model.

        Subclasses may return a transformed response before user code sees
        it; the default implementation returns it unchanged.
        """
        return response

    def pre_list_models(
        self,
        request: model_service.ListModelsRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        model_service.ListModelsRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Hook called before list_models is sent to the ModelService server.

        Subclasses may return a modified request/metadata pair; the default
        implementation passes both through unchanged.
        """
        return request, metadata

    def post_list_models(
        self, response: model_service.ListModelsResponse
    ) -> model_service.ListModelsResponse:
        """Hook called after the ModelService server answers list_models.

        Subclasses may return a transformed response before user code sees
        it; the default implementation returns it unchanged.
        """
        return response

    def pre_cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Hook called before cancel_operation is sent to the ModelService server.

        Subclasses may return a modified request/metadata pair; the default
        implementation passes both through unchanged.
        """
        return request, metadata

    def post_cancel_operation(self, response: None) -> None:
        """Hook called after the ModelService server answers cancel_operation.

        Subclasses may return a transformed response before user code sees
        it; the default implementation returns it unchanged.
        """
        return response

    def pre_get_operation(
        self,
        request: operations_pb2.GetOperationRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Hook called before get_operation is sent to the ModelService server.

        Subclasses may return a modified request/metadata pair; the default
        implementation passes both through unchanged.
        """
        return request, metadata

    def post_get_operation(
        self, response: operations_pb2.Operation
    ) -> operations_pb2.Operation:
        """Hook called after the ModelService server answers get_operation.

        Subclasses may return a transformed response before user code sees
        it; the default implementation returns it unchanged.
        """
        return response

    def pre_list_operations(
        self,
        request: operations_pb2.ListOperationsRequest,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[
        operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]]
    ]:
        """Hook called before list_operations is sent to the ModelService server.

        Subclasses may return a modified request/metadata pair; the default
        implementation passes both through unchanged.
        """
        return request, metadata

    def post_list_operations(
        self, response: operations_pb2.ListOperationsResponse
    ) -> operations_pb2.ListOperationsResponse:
        """Hook called after the ModelService server answers list_operations.

        Subclasses may return a transformed response before user code sees
        it; the default implementation returns it unchanged.
        """
        return response
+
213
+
214
@dataclasses.dataclass
class ModelServiceRestStub:
    """Per-RPC state shared by the transport's nested REST method classes."""

    # Authorized HTTP session used to issue the REST requests.
    _session: AuthorizedSession
    # Target API host, prefixed onto each transcoded URI.
    _host: str
    # Interceptor whose pre/post hooks wrap every call.
    _interceptor: ModelServiceRestInterceptor
219
+
220
+
221
+ class ModelServiceRestTransport(_BaseModelServiceRestTransport):
222
+ """REST backend synchronous transport for ModelService.
223
+
224
+ Provides methods for getting metadata information about
225
+ Generative Models.
226
+
227
+ This class defines the same methods as the primary client, so the
228
+ primary client can load the underlying transport implementation
229
+ and call it.
230
+
231
+ It sends JSON representations of protocol buffers over HTTP/1.1
232
+ """
233
+
234
+ def __init__(
235
+ self,
236
+ *,
237
+ host: str = "generativelanguage.googleapis.com",
238
+ credentials: Optional[ga_credentials.Credentials] = None,
239
+ credentials_file: Optional[str] = None,
240
+ scopes: Optional[Sequence[str]] = None,
241
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
242
+ quota_project_id: Optional[str] = None,
243
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
244
+ always_use_jwt_access: Optional[bool] = False,
245
+ url_scheme: str = "https",
246
+ interceptor: Optional[ModelServiceRestInterceptor] = None,
247
+ api_audience: Optional[str] = None,
248
+ ) -> None:
249
+ """Instantiate the transport.
250
+
251
+ Args:
252
+ host (Optional[str]):
253
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
254
+ credentials (Optional[google.auth.credentials.Credentials]): The
255
+ authorization credentials to attach to requests. These
256
+ credentials identify the application to the service; if none
257
+ are specified, the client will attempt to ascertain the
258
+ credentials from the environment.
259
+
260
+ credentials_file (Optional[str]): A file with credentials that can
261
+ be loaded with :func:`google.auth.load_credentials_from_file`.
262
+ This argument is ignored if ``channel`` is provided.
263
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
264
+ ignored if ``channel`` is provided.
265
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
266
+ certificate to configure mutual TLS HTTP channel. It is ignored
267
+ if ``channel`` is provided.
268
+ quota_project_id (Optional[str]): An optional project to use for billing
269
+ and quota.
270
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
271
+ The client info used to send a user-agent string along with
272
+ API requests. If ``None``, then default info will be used.
273
+ Generally, you only need to set this if you are developing
274
+ your own client library.
275
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
276
+ be used for service account credentials.
277
+ url_scheme: the protocol scheme for the API endpoint. Normally
278
+ "https", but for testing or local servers,
279
+ "http" can be specified.
280
+ """
281
+ # Run the base constructor
282
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
283
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
284
+ # credentials object
285
+ super().__init__(
286
+ host=host,
287
+ credentials=credentials,
288
+ client_info=client_info,
289
+ always_use_jwt_access=always_use_jwt_access,
290
+ url_scheme=url_scheme,
291
+ api_audience=api_audience,
292
+ )
293
+ self._session = AuthorizedSession(
294
+ self._credentials, default_host=self.DEFAULT_HOST
295
+ )
296
+ if client_cert_source_for_mtls:
297
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
298
+ self._interceptor = interceptor or ModelServiceRestInterceptor()
299
+ self._prep_wrapped_messages(client_info)
300
+
301
    class _GetModel(_BaseModelServiceRestTransport._BaseGetModel, ModelServiceRestStub):
        # Synchronous REST stub for the ModelService.GetModel RPC.

        def __hash__(self):
            return hash("ModelServiceRestTransport.GetModel")

        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Issue the transcoded HTTP request over the authorized session.
            # ``body`` is unused here (GetModel is a GET) but is kept for
            # signature parity with the body-carrying stubs.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
            )
            return response

        def __call__(
            self,
            request: model_service.GetModelRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
        ) -> model.Model:
            r"""Call the get model method over HTTP.

            Args:
                request (~.model_service.GetModelRequest):
                    The request object. Request for getting information about
                    a specific Model.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                    sent along with the request as metadata. Normally, each value must be of type `str`,
                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
                    be of type `bytes`.

            Returns:
                ~.model.Model:
                    Information about a Generative
                    Language Model.

            """

            http_options = (
                _BaseModelServiceRestTransport._BaseGetModel._get_http_options()
            )

            # Let the interceptor rewrite the request/metadata before transcoding.
            request, metadata = self._interceptor.pre_get_model(request, metadata)
            transcoded_request = (
                _BaseModelServiceRestTransport._BaseGetModel._get_transcoded_request(
                    http_options, request
                )
            )

            # Jsonify the query params
            query_params = (
                _BaseModelServiceRestTransport._BaseGetModel._get_query_params_json(
                    transcoded_request
                )
            )

            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                request_url = "{host}{uri}".format(
                    host=self._host, uri=transcoded_request["uri"]
                )
                method = transcoded_request["method"]
                try:
                    request_payload = type(request).to_json(request)
                except:
                    # Best-effort logging only; never fail the RPC over it.
                    request_payload = None
                http_request = {
                    "payload": request_payload,
                    "requestMethod": method,
                    "requestUrl": request_url,
                    "headers": dict(metadata),
                }
                _LOGGER.debug(
                    f"Sending request for google.ai.generativelanguage_v1.ModelServiceClient.GetModel",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "GetModel",
                        "httpRequest": http_request,
                        "metadata": http_request["headers"],
                    },
                )

            # Send the request
            response = ModelServiceRestTransport._GetModel._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
            )

            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)

            # Return the response
            resp = model.Model()
            pb_resp = model.Model.pb(resp)

            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

            resp = self._interceptor.post_get_model(resp)
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                try:
                    # NOTE(review): this serializes the raw HTTP ``response``,
                    # not the parsed ``resp``; ``to_json`` presumably raises
                    # here, so the logged payload is None — generated-code
                    # quirk, confirm against the upstream GAPIC template.
                    response_payload = model.Model.to_json(response)
                except:
                    response_payload = None
                http_response = {
                    "payload": response_payload,
                    "headers": dict(response.headers),
                    "status": response.status_code,
                }
                _LOGGER.debug(
                    "Received response for google.ai.generativelanguage_v1.ModelServiceClient.get_model",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "GetModel",
                        "metadata": http_response["headers"],
                        "httpResponse": http_response,
                    },
                )
            return resp
445
+
446
    class _ListModels(
        _BaseModelServiceRestTransport._BaseListModels, ModelServiceRestStub
    ):
        # Synchronous REST stub for the ModelService.ListModels RPC.

        def __hash__(self):
            return hash("ModelServiceRestTransport.ListModels")

        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Issue the transcoded HTTP request over the authorized session.
            # ``body`` is unused (ListModels is a GET); kept for parity.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
            )
            return response

        def __call__(
            self,
            request: model_service.ListModelsRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
        ) -> model_service.ListModelsResponse:
            r"""Call the list models method over HTTP.

            Args:
                request (~.model_service.ListModelsRequest):
                    The request object. Request for listing all Models.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                    sent along with the request as metadata. Normally, each value must be of type `str`,
                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
                    be of type `bytes`.

            Returns:
                ~.model_service.ListModelsResponse:
                    Response from ``ListModel`` containing a paginated list
                    of Models.

            """

            http_options = (
                _BaseModelServiceRestTransport._BaseListModels._get_http_options()
            )

            # Let the interceptor rewrite the request/metadata before transcoding.
            request, metadata = self._interceptor.pre_list_models(request, metadata)
            transcoded_request = (
                _BaseModelServiceRestTransport._BaseListModels._get_transcoded_request(
                    http_options, request
                )
            )

            # Jsonify the query params
            query_params = (
                _BaseModelServiceRestTransport._BaseListModels._get_query_params_json(
                    transcoded_request
                )
            )

            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                request_url = "{host}{uri}".format(
                    host=self._host, uri=transcoded_request["uri"]
                )
                method = transcoded_request["method"]
                try:
                    request_payload = type(request).to_json(request)
                except:
                    # Best-effort logging only; never fail the RPC over it.
                    request_payload = None
                http_request = {
                    "payload": request_payload,
                    "requestMethod": method,
                    "requestUrl": request_url,
                    "headers": dict(metadata),
                }
                _LOGGER.debug(
                    f"Sending request for google.ai.generativelanguage_v1.ModelServiceClient.ListModels",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "ListModels",
                        "httpRequest": http_request,
                        "metadata": http_request["headers"],
                    },
                )

            # Send the request
            response = ModelServiceRestTransport._ListModels._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
            )

            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)

            # Return the response
            resp = model_service.ListModelsResponse()
            pb_resp = model_service.ListModelsResponse.pb(resp)

            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)

            resp = self._interceptor.post_list_models(resp)
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                try:
                    # NOTE(review): serializes the raw HTTP ``response`` rather
                    # than the parsed ``resp`` — generated-code quirk; the
                    # logged payload is presumably always None here.
                    response_payload = model_service.ListModelsResponse.to_json(
                        response
                    )
                except:
                    response_payload = None
                http_response = {
                    "payload": response_payload,
                    "headers": dict(response.headers),
                    "status": response.status_code,
                }
                _LOGGER.debug(
                    "Received response for google.ai.generativelanguage_v1.ModelServiceClient.list_models",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "ListModels",
                        "metadata": http_response["headers"],
                        "httpResponse": http_response,
                    },
                )
            return resp
593
+
594
+ @property
595
+ def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
596
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
597
+ # In C++ this would require a dynamic_cast
598
+ return self._GetModel(self._session, self._host, self._interceptor) # type: ignore
599
+
600
+ @property
601
+ def list_models(
602
+ self,
603
+ ) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
604
+ # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
605
+ # In C++ this would require a dynamic_cast
606
+ return self._ListModels(self._session, self._host, self._interceptor) # type: ignore
607
+
608
+ @property
609
+ def cancel_operation(self):
610
+ return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore
611
+
612
    class _CancelOperation(
        _BaseModelServiceRestTransport._BaseCancelOperation, ModelServiceRestStub
    ):
        # Synchronous REST stub for the operations-mixin CancelOperation RPC.

        def __hash__(self):
            return hash("ModelServiceRestTransport.CancelOperation")

        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Issue the transcoded HTTP request over the authorized session.
            # Unlike the GET stubs, this POST carries a JSON ``body``.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
                data=body,
            )
            return response

        def __call__(
            self,
            request: operations_pb2.CancelOperationRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
        ) -> None:
            r"""Call the cancel operation method over HTTP.

            Args:
                request (operations_pb2.CancelOperationRequest):
                    The request object for CancelOperation method.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                    sent along with the request as metadata. Normally, each value must be of type `str`,
                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
                    be of type `bytes`.
            """

            http_options = (
                _BaseModelServiceRestTransport._BaseCancelOperation._get_http_options()
            )

            # Let the interceptor rewrite the request/metadata before transcoding.
            request, metadata = self._interceptor.pre_cancel_operation(
                request, metadata
            )
            transcoded_request = _BaseModelServiceRestTransport._BaseCancelOperation._get_transcoded_request(
                http_options, request
            )

            body = _BaseModelServiceRestTransport._BaseCancelOperation._get_request_body_json(
                transcoded_request
            )

            # Jsonify the query params
            query_params = _BaseModelServiceRestTransport._BaseCancelOperation._get_query_params_json(
                transcoded_request
            )

            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                request_url = "{host}{uri}".format(
                    host=self._host, uri=transcoded_request["uri"]
                )
                method = transcoded_request["method"]
                try:
                    request_payload = json_format.MessageToJson(request)
                except:
                    # Best-effort logging only; never fail the RPC over it.
                    request_payload = None
                http_request = {
                    "payload": request_payload,
                    "requestMethod": method,
                    "requestUrl": request_url,
                    "headers": dict(metadata),
                }
                _LOGGER.debug(
                    f"Sending request for google.ai.generativelanguage_v1.ModelServiceClient.CancelOperation",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "CancelOperation",
                        "httpRequest": http_request,
                        "metadata": http_request["headers"],
                    },
                )

            # Send the request
            response = ModelServiceRestTransport._CancelOperation._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
                body,
            )

            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)

            # CancelOperation has no response body; give the interceptor the
            # final word and propagate whatever it returns (normally None).
            return self._interceptor.post_cancel_operation(None)
727
+
728
+ @property
729
+ def get_operation(self):
730
+ return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore
731
+
732
    class _GetOperation(
        _BaseModelServiceRestTransport._BaseGetOperation, ModelServiceRestStub
    ):
        # Synchronous REST stub for the operations-mixin GetOperation RPC.

        def __hash__(self):
            return hash("ModelServiceRestTransport.GetOperation")

        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Issue the transcoded HTTP request over the authorized session.
            # ``body`` is unused (GetOperation is a GET); kept for parity.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
            )
            return response

        def __call__(
            self,
            request: operations_pb2.GetOperationRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
        ) -> operations_pb2.Operation:
            r"""Call the get operation method over HTTP.

            Args:
                request (operations_pb2.GetOperationRequest):
                    The request object for GetOperation method.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                    sent along with the request as metadata. Normally, each value must be of type `str`,
                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
                    be of type `bytes`.

            Returns:
                operations_pb2.Operation: Response from GetOperation method.
            """

            http_options = (
                _BaseModelServiceRestTransport._BaseGetOperation._get_http_options()
            )

            # Let the interceptor rewrite the request/metadata before transcoding.
            request, metadata = self._interceptor.pre_get_operation(request, metadata)
            transcoded_request = _BaseModelServiceRestTransport._BaseGetOperation._get_transcoded_request(
                http_options, request
            )

            # Jsonify the query params
            query_params = (
                _BaseModelServiceRestTransport._BaseGetOperation._get_query_params_json(
                    transcoded_request
                )
            )

            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                request_url = "{host}{uri}".format(
                    host=self._host, uri=transcoded_request["uri"]
                )
                method = transcoded_request["method"]
                try:
                    request_payload = json_format.MessageToJson(request)
                except:
                    # Best-effort logging only; never fail the RPC over it.
                    request_payload = None
                http_request = {
                    "payload": request_payload,
                    "requestMethod": method,
                    "requestUrl": request_url,
                    "headers": dict(metadata),
                }
                _LOGGER.debug(
                    f"Sending request for google.ai.generativelanguage_v1.ModelServiceClient.GetOperation",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "GetOperation",
                        "httpRequest": http_request,
                        "metadata": http_request["headers"],
                    },
                )

            # Send the request
            response = ModelServiceRestTransport._GetOperation._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
            )

            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)

            # Deserialize the JSON body straight into the well-known Operation proto.
            content = response.content.decode("utf-8")
            resp = operations_pb2.Operation()
            resp = json_format.Parse(content, resp)
            resp = self._interceptor.post_get_operation(resp)
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                try:
                    response_payload = json_format.MessageToJson(resp)
                except:
                    response_payload = None
                http_response = {
                    "payload": response_payload,
                    "headers": dict(response.headers),
                    "status": response.status_code,
                }
                # NOTE(review): the log name says "ModelServiceAsyncClient"
                # although this is the sync transport — generated-code quirk.
                _LOGGER.debug(
                    "Received response for google.ai.generativelanguage_v1.ModelServiceAsyncClient.GetOperation",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "GetOperation",
                        "httpResponse": http_response,
                        "metadata": http_response["headers"],
                    },
                )
            return resp
869
+
870
+ @property
871
+ def list_operations(self):
872
+ return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore
873
+
874
    class _ListOperations(
        _BaseModelServiceRestTransport._BaseListOperations, ModelServiceRestStub
    ):
        # Synchronous REST stub for the operations-mixin ListOperations RPC.

        def __hash__(self):
            return hash("ModelServiceRestTransport.ListOperations")

        @staticmethod
        def _get_response(
            host,
            metadata,
            query_params,
            session,
            timeout,
            transcoded_request,
            body=None,
        ):
            # Issue the transcoded HTTP request over the authorized session.
            # ``body`` is unused (ListOperations is a GET); kept for parity.
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(session, method)(
                "{host}{uri}".format(host=host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params, strict=True),
            )
            return response

        def __call__(
            self,
            request: operations_pb2.ListOperationsRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
        ) -> operations_pb2.ListOperationsResponse:
            r"""Call the list operations method over HTTP.

            Args:
                request (operations_pb2.ListOperationsRequest):
                    The request object for ListOperations method.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
                    sent along with the request as metadata. Normally, each value must be of type `str`,
                    but for metadata keys ending with the suffix `-bin`, the corresponding values must
                    be of type `bytes`.

            Returns:
                operations_pb2.ListOperationsResponse: Response from ListOperations method.
            """

            http_options = (
                _BaseModelServiceRestTransport._BaseListOperations._get_http_options()
            )

            # Let the interceptor rewrite the request/metadata before transcoding.
            request, metadata = self._interceptor.pre_list_operations(request, metadata)
            transcoded_request = _BaseModelServiceRestTransport._BaseListOperations._get_transcoded_request(
                http_options, request
            )

            # Jsonify the query params
            query_params = _BaseModelServiceRestTransport._BaseListOperations._get_query_params_json(
                transcoded_request
            )

            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                request_url = "{host}{uri}".format(
                    host=self._host, uri=transcoded_request["uri"]
                )
                method = transcoded_request["method"]
                try:
                    request_payload = json_format.MessageToJson(request)
                except:
                    # Best-effort logging only; never fail the RPC over it.
                    request_payload = None
                http_request = {
                    "payload": request_payload,
                    "requestMethod": method,
                    "requestUrl": request_url,
                    "headers": dict(metadata),
                }
                _LOGGER.debug(
                    f"Sending request for google.ai.generativelanguage_v1.ModelServiceClient.ListOperations",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "ListOperations",
                        "httpRequest": http_request,
                        "metadata": http_request["headers"],
                    },
                )

            # Send the request
            response = ModelServiceRestTransport._ListOperations._get_response(
                self._host,
                metadata,
                query_params,
                self._session,
                timeout,
                transcoded_request,
            )

            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)

            # Deserialize the JSON body straight into the well-known proto.
            content = response.content.decode("utf-8")
            resp = operations_pb2.ListOperationsResponse()
            resp = json_format.Parse(content, resp)
            resp = self._interceptor.post_list_operations(resp)
            if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
                logging.DEBUG
            ):  # pragma: NO COVER
                try:
                    response_payload = json_format.MessageToJson(resp)
                except:
                    response_payload = None
                http_response = {
                    "payload": response_payload,
                    "headers": dict(response.headers),
                    "status": response.status_code,
                }
                # NOTE(review): the log name says "ModelServiceAsyncClient"
                # although this is the sync transport — generated-code quirk.
                _LOGGER.debug(
                    "Received response for google.ai.generativelanguage_v1.ModelServiceAsyncClient.ListOperations",
                    extra={
                        "serviceName": "google.ai.generativelanguage.v1.ModelService",
                        "rpcName": "ListOperations",
                        "httpResponse": http_response,
                        "metadata": http_response["headers"],
                    },
                )
            return resp
1009
+
1010
+ @property
1011
+ def kind(self) -> str:
1012
+ return "rest"
1013
+
1014
+ def close(self):
1015
+ self._session.close()
1016
+
1017
+
1018
# Public surface of this module: only the concrete sync REST transport.
__all__ = ("ModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/services/model_service/transports/rest_base.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import json # type: ignore
17
+ import re
18
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
19
+
20
+ from google.api_core import gapic_v1, path_template
21
+ from google.longrunning import operations_pb2 # type: ignore
22
+ from google.protobuf import json_format
23
+
24
+ from google.ai.generativelanguage_v1.types import model, model_service
25
+
26
+ from .base import DEFAULT_CLIENT_INFO, ModelServiceTransport
27
+
28
+
29
class _BaseModelServiceRestTransport(ModelServiceTransport):
    """Base REST backend transport for ModelService.

    Note: This class is not meant to be used directly. Use its sync and
    async sub-classes instead.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(
        self,
        *,
        host: str = "generativelanguage.googleapis.com",
        credentials: Optional[Any] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[Any]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # Split any scheme prefix off the configured host so we can prepend
        # ``url_scheme`` only when the caller did not supply one.
        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if maybe_url_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER

        url_match_items = maybe_url_match.groupdict()

        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host

        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

    class _BaseGetModel:
        # Static helpers shared by the sync/async GetModel stubs: HTTP rule,
        # request transcoding, and query-parameter JSON-ification.

        def __hash__(self):  # pragma: NO COVER
            # NOTE(review): returns (not raises) NotImplementedError —
            # generated-code quirk; subclasses override __hash__ anyway.
            return NotImplementedError("__hash__ must be implemented.")

        # Required query-string fields and their defaults; name-mangled to
        # _BaseGetModel__REQUIRED_FIELDS_DEFAULT_VALUES.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Defaults for required fields the request did not populate.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        @staticmethod
        def _get_http_options():
            # HTTP rule(s) from the google.api.http annotation for GetModel.
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/v1/{name=models/*}",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            # Map the proto request onto a concrete URI/method/query tuple.
            pb_request = model_service.GetModelRequest.pb(request)
            transcoded_request = path_template.transcode(http_options, pb_request)
            return transcoded_request

        @staticmethod
        def _get_query_params_json(transcoded_request):
            query_params = json.loads(
                json_format.MessageToJson(
                    transcoded_request["query_params"],
                    use_integers_for_enums=True,
                )
            )
            query_params.update(
                _BaseModelServiceRestTransport._BaseGetModel._get_unset_required_fields(
                    query_params
                )
            )

            # Ask the server for JSON with integer-encoded enums.
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseListModels:
        # Static helpers shared by the sync/async ListModels stubs.

        def __hash__(self):  # pragma: NO COVER
            # NOTE(review): returns (not raises) NotImplementedError —
            # generated-code quirk; subclasses override __hash__ anyway.
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            # HTTP rule(s) from the google.api.http annotation for ListModels.
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/v1/models",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            # Map the proto request onto a concrete URI/method/query tuple.
            pb_request = model_service.ListModelsRequest.pb(request)
            transcoded_request = path_template.transcode(http_options, pb_request)
            return transcoded_request

        @staticmethod
        def _get_query_params_json(transcoded_request):
            query_params = json.loads(
                json_format.MessageToJson(
                    transcoded_request["query_params"],
                    use_integers_for_enums=True,
                )
            )

            # Ask the server for JSON with integer-encoded enums.
            query_params["$alt"] = "json;enum-encoding=int"
            return query_params

    class _BaseCancelOperation:
        # Static helpers for the operations-mixin CancelOperation stub.
        # Mixin requests are plain protobuf (not proto-plus), hence the
        # MessageToDict/kwargs transcoding below.

        def __hash__(self):  # pragma: NO COVER
            # NOTE(review): returns (not raises) NotImplementedError —
            # generated-code quirk; subclasses override __hash__ anyway.
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            http_options: List[Dict[str, str]] = [
                {
                    "method": "post",
                    "uri": "/v1/{name=tunedModels/*/operations/*}:cancel",
                    "body": "*",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            request_kwargs = json_format.MessageToDict(request)
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            return transcoded_request

        @staticmethod
        def _get_request_body_json(transcoded_request):
            # Serialize the already-dict body for the POST payload.
            body = json.dumps(transcoded_request["body"])
            return body

        @staticmethod
        def _get_query_params_json(transcoded_request):
            query_params = json.loads(json.dumps(transcoded_request["query_params"]))
            return query_params

    class _BaseGetOperation:
        # Static helpers for the operations-mixin GetOperation stub.

        def __hash__(self):  # pragma: NO COVER
            # NOTE(review): returns (not raises) NotImplementedError —
            # generated-code quirk; subclasses override __hash__ anyway.
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            # Operations can live under several parents; one rule each.
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/v1/{name=tunedModels/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=generatedFiles/*/operations/*}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=models/*/operations/*}",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            request_kwargs = json_format.MessageToDict(request)
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            return transcoded_request

        @staticmethod
        def _get_query_params_json(transcoded_request):
            query_params = json.loads(json.dumps(transcoded_request["query_params"]))
            return query_params

    class _BaseListOperations:
        # Static helpers for the operations-mixin ListOperations stub.

        def __hash__(self):  # pragma: NO COVER
            # NOTE(review): returns (not raises) NotImplementedError —
            # generated-code quirk; subclasses override __hash__ anyway.
            return NotImplementedError("__hash__ must be implemented.")

        @staticmethod
        def _get_http_options():
            # Operations can be listed from several parents; one rule each.
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/v1/{name=operations}",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=tunedModels/*}/operations",
                },
                {
                    "method": "get",
                    "uri": "/v1/{name=models/*}/operations",
                },
            ]
            return http_options

        @staticmethod
        def _get_transcoded_request(http_options, request):
            request_kwargs = json_format.MessageToDict(request)
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            return transcoded_request

        @staticmethod
        def _get_query_params_json(transcoded_request):
            query_params = json.loads(json.dumps(transcoded_request["query_params"]))
            return query_params
265
+ return query_params
266
+
267
+
268
# Public surface of this module: only the shared REST transport base class.
__all__ = ("_BaseModelServiceRestTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__init__.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from .citation import CitationMetadata, CitationSource
17
+ from .content import Blob, Content, Part
18
+ from .generative_service import (
19
+ BatchEmbedContentsRequest,
20
+ BatchEmbedContentsResponse,
21
+ Candidate,
22
+ ContentEmbedding,
23
+ CountTokensRequest,
24
+ CountTokensResponse,
25
+ EmbedContentRequest,
26
+ EmbedContentResponse,
27
+ GenerateContentRequest,
28
+ GenerateContentResponse,
29
+ GenerationConfig,
30
+ GroundingChunk,
31
+ GroundingMetadata,
32
+ GroundingSupport,
33
+ LogprobsResult,
34
+ RetrievalMetadata,
35
+ SearchEntryPoint,
36
+ Segment,
37
+ TaskType,
38
+ )
39
+ from .model import Model
40
+ from .model_service import GetModelRequest, ListModelsRequest, ListModelsResponse
41
+ from .safety import HarmCategory, SafetyRating, SafetySetting
42
+
43
+ __all__ = (
44
+ "CitationMetadata",
45
+ "CitationSource",
46
+ "Blob",
47
+ "Content",
48
+ "Part",
49
+ "BatchEmbedContentsRequest",
50
+ "BatchEmbedContentsResponse",
51
+ "Candidate",
52
+ "ContentEmbedding",
53
+ "CountTokensRequest",
54
+ "CountTokensResponse",
55
+ "EmbedContentRequest",
56
+ "EmbedContentResponse",
57
+ "GenerateContentRequest",
58
+ "GenerateContentResponse",
59
+ "GenerationConfig",
60
+ "GroundingChunk",
61
+ "GroundingMetadata",
62
+ "GroundingSupport",
63
+ "LogprobsResult",
64
+ "RetrievalMetadata",
65
+ "SearchEntryPoint",
66
+ "Segment",
67
+ "TaskType",
68
+ "Model",
69
+ "GetModelRequest",
70
+ "ListModelsRequest",
71
+ "ListModelsResponse",
72
+ "SafetyRating",
73
+ "SafetySetting",
74
+ "HarmCategory",
75
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/generative_service.cpython-311.pyc ADDED
Binary file (47.5 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/model_service.cpython-311.pyc ADDED
Binary file (4.02 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/__pycache__/safety.cpython-311.pyc ADDED
Binary file (7.71 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/citation.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1",
24
+ manifest={
25
+ "CitationMetadata",
26
+ "CitationSource",
27
+ },
28
+ )
29
+
30
+
31
+ class CitationMetadata(proto.Message):
32
+ r"""A collection of source attributions for a piece of content.
33
+
34
+ Attributes:
35
+ citation_sources (MutableSequence[google.ai.generativelanguage_v1.types.CitationSource]):
36
+ Citations to sources for a specific response.
37
+ """
38
+
39
+ citation_sources: MutableSequence["CitationSource"] = proto.RepeatedField(
40
+ proto.MESSAGE,
41
+ number=1,
42
+ message="CitationSource",
43
+ )
44
+
45
+
46
+ class CitationSource(proto.Message):
47
+ r"""A citation to a source for a portion of a specific response.
48
+
49
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
50
+
51
+ Attributes:
52
+ start_index (int):
53
+ Optional. Start of segment of the response
54
+ that is attributed to this source.
55
+
56
+ Index indicates the start of the segment,
57
+ measured in bytes.
58
+
59
+ This field is a member of `oneof`_ ``_start_index``.
60
+ end_index (int):
61
+ Optional. End of the attributed segment,
62
+ exclusive.
63
+
64
+ This field is a member of `oneof`_ ``_end_index``.
65
+ uri (str):
66
+ Optional. URI that is attributed as a source
67
+ for a portion of the text.
68
+
69
+ This field is a member of `oneof`_ ``_uri``.
70
+ license_ (str):
71
+ Optional. License for the GitHub project that
72
+ is attributed as a source for segment.
73
+
74
+ License info is required for code citations.
75
+
76
+ This field is a member of `oneof`_ ``_license``.
77
+ """
78
+
79
+ start_index: int = proto.Field(
80
+ proto.INT32,
81
+ number=1,
82
+ optional=True,
83
+ )
84
+ end_index: int = proto.Field(
85
+ proto.INT32,
86
+ number=2,
87
+ optional=True,
88
+ )
89
+ uri: str = proto.Field(
90
+ proto.STRING,
91
+ number=3,
92
+ optional=True,
93
+ )
94
+ license_: str = proto.Field(
95
+ proto.STRING,
96
+ number=4,
97
+ optional=True,
98
+ )
99
+
100
+
101
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/content.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1",
24
+ manifest={
25
+ "Content",
26
+ "Part",
27
+ "Blob",
28
+ },
29
+ )
30
+
31
+
32
+ class Content(proto.Message):
33
+ r"""The base structured datatype containing multi-part content of a
34
+ message.
35
+
36
+ A ``Content`` includes a ``role`` field designating the producer of
37
+ the ``Content`` and a ``parts`` field containing multi-part data
38
+ that contains the content of the message turn.
39
+
40
+ Attributes:
41
+ parts (MutableSequence[google.ai.generativelanguage_v1.types.Part]):
42
+ Ordered ``Parts`` that constitute a single message. Parts
43
+ may have different MIME types.
44
+ role (str):
45
+ Optional. The producer of the content. Must
46
+ be either 'user' or 'model'.
47
+ Useful to set for multi-turn conversations,
48
+ otherwise can be left blank or unset.
49
+ """
50
+
51
+ parts: MutableSequence["Part"] = proto.RepeatedField(
52
+ proto.MESSAGE,
53
+ number=1,
54
+ message="Part",
55
+ )
56
+ role: str = proto.Field(
57
+ proto.STRING,
58
+ number=2,
59
+ )
60
+
61
+
62
+ class Part(proto.Message):
63
+ r"""A datatype containing media that is part of a multi-part ``Content``
64
+ message.
65
+
66
+ A ``Part`` consists of data which has an associated datatype. A
67
+ ``Part`` can only contain one of the accepted types in
68
+ ``Part.data``.
69
+
70
+ A ``Part`` must have a fixed IANA MIME type identifying the type and
71
+ subtype of the media if the ``inline_data`` field is filled with raw
72
+ bytes.
73
+
74
+ This message has `oneof`_ fields (mutually exclusive fields).
75
+ For each oneof, at most one member field can be set at the same time.
76
+ Setting any member of the oneof automatically clears all other
77
+ members.
78
+
79
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
80
+
81
+ Attributes:
82
+ text (str):
83
+ Inline text.
84
+
85
+ This field is a member of `oneof`_ ``data``.
86
+ inline_data (google.ai.generativelanguage_v1.types.Blob):
87
+ Inline media bytes.
88
+
89
+ This field is a member of `oneof`_ ``data``.
90
+ """
91
+
92
+ text: str = proto.Field(
93
+ proto.STRING,
94
+ number=2,
95
+ oneof="data",
96
+ )
97
+ inline_data: "Blob" = proto.Field(
98
+ proto.MESSAGE,
99
+ number=3,
100
+ oneof="data",
101
+ message="Blob",
102
+ )
103
+
104
+
105
+ class Blob(proto.Message):
106
+ r"""Raw media bytes.
107
+
108
+ Text should not be sent as raw bytes, use the 'text' field.
109
+
110
+ Attributes:
111
+ mime_type (str):
112
+ The IANA standard MIME type of the source data. Examples:
113
+
114
+ - image/png
115
+ - image/jpeg If an unsupported MIME type is provided, an
116
+ error will be returned. For a complete list of supported
117
+ types, see `Supported file
118
+ formats <https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>`__.
119
+ data (bytes):
120
+ Raw bytes for media formats.
121
+ """
122
+
123
+ mime_type: str = proto.Field(
124
+ proto.STRING,
125
+ number=1,
126
+ )
127
+ data: bytes = proto.Field(
128
+ proto.BYTES,
129
+ number=2,
130
+ )
131
+
132
+
133
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/generative_service.py ADDED
@@ -0,0 +1,1129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1.types import citation
23
+ from google.ai.generativelanguage_v1.types import content as gag_content
24
+ from google.ai.generativelanguage_v1.types import safety
25
+
26
+ __protobuf__ = proto.module(
27
+ package="google.ai.generativelanguage.v1",
28
+ manifest={
29
+ "TaskType",
30
+ "GenerateContentRequest",
31
+ "GenerationConfig",
32
+ "GenerateContentResponse",
33
+ "Candidate",
34
+ "LogprobsResult",
35
+ "RetrievalMetadata",
36
+ "GroundingMetadata",
37
+ "SearchEntryPoint",
38
+ "GroundingChunk",
39
+ "Segment",
40
+ "GroundingSupport",
41
+ "EmbedContentRequest",
42
+ "ContentEmbedding",
43
+ "EmbedContentResponse",
44
+ "BatchEmbedContentsRequest",
45
+ "BatchEmbedContentsResponse",
46
+ "CountTokensRequest",
47
+ "CountTokensResponse",
48
+ },
49
+ )
50
+
51
+
52
+ class TaskType(proto.Enum):
53
+ r"""Type of task for which the embedding will be used.
54
+
55
+ Values:
56
+ TASK_TYPE_UNSPECIFIED (0):
57
+ Unset value, which will default to one of the
58
+ other enum values.
59
+ RETRIEVAL_QUERY (1):
60
+ Specifies the given text is a query in a
61
+ search/retrieval setting.
62
+ RETRIEVAL_DOCUMENT (2):
63
+ Specifies the given text is a document from
64
+ the corpus being searched.
65
+ SEMANTIC_SIMILARITY (3):
66
+ Specifies the given text will be used for
67
+ STS.
68
+ CLASSIFICATION (4):
69
+ Specifies that the given text will be
70
+ classified.
71
+ CLUSTERING (5):
72
+ Specifies that the embeddings will be used
73
+ for clustering.
74
+ QUESTION_ANSWERING (6):
75
+ Specifies that the given text will be used
76
+ for question answering.
77
+ FACT_VERIFICATION (7):
78
+ Specifies that the given text will be used
79
+ for fact verification.
80
+ """
81
+ TASK_TYPE_UNSPECIFIED = 0
82
+ RETRIEVAL_QUERY = 1
83
+ RETRIEVAL_DOCUMENT = 2
84
+ SEMANTIC_SIMILARITY = 3
85
+ CLASSIFICATION = 4
86
+ CLUSTERING = 5
87
+ QUESTION_ANSWERING = 6
88
+ FACT_VERIFICATION = 7
89
+
90
+
91
+ class GenerateContentRequest(proto.Message):
92
+ r"""Request to generate a completion from the model.
93
+
94
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
95
+
96
+ Attributes:
97
+ model (str):
98
+ Required. The name of the ``Model`` to use for generating
99
+ the completion.
100
+
101
+ Format: ``models/{model}``.
102
+ contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]):
103
+ Required. The content of the current conversation with the
104
+ model.
105
+
106
+ For single-turn queries, this is a single instance. For
107
+ multi-turn queries like
108
+ `chat <https://ai.google.dev/gemini-api/docs/text-generation#chat>`__,
109
+ this is a repeated field that contains the conversation
110
+ history and the latest request.
111
+ safety_settings (MutableSequence[google.ai.generativelanguage_v1.types.SafetySetting]):
112
+ Optional. A list of unique ``SafetySetting`` instances for
113
+ blocking unsafe content.
114
+
115
+ This will be enforced on the
116
+ ``GenerateContentRequest.contents`` and
117
+ ``GenerateContentResponse.candidates``. There should not be
118
+ more than one setting for each ``SafetyCategory`` type. The
119
+ API will block any contents and responses that fail to meet
120
+ the thresholds set by these settings. This list overrides
121
+ the default settings for each ``SafetyCategory`` specified
122
+ in the safety_settings. If there is no ``SafetySetting`` for
123
+ a given ``SafetyCategory`` provided in the list, the API
124
+ will use the default safety setting for that category. Harm
125
+ categories HARM_CATEGORY_HATE_SPEECH,
126
+ HARM_CATEGORY_SEXUALLY_EXPLICIT,
127
+ HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT,
128
+ HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the
129
+ `guide <https://ai.google.dev/gemini-api/docs/safety-settings>`__
130
+ for detailed information on available safety settings. Also
131
+ refer to the `Safety
132
+ guidance <https://ai.google.dev/gemini-api/docs/safety-guidance>`__
133
+ to learn how to incorporate safety considerations in your AI
134
+ applications.
135
+ generation_config (google.ai.generativelanguage_v1.types.GenerationConfig):
136
+ Optional. Configuration options for model
137
+ generation and outputs.
138
+
139
+ This field is a member of `oneof`_ ``_generation_config``.
140
+ """
141
+
142
+ model: str = proto.Field(
143
+ proto.STRING,
144
+ number=1,
145
+ )
146
+ contents: MutableSequence[gag_content.Content] = proto.RepeatedField(
147
+ proto.MESSAGE,
148
+ number=2,
149
+ message=gag_content.Content,
150
+ )
151
+ safety_settings: MutableSequence[safety.SafetySetting] = proto.RepeatedField(
152
+ proto.MESSAGE,
153
+ number=3,
154
+ message=safety.SafetySetting,
155
+ )
156
+ generation_config: "GenerationConfig" = proto.Field(
157
+ proto.MESSAGE,
158
+ number=4,
159
+ optional=True,
160
+ message="GenerationConfig",
161
+ )
162
+
163
+
164
+ class GenerationConfig(proto.Message):
165
+ r"""Configuration options for model generation and outputs. Not
166
+ all parameters are configurable for every model.
167
+
168
+
169
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
170
+
171
+ Attributes:
172
+ candidate_count (int):
173
+ Optional. Number of generated responses to
174
+ return.
175
+ Currently, this value can only be set to 1. If
176
+ unset, this will default to 1.
177
+
178
+ This field is a member of `oneof`_ ``_candidate_count``.
179
+ stop_sequences (MutableSequence[str]):
180
+ Optional. The set of character sequences (up to 5) that will
181
+ stop output generation. If specified, the API will stop at
182
+ the first appearance of a ``stop_sequence``. The stop
183
+ sequence will not be included as part of the response.
184
+ max_output_tokens (int):
185
+ Optional. The maximum number of tokens to include in a
186
+ response candidate.
187
+
188
+ Note: The default value varies by model, see the
189
+ ``Model.output_token_limit`` attribute of the ``Model``
190
+ returned from the ``getModel`` function.
191
+
192
+ This field is a member of `oneof`_ ``_max_output_tokens``.
193
+ temperature (float):
194
+ Optional. Controls the randomness of the output.
195
+
196
+ Note: The default value varies by model, see the
197
+ ``Model.temperature`` attribute of the ``Model`` returned
198
+ from the ``getModel`` function.
199
+
200
+ Values can range from [0.0, 2.0].
201
+
202
+ This field is a member of `oneof`_ ``_temperature``.
203
+ top_p (float):
204
+ Optional. The maximum cumulative probability of tokens to
205
+ consider when sampling.
206
+
207
+ The model uses combined Top-k and Top-p (nucleus) sampling.
208
+
209
+ Tokens are sorted based on their assigned probabilities so
210
+ that only the most likely tokens are considered. Top-k
211
+ sampling directly limits the maximum number of tokens to
212
+ consider, while Nucleus sampling limits the number of tokens
213
+ based on the cumulative probability.
214
+
215
+ Note: The default value varies by ``Model`` and is specified
216
+ by the\ ``Model.top_p`` attribute returned from the
217
+ ``getModel`` function. An empty ``top_k`` attribute
218
+ indicates that the model doesn't apply top-k sampling and
219
+ doesn't allow setting ``top_k`` on requests.
220
+
221
+ This field is a member of `oneof`_ ``_top_p``.
222
+ top_k (int):
223
+ Optional. The maximum number of tokens to consider when
224
+ sampling.
225
+
226
+ Gemini models use Top-p (nucleus) sampling or a combination
227
+ of Top-k and nucleus sampling. Top-k sampling considers the
228
+ set of ``top_k`` most probable tokens. Models running with
229
+ nucleus sampling don't allow top_k setting.
230
+
231
+ Note: The default value varies by ``Model`` and is specified
232
+ by the\ ``Model.top_p`` attribute returned from the
233
+ ``getModel`` function. An empty ``top_k`` attribute
234
+ indicates that the model doesn't apply top-k sampling and
235
+ doesn't allow setting ``top_k`` on requests.
236
+
237
+ This field is a member of `oneof`_ ``_top_k``.
238
+ presence_penalty (float):
239
+ Optional. Presence penalty applied to the next token's
240
+ logprobs if the token has already been seen in the response.
241
+
242
+ This penalty is binary on/off and not dependant on the
243
+ number of times the token is used (after the first). Use
244
+ [frequency_penalty][google.ai.generativelanguage.v1.GenerationConfig.frequency_penalty]
245
+ for a penalty that increases with each use.
246
+
247
+ A positive penalty will discourage the use of tokens that
248
+ have already been used in the response, increasing the
249
+ vocabulary.
250
+
251
+ A negative penalty will encourage the use of tokens that
252
+ have already been used in the response, decreasing the
253
+ vocabulary.
254
+
255
+ This field is a member of `oneof`_ ``_presence_penalty``.
256
+ frequency_penalty (float):
257
+ Optional. Frequency penalty applied to the next token's
258
+ logprobs, multiplied by the number of times each token has
259
+ been seen in the respponse so far.
260
+
261
+ A positive penalty will discourage the use of tokens that
262
+ have already been used, proportional to the number of times
263
+ the token has been used: The more a token is used, the more
264
+ dificult it is for the model to use that token again
265
+ increasing the vocabulary of responses.
266
+
267
+ Caution: A *negative* penalty will encourage the model to
268
+ reuse tokens proportional to the number of times the token
269
+ has been used. Small negative values will reduce the
270
+ vocabulary of a response. Larger negative values will cause
271
+ the model to start repeating a common token until it hits
272
+ the
273
+ [max_output_tokens][google.ai.generativelanguage.v1.GenerationConfig.max_output_tokens]
274
+ limit.
275
+
276
+ This field is a member of `oneof`_ ``_frequency_penalty``.
277
+ response_logprobs (bool):
278
+ Optional. If true, export the logprobs
279
+ results in response.
280
+
281
+ This field is a member of `oneof`_ ``_response_logprobs``.
282
+ logprobs (int):
283
+ Optional. Only valid if
284
+ [response_logprobs=True][google.ai.generativelanguage.v1.GenerationConfig.response_logprobs].
285
+ This sets the number of top logprobs to return at each
286
+ decoding step in the
287
+ [Candidate.logprobs_result][google.ai.generativelanguage.v1.Candidate.logprobs_result].
288
+
289
+ This field is a member of `oneof`_ ``_logprobs``.
290
+ enable_enhanced_civic_answers (bool):
291
+ Optional. Enables enhanced civic answers. It
292
+ may not be available for all models.
293
+
294
+ This field is a member of `oneof`_ ``_enable_enhanced_civic_answers``.
295
+ """
296
+
297
+ candidate_count: int = proto.Field(
298
+ proto.INT32,
299
+ number=1,
300
+ optional=True,
301
+ )
302
+ stop_sequences: MutableSequence[str] = proto.RepeatedField(
303
+ proto.STRING,
304
+ number=2,
305
+ )
306
+ max_output_tokens: int = proto.Field(
307
+ proto.INT32,
308
+ number=4,
309
+ optional=True,
310
+ )
311
+ temperature: float = proto.Field(
312
+ proto.FLOAT,
313
+ number=5,
314
+ optional=True,
315
+ )
316
+ top_p: float = proto.Field(
317
+ proto.FLOAT,
318
+ number=6,
319
+ optional=True,
320
+ )
321
+ top_k: int = proto.Field(
322
+ proto.INT32,
323
+ number=7,
324
+ optional=True,
325
+ )
326
+ presence_penalty: float = proto.Field(
327
+ proto.FLOAT,
328
+ number=15,
329
+ optional=True,
330
+ )
331
+ frequency_penalty: float = proto.Field(
332
+ proto.FLOAT,
333
+ number=16,
334
+ optional=True,
335
+ )
336
+ response_logprobs: bool = proto.Field(
337
+ proto.BOOL,
338
+ number=17,
339
+ optional=True,
340
+ )
341
+ logprobs: int = proto.Field(
342
+ proto.INT32,
343
+ number=18,
344
+ optional=True,
345
+ )
346
+ enable_enhanced_civic_answers: bool = proto.Field(
347
+ proto.BOOL,
348
+ number=19,
349
+ optional=True,
350
+ )
351
+
352
+
353
+ class GenerateContentResponse(proto.Message):
354
+ r"""Response from the model supporting multiple candidate responses.
355
+
356
+ Safety ratings and content filtering are reported for both prompt in
357
+ ``GenerateContentResponse.prompt_feedback`` and for each candidate
358
+ in ``finish_reason`` and in ``safety_ratings``. The API:
359
+
360
+ - Returns either all requested candidates or none of them
361
+ - Returns no candidates at all only if there was something wrong
362
+ with the prompt (check ``prompt_feedback``)
363
+ - Reports feedback on each candidate in ``finish_reason`` and
364
+ ``safety_ratings``.
365
+
366
+ Attributes:
367
+ candidates (MutableSequence[google.ai.generativelanguage_v1.types.Candidate]):
368
+ Candidate responses from the model.
369
+ prompt_feedback (google.ai.generativelanguage_v1.types.GenerateContentResponse.PromptFeedback):
370
+ Returns the prompt's feedback related to the
371
+ content filters.
372
+ usage_metadata (google.ai.generativelanguage_v1.types.GenerateContentResponse.UsageMetadata):
373
+ Output only. Metadata on the generation
374
+ requests' token usage.
375
+ model_version (str):
376
+ Output only. The model version used to
377
+ generate the response.
378
+ """
379
+
380
+ class PromptFeedback(proto.Message):
381
+ r"""A set of the feedback metadata the prompt specified in
382
+ ``GenerateContentRequest.content``.
383
+
384
+ Attributes:
385
+ block_reason (google.ai.generativelanguage_v1.types.GenerateContentResponse.PromptFeedback.BlockReason):
386
+ Optional. If set, the prompt was blocked and
387
+ no candidates are returned. Rephrase the prompt.
388
+ safety_ratings (MutableSequence[google.ai.generativelanguage_v1.types.SafetyRating]):
389
+ Ratings for safety of the prompt.
390
+ There is at most one rating per category.
391
+ """
392
+
393
+ class BlockReason(proto.Enum):
394
+ r"""Specifies the reason why the prompt was blocked.
395
+
396
+ Values:
397
+ BLOCK_REASON_UNSPECIFIED (0):
398
+ Default value. This value is unused.
399
+ SAFETY (1):
400
+ Prompt was blocked due to safety reasons. Inspect
401
+ ``safety_ratings`` to understand which safety category
402
+ blocked it.
403
+ OTHER (2):
404
+ Prompt was blocked due to unknown reasons.
405
+ BLOCKLIST (3):
406
+ Prompt was blocked due to the terms which are
407
+ included from the terminology blocklist.
408
+ PROHIBITED_CONTENT (4):
409
+ Prompt was blocked due to prohibited content.
410
+ IMAGE_SAFETY (5):
411
+ Candidates blocked due to unsafe image
412
+ generation content.
413
+ """
414
+ BLOCK_REASON_UNSPECIFIED = 0
415
+ SAFETY = 1
416
+ OTHER = 2
417
+ BLOCKLIST = 3
418
+ PROHIBITED_CONTENT = 4
419
+ IMAGE_SAFETY = 5
420
+
421
+ block_reason: "GenerateContentResponse.PromptFeedback.BlockReason" = (
422
+ proto.Field(
423
+ proto.ENUM,
424
+ number=1,
425
+ enum="GenerateContentResponse.PromptFeedback.BlockReason",
426
+ )
427
+ )
428
+ safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
429
+ proto.MESSAGE,
430
+ number=2,
431
+ message=safety.SafetyRating,
432
+ )
433
+
434
+ class UsageMetadata(proto.Message):
435
+ r"""Metadata on the generation request's token usage.
436
+
437
+ Attributes:
438
+ prompt_token_count (int):
439
+ Number of tokens in the prompt. When ``cached_content`` is
440
+ set, this is still the total effective prompt size meaning
441
+ this includes the number of tokens in the cached content.
442
+ candidates_token_count (int):
443
+ Total number of tokens across all the
444
+ generated response candidates.
445
+ total_token_count (int):
446
+ Total token count for the generation request
447
+ (prompt + response candidates).
448
+ """
449
+
450
+ prompt_token_count: int = proto.Field(
451
+ proto.INT32,
452
+ number=1,
453
+ )
454
+ candidates_token_count: int = proto.Field(
455
+ proto.INT32,
456
+ number=2,
457
+ )
458
+ total_token_count: int = proto.Field(
459
+ proto.INT32,
460
+ number=3,
461
+ )
462
+
463
+ candidates: MutableSequence["Candidate"] = proto.RepeatedField(
464
+ proto.MESSAGE,
465
+ number=1,
466
+ message="Candidate",
467
+ )
468
+ prompt_feedback: PromptFeedback = proto.Field(
469
+ proto.MESSAGE,
470
+ number=2,
471
+ message=PromptFeedback,
472
+ )
473
+ usage_metadata: UsageMetadata = proto.Field(
474
+ proto.MESSAGE,
475
+ number=3,
476
+ message=UsageMetadata,
477
+ )
478
+ model_version: str = proto.Field(
479
+ proto.STRING,
480
+ number=4,
481
+ )
482
+
483
+
484
+ class Candidate(proto.Message):
485
+ r"""A response candidate generated from the model.
486
+
487
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
488
+
489
+ Attributes:
490
+ index (int):
491
+ Output only. Index of the candidate in the
492
+ list of response candidates.
493
+
494
+ This field is a member of `oneof`_ ``_index``.
495
+ content (google.ai.generativelanguage_v1.types.Content):
496
+ Output only. Generated content returned from
497
+ the model.
498
+ finish_reason (google.ai.generativelanguage_v1.types.Candidate.FinishReason):
499
+ Optional. Output only. The reason why the
500
+ model stopped generating tokens.
501
+ If empty, the model has not stopped generating
502
+ tokens.
503
+ safety_ratings (MutableSequence[google.ai.generativelanguage_v1.types.SafetyRating]):
504
+ List of ratings for the safety of a response
505
+ candidate.
506
+ There is at most one rating per category.
507
+ citation_metadata (google.ai.generativelanguage_v1.types.CitationMetadata):
508
+ Output only. Citation information for model-generated
509
+ candidate.
510
+
511
+ This field may be populated with recitation information for
512
+ any text included in the ``content``. These are passages
513
+ that are "recited" from copyrighted material in the
514
+ foundational LLM's training data.
515
+ token_count (int):
516
+ Output only. Token count for this candidate.
517
+ grounding_metadata (google.ai.generativelanguage_v1.types.GroundingMetadata):
518
+ Output only. Grounding metadata for the candidate.
519
+
520
+ This field is populated for ``GenerateContent`` calls.
521
+ avg_logprobs (float):
522
+ Output only. Average log probability score of
523
+ the candidate.
524
+ logprobs_result (google.ai.generativelanguage_v1.types.LogprobsResult):
525
+ Output only. Log-likelihood scores for the
526
+ response tokens and top tokens
527
+ """
528
+
529
+ class FinishReason(proto.Enum):
530
+ r"""Defines the reason why the model stopped generating tokens.
531
+
532
+ Values:
533
+ FINISH_REASON_UNSPECIFIED (0):
534
+ Default value. This value is unused.
535
+ STOP (1):
536
+ Natural stop point of the model or provided
537
+ stop sequence.
538
+ MAX_TOKENS (2):
539
+ The maximum number of tokens as specified in
540
+ the request was reached.
541
+ SAFETY (3):
542
+ The response candidate content was flagged
543
+ for safety reasons.
544
+ RECITATION (4):
545
+ The response candidate content was flagged
546
+ for recitation reasons.
547
+ LANGUAGE (6):
548
+ The response candidate content was flagged
549
+ for using an unsupported language.
550
+ OTHER (5):
551
+ Unknown reason.
552
+ BLOCKLIST (7):
553
+ Token generation stopped because the content
554
+ contains forbidden terms.
555
+ PROHIBITED_CONTENT (8):
556
+ Token generation stopped for potentially
557
+ containing prohibited content.
558
+ SPII (9):
559
+ Token generation stopped because the content
560
+ potentially contains Sensitive Personally
561
+ Identifiable Information (SPII).
562
+ MALFORMED_FUNCTION_CALL (10):
563
+ The function call generated by the model is
564
+ invalid.
565
+ IMAGE_SAFETY (11):
566
+ Token generation stopped because generated
567
+ images contain safety violations.
568
+ """
569
+ FINISH_REASON_UNSPECIFIED = 0
570
+ STOP = 1
571
+ MAX_TOKENS = 2
572
+ SAFETY = 3
573
+ RECITATION = 4
574
+ LANGUAGE = 6
575
+ OTHER = 5
576
+ BLOCKLIST = 7
577
+ PROHIBITED_CONTENT = 8
578
+ SPII = 9
579
+ MALFORMED_FUNCTION_CALL = 10
580
+ IMAGE_SAFETY = 11
581
+
582
+ index: int = proto.Field(
583
+ proto.INT32,
584
+ number=3,
585
+ optional=True,
586
+ )
587
+ content: gag_content.Content = proto.Field(
588
+ proto.MESSAGE,
589
+ number=1,
590
+ message=gag_content.Content,
591
+ )
592
+ finish_reason: FinishReason = proto.Field(
593
+ proto.ENUM,
594
+ number=2,
595
+ enum=FinishReason,
596
+ )
597
+ safety_ratings: MutableSequence[safety.SafetyRating] = proto.RepeatedField(
598
+ proto.MESSAGE,
599
+ number=5,
600
+ message=safety.SafetyRating,
601
+ )
602
+ citation_metadata: citation.CitationMetadata = proto.Field(
603
+ proto.MESSAGE,
604
+ number=6,
605
+ message=citation.CitationMetadata,
606
+ )
607
+ token_count: int = proto.Field(
608
+ proto.INT32,
609
+ number=7,
610
+ )
611
+ grounding_metadata: "GroundingMetadata" = proto.Field(
612
+ proto.MESSAGE,
613
+ number=9,
614
+ message="GroundingMetadata",
615
+ )
616
+ avg_logprobs: float = proto.Field(
617
+ proto.DOUBLE,
618
+ number=10,
619
+ )
620
+ logprobs_result: "LogprobsResult" = proto.Field(
621
+ proto.MESSAGE,
622
+ number=11,
623
+ message="LogprobsResult",
624
+ )
625
+
626
+
627
+ class LogprobsResult(proto.Message):
628
+ r"""Logprobs Result
629
+
630
+ Attributes:
631
+ top_candidates (MutableSequence[google.ai.generativelanguage_v1.types.LogprobsResult.TopCandidates]):
632
+ Length = total number of decoding steps.
633
+ chosen_candidates (MutableSequence[google.ai.generativelanguage_v1.types.LogprobsResult.Candidate]):
634
+ Length = total number of decoding steps. The chosen
635
+ candidates may or may not be in top_candidates.
636
+ """
637
+
638
+ class Candidate(proto.Message):
639
+ r"""Candidate for the logprobs token and score.
640
+
641
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
642
+
643
+ Attributes:
644
+ token (str):
645
+ The candidate’s token string value.
646
+
647
+ This field is a member of `oneof`_ ``_token``.
648
+ token_id (int):
649
+ The candidate’s token id value.
650
+
651
+ This field is a member of `oneof`_ ``_token_id``.
652
+ log_probability (float):
653
+ The candidate's log probability.
654
+
655
+ This field is a member of `oneof`_ ``_log_probability``.
656
+ """
657
+
658
+ token: str = proto.Field(
659
+ proto.STRING,
660
+ number=1,
661
+ optional=True,
662
+ )
663
+ token_id: int = proto.Field(
664
+ proto.INT32,
665
+ number=3,
666
+ optional=True,
667
+ )
668
+ log_probability: float = proto.Field(
669
+ proto.FLOAT,
670
+ number=2,
671
+ optional=True,
672
+ )
673
+
674
+ class TopCandidates(proto.Message):
675
+ r"""Candidates with top log probabilities at each decoding step.
676
+
677
+ Attributes:
678
+ candidates (MutableSequence[google.ai.generativelanguage_v1.types.LogprobsResult.Candidate]):
679
+ Sorted by log probability in descending
680
+ order.
681
+ """
682
+
683
+ candidates: MutableSequence["LogprobsResult.Candidate"] = proto.RepeatedField(
684
+ proto.MESSAGE,
685
+ number=1,
686
+ message="LogprobsResult.Candidate",
687
+ )
688
+
689
+ top_candidates: MutableSequence[TopCandidates] = proto.RepeatedField(
690
+ proto.MESSAGE,
691
+ number=1,
692
+ message=TopCandidates,
693
+ )
694
+ chosen_candidates: MutableSequence[Candidate] = proto.RepeatedField(
695
+ proto.MESSAGE,
696
+ number=2,
697
+ message=Candidate,
698
+ )
699
+
700
+
701
+ class RetrievalMetadata(proto.Message):
702
+ r"""Metadata related to retrieval in the grounding flow.
703
+
704
+ Attributes:
705
+ google_search_dynamic_retrieval_score (float):
706
+ Optional. Score indicating how likely information from
707
+ google search could help answer the prompt. The score is in
708
+ the range [0, 1], where 0 is the least likely and 1 is the
709
+ most likely. This score is only populated when google search
710
+ grounding and dynamic retrieval is enabled. It will be
711
+ compared to the threshold to determine whether to trigger
712
+ google search.
713
+ """
714
+
715
+ google_search_dynamic_retrieval_score: float = proto.Field(
716
+ proto.FLOAT,
717
+ number=2,
718
+ )
719
+
720
+
721
+ class GroundingMetadata(proto.Message):
722
+ r"""Metadata returned to client when grounding is enabled.
723
+
724
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
725
+
726
+ Attributes:
727
+ search_entry_point (google.ai.generativelanguage_v1.types.SearchEntryPoint):
728
+ Optional. Google search entry for the
729
+ following-up web searches.
730
+
731
+ This field is a member of `oneof`_ ``_search_entry_point``.
732
+ grounding_chunks (MutableSequence[google.ai.generativelanguage_v1.types.GroundingChunk]):
733
+ List of supporting references retrieved from
734
+ specified grounding source.
735
+ grounding_supports (MutableSequence[google.ai.generativelanguage_v1.types.GroundingSupport]):
736
+ List of grounding support.
737
+ retrieval_metadata (google.ai.generativelanguage_v1.types.RetrievalMetadata):
738
+ Metadata related to retrieval in the
739
+ grounding flow.
740
+
741
+ This field is a member of `oneof`_ ``_retrieval_metadata``.
742
+ web_search_queries (MutableSequence[str]):
743
+ Web search queries for the following-up web
744
+ search.
745
+ """
746
+
747
+ search_entry_point: "SearchEntryPoint" = proto.Field(
748
+ proto.MESSAGE,
749
+ number=1,
750
+ optional=True,
751
+ message="SearchEntryPoint",
752
+ )
753
+ grounding_chunks: MutableSequence["GroundingChunk"] = proto.RepeatedField(
754
+ proto.MESSAGE,
755
+ number=2,
756
+ message="GroundingChunk",
757
+ )
758
+ grounding_supports: MutableSequence["GroundingSupport"] = proto.RepeatedField(
759
+ proto.MESSAGE,
760
+ number=3,
761
+ message="GroundingSupport",
762
+ )
763
+ retrieval_metadata: "RetrievalMetadata" = proto.Field(
764
+ proto.MESSAGE,
765
+ number=4,
766
+ optional=True,
767
+ message="RetrievalMetadata",
768
+ )
769
+ web_search_queries: MutableSequence[str] = proto.RepeatedField(
770
+ proto.STRING,
771
+ number=5,
772
+ )
773
+
774
+
775
+ class SearchEntryPoint(proto.Message):
776
+ r"""Google search entry point.
777
+
778
+ Attributes:
779
+ rendered_content (str):
780
+ Optional. Web content snippet that can be
781
+ embedded in a web page or an app webview.
782
+ sdk_blob (bytes):
783
+ Optional. Base64 encoded JSON representing
784
+ array of <search term, search url> tuple.
785
+ """
786
+
787
+ rendered_content: str = proto.Field(
788
+ proto.STRING,
789
+ number=1,
790
+ )
791
+ sdk_blob: bytes = proto.Field(
792
+ proto.BYTES,
793
+ number=2,
794
+ )
795
+
796
+
797
+ class GroundingChunk(proto.Message):
798
+ r"""Grounding chunk.
799
+
800
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
801
+
802
+ Attributes:
803
+ web (google.ai.generativelanguage_v1.types.GroundingChunk.Web):
804
+ Grounding chunk from the web.
805
+
806
+ This field is a member of `oneof`_ ``chunk_type``.
807
+ """
808
+
809
+ class Web(proto.Message):
810
+ r"""Chunk from the web.
811
+
812
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
813
+
814
+ Attributes:
815
+ uri (str):
816
+ URI reference of the chunk.
817
+
818
+ This field is a member of `oneof`_ ``_uri``.
819
+ title (str):
820
+ Title of the chunk.
821
+
822
+ This field is a member of `oneof`_ ``_title``.
823
+ """
824
+
825
+ uri: str = proto.Field(
826
+ proto.STRING,
827
+ number=1,
828
+ optional=True,
829
+ )
830
+ title: str = proto.Field(
831
+ proto.STRING,
832
+ number=2,
833
+ optional=True,
834
+ )
835
+
836
+ web: Web = proto.Field(
837
+ proto.MESSAGE,
838
+ number=1,
839
+ oneof="chunk_type",
840
+ message=Web,
841
+ )
842
+
843
+
844
+ class Segment(proto.Message):
845
+ r"""Segment of the content.
846
+
847
+ Attributes:
848
+ part_index (int):
849
+ Output only. The index of a Part object
850
+ within its parent Content object.
851
+ start_index (int):
852
+ Output only. Start index in the given Part,
853
+ measured in bytes. Offset from the start of the
854
+ Part, inclusive, starting at zero.
855
+ end_index (int):
856
+ Output only. End index in the given Part,
857
+ measured in bytes. Offset from the start of the
858
+ Part, exclusive, starting at zero.
859
+ text (str):
860
+ Output only. The text corresponding to the
861
+ segment from the response.
862
+ """
863
+
864
+ part_index: int = proto.Field(
865
+ proto.INT32,
866
+ number=1,
867
+ )
868
+ start_index: int = proto.Field(
869
+ proto.INT32,
870
+ number=2,
871
+ )
872
+ end_index: int = proto.Field(
873
+ proto.INT32,
874
+ number=3,
875
+ )
876
+ text: str = proto.Field(
877
+ proto.STRING,
878
+ number=4,
879
+ )
880
+
881
+
882
+ class GroundingSupport(proto.Message):
883
+ r"""Grounding support.
884
+
885
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
886
+
887
+ Attributes:
888
+ segment (google.ai.generativelanguage_v1.types.Segment):
889
+ Segment of the content this support belongs
890
+ to.
891
+
892
+ This field is a member of `oneof`_ ``_segment``.
893
+ grounding_chunk_indices (MutableSequence[int]):
894
+ A list of indices (into 'grounding_chunk') specifying the
895
+ citations associated with the claim. For instance [1,3,4]
896
+ means that grounding_chunk[1], grounding_chunk[3],
897
+ grounding_chunk[4] are the retrieved content attributed to
898
+ the claim.
899
+ confidence_scores (MutableSequence[float]):
900
+ Confidence score of the support references. Ranges from 0 to
901
+ 1. 1 is the most confident. This list must have the same
902
+ size as the grounding_chunk_indices.
903
+ """
904
+
905
+ segment: "Segment" = proto.Field(
906
+ proto.MESSAGE,
907
+ number=1,
908
+ optional=True,
909
+ message="Segment",
910
+ )
911
+ grounding_chunk_indices: MutableSequence[int] = proto.RepeatedField(
912
+ proto.INT32,
913
+ number=2,
914
+ )
915
+ confidence_scores: MutableSequence[float] = proto.RepeatedField(
916
+ proto.FLOAT,
917
+ number=3,
918
+ )
919
+
920
+
921
+ class EmbedContentRequest(proto.Message):
922
+ r"""Request containing the ``Content`` for the model to embed.
923
+
924
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
925
+
926
+ Attributes:
927
+ model (str):
928
+ Required. The model's resource name. This serves as an ID
929
+ for the Model to use.
930
+
931
+ This name should match a model name returned by the
932
+ ``ListModels`` method.
933
+
934
+ Format: ``models/{model}``
935
+ content (google.ai.generativelanguage_v1.types.Content):
936
+ Required. The content to embed. Only the ``parts.text``
937
+ fields will be counted.
938
+ task_type (google.ai.generativelanguage_v1.types.TaskType):
939
+ Optional. Optional task type for which the embeddings will
940
+ be used. Can only be set for ``models/embedding-001``.
941
+
942
+ This field is a member of `oneof`_ ``_task_type``.
943
+ title (str):
944
+ Optional. An optional title for the text. Only applicable
945
+ when TaskType is ``RETRIEVAL_DOCUMENT``.
946
+
947
+ Note: Specifying a ``title`` for ``RETRIEVAL_DOCUMENT``
948
+ provides better quality embeddings for retrieval.
949
+
950
+ This field is a member of `oneof`_ ``_title``.
951
+ output_dimensionality (int):
952
+ Optional. Optional reduced dimension for the output
953
+ embedding. If set, excessive values in the output embedding
954
+ are truncated from the end. Supported by newer models since
955
+ 2024 only. You cannot set this value if using the earlier
956
+ model (``models/embedding-001``).
957
+
958
+ This field is a member of `oneof`_ ``_output_dimensionality``.
959
+ """
960
+
961
+ model: str = proto.Field(
962
+ proto.STRING,
963
+ number=1,
964
+ )
965
+ content: gag_content.Content = proto.Field(
966
+ proto.MESSAGE,
967
+ number=2,
968
+ message=gag_content.Content,
969
+ )
970
+ task_type: "TaskType" = proto.Field(
971
+ proto.ENUM,
972
+ number=3,
973
+ optional=True,
974
+ enum="TaskType",
975
+ )
976
+ title: str = proto.Field(
977
+ proto.STRING,
978
+ number=4,
979
+ optional=True,
980
+ )
981
+ output_dimensionality: int = proto.Field(
982
+ proto.INT32,
983
+ number=5,
984
+ optional=True,
985
+ )
986
+
987
+
988
+ class ContentEmbedding(proto.Message):
989
+ r"""A list of floats representing an embedding.
990
+
991
+ Attributes:
992
+ values (MutableSequence[float]):
993
+ The embedding values.
994
+ """
995
+
996
+ values: MutableSequence[float] = proto.RepeatedField(
997
+ proto.FLOAT,
998
+ number=1,
999
+ )
1000
+
1001
+
1002
+ class EmbedContentResponse(proto.Message):
1003
+ r"""The response to an ``EmbedContentRequest``.
1004
+
1005
+ Attributes:
1006
+ embedding (google.ai.generativelanguage_v1.types.ContentEmbedding):
1007
+ Output only. The embedding generated from the
1008
+ input content.
1009
+ """
1010
+
1011
+ embedding: "ContentEmbedding" = proto.Field(
1012
+ proto.MESSAGE,
1013
+ number=1,
1014
+ message="ContentEmbedding",
1015
+ )
1016
+
1017
+
1018
+ class BatchEmbedContentsRequest(proto.Message):
1019
+ r"""Batch request to get embeddings from the model for a list of
1020
+ prompts.
1021
+
1022
+ Attributes:
1023
+ model (str):
1024
+ Required. The model's resource name. This serves as an ID
1025
+ for the Model to use.
1026
+
1027
+ This name should match a model name returned by the
1028
+ ``ListModels`` method.
1029
+
1030
+ Format: ``models/{model}``
1031
+ requests (MutableSequence[google.ai.generativelanguage_v1.types.EmbedContentRequest]):
1032
+ Required. Embed requests for the batch. The model in each of
1033
+ these requests must match the model specified
1034
+ ``BatchEmbedContentsRequest.model``.
1035
+ """
1036
+
1037
+ model: str = proto.Field(
1038
+ proto.STRING,
1039
+ number=1,
1040
+ )
1041
+ requests: MutableSequence["EmbedContentRequest"] = proto.RepeatedField(
1042
+ proto.MESSAGE,
1043
+ number=2,
1044
+ message="EmbedContentRequest",
1045
+ )
1046
+
1047
+
1048
+ class BatchEmbedContentsResponse(proto.Message):
1049
+ r"""The response to a ``BatchEmbedContentsRequest``.
1050
+
1051
+ Attributes:
1052
+ embeddings (MutableSequence[google.ai.generativelanguage_v1.types.ContentEmbedding]):
1053
+ Output only. The embeddings for each request,
1054
+ in the same order as provided in the batch
1055
+ request.
1056
+ """
1057
+
1058
+ embeddings: MutableSequence["ContentEmbedding"] = proto.RepeatedField(
1059
+ proto.MESSAGE,
1060
+ number=1,
1061
+ message="ContentEmbedding",
1062
+ )
1063
+
1064
+
1065
+ class CountTokensRequest(proto.Message):
1066
+ r"""Counts the number of tokens in the ``prompt`` sent to a model.
1067
+
1068
+ Models may tokenize text differently, so each model may return a
1069
+ different ``token_count``.
1070
+
1071
+ Attributes:
1072
+ model (str):
1073
+ Required. The model's resource name. This serves as an ID
1074
+ for the Model to use.
1075
+
1076
+ This name should match a model name returned by the
1077
+ ``ListModels`` method.
1078
+
1079
+ Format: ``models/{model}``
1080
+ contents (MutableSequence[google.ai.generativelanguage_v1.types.Content]):
1081
+ Optional. The input given to the model as a prompt. This
1082
+ field is ignored when ``generate_content_request`` is set.
1083
+ generate_content_request (google.ai.generativelanguage_v1.types.GenerateContentRequest):
1084
+ Optional. The overall input given to the ``Model``. This
1085
+ includes the prompt as well as other model steering
1086
+ information like `system
1087
+ instructions <https://ai.google.dev/gemini-api/docs/system-instructions>`__,
1088
+ and/or function declarations for `function
1089
+ calling <https://ai.google.dev/gemini-api/docs/function-calling>`__.
1090
+ ``Model``\ s/\ ``Content``\ s and
1091
+ ``generate_content_request``\ s are mutually exclusive. You
1092
+ can either send ``Model`` + ``Content``\ s or a
1093
+ ``generate_content_request``, but never both.
1094
+ """
1095
+
1096
+ model: str = proto.Field(
1097
+ proto.STRING,
1098
+ number=1,
1099
+ )
1100
+ contents: MutableSequence[gag_content.Content] = proto.RepeatedField(
1101
+ proto.MESSAGE,
1102
+ number=2,
1103
+ message=gag_content.Content,
1104
+ )
1105
+ generate_content_request: "GenerateContentRequest" = proto.Field(
1106
+ proto.MESSAGE,
1107
+ number=3,
1108
+ message="GenerateContentRequest",
1109
+ )
1110
+
1111
+
1112
+ class CountTokensResponse(proto.Message):
1113
+ r"""A response from ``CountTokens``.
1114
+
1115
+ It returns the model's ``token_count`` for the ``prompt``.
1116
+
1117
+ Attributes:
1118
+ total_tokens (int):
1119
+ The number of tokens that the ``Model`` tokenizes the
1120
+ ``prompt`` into. Always non-negative.
1121
+ """
1122
+
1123
+ total_tokens: int = proto.Field(
1124
+ proto.INT32,
1125
+ number=1,
1126
+ )
1127
+
1128
+
1129
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/model_service.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ from google.ai.generativelanguage_v1.types import model
23
+
24
+ __protobuf__ = proto.module(
25
+ package="google.ai.generativelanguage.v1",
26
+ manifest={
27
+ "GetModelRequest",
28
+ "ListModelsRequest",
29
+ "ListModelsResponse",
30
+ },
31
+ )
32
+
33
+
34
+ class GetModelRequest(proto.Message):
35
+ r"""Request for getting information about a specific Model.
36
+
37
+ Attributes:
38
+ name (str):
39
+ Required. The resource name of the model.
40
+
41
+ This name should match a model name returned by the
42
+ ``ListModels`` method.
43
+
44
+ Format: ``models/{model}``
45
+ """
46
+
47
+ name: str = proto.Field(
48
+ proto.STRING,
49
+ number=1,
50
+ )
51
+
52
+
53
+ class ListModelsRequest(proto.Message):
54
+ r"""Request for listing all Models.
55
+
56
+ Attributes:
57
+ page_size (int):
58
+ The maximum number of ``Models`` to return (per page).
59
+
60
+ If unspecified, 50 models will be returned per page. This
61
+ method returns at most 1000 models per page, even if you
62
+ pass a larger page_size.
63
+ page_token (str):
64
+ A page token, received from a previous ``ListModels`` call.
65
+
66
+ Provide the ``page_token`` returned by one request as an
67
+ argument to the next request to retrieve the next page.
68
+
69
+ When paginating, all other parameters provided to
70
+ ``ListModels`` must match the call that provided the page
71
+ token.
72
+ """
73
+
74
+ page_size: int = proto.Field(
75
+ proto.INT32,
76
+ number=2,
77
+ )
78
+ page_token: str = proto.Field(
79
+ proto.STRING,
80
+ number=3,
81
+ )
82
+
83
+
84
+ class ListModelsResponse(proto.Message):
85
+ r"""Response from ``ListModel`` containing a paginated list of Models.
86
+
87
+ Attributes:
88
+ models (MutableSequence[google.ai.generativelanguage_v1.types.Model]):
89
+ The returned Models.
90
+ next_page_token (str):
91
+ A token, which can be sent as ``page_token`` to retrieve the
92
+ next page.
93
+
94
+ If this field is omitted, there are no more pages.
95
+ """
96
+
97
+ @property
98
+ def raw_page(self):
99
+ return self
100
+
101
+ models: MutableSequence[model.Model] = proto.RepeatedField(
102
+ proto.MESSAGE,
103
+ number=1,
104
+ message=model.Model,
105
+ )
106
+ next_page_token: str = proto.Field(
107
+ proto.STRING,
108
+ number=2,
109
+ )
110
+
111
+
112
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1/types/safety.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from __future__ import annotations
17
+
18
+ from typing import MutableMapping, MutableSequence
19
+
20
+ import proto # type: ignore
21
+
22
+ __protobuf__ = proto.module(
23
+ package="google.ai.generativelanguage.v1",
24
+ manifest={
25
+ "HarmCategory",
26
+ "SafetyRating",
27
+ "SafetySetting",
28
+ },
29
+ )
30
+
31
+
32
+ class HarmCategory(proto.Enum):
33
+ r"""The category of a rating.
34
+
35
+ These categories cover various kinds of harms that developers
36
+ may wish to adjust.
37
+
38
+ Values:
39
+ HARM_CATEGORY_UNSPECIFIED (0):
40
+ Category is unspecified.
41
+ HARM_CATEGORY_DEROGATORY (1):
42
+ **PaLM** - Negative or harmful comments targeting identity
43
+ and/or protected attribute.
44
+ HARM_CATEGORY_TOXICITY (2):
45
+ **PaLM** - Content that is rude, disrespectful, or profane.
46
+ HARM_CATEGORY_VIOLENCE (3):
47
+ **PaLM** - Describes scenarios depicting violence against an
48
+ individual or group, or general descriptions of gore.
49
+ HARM_CATEGORY_SEXUAL (4):
50
+ **PaLM** - Contains references to sexual acts or other lewd
51
+ content.
52
+ HARM_CATEGORY_MEDICAL (5):
53
+ **PaLM** - Promotes unchecked medical advice.
54
+ HARM_CATEGORY_DANGEROUS (6):
55
+ **PaLM** - Dangerous content that promotes, facilitates, or
56
+ encourages harmful acts.
57
+ HARM_CATEGORY_HARASSMENT (7):
58
+ **Gemini** - Harassment content.
59
+ HARM_CATEGORY_HATE_SPEECH (8):
60
+ **Gemini** - Hate speech and content.
61
+ HARM_CATEGORY_SEXUALLY_EXPLICIT (9):
62
+ **Gemini** - Sexually explicit content.
63
+ HARM_CATEGORY_DANGEROUS_CONTENT (10):
64
+ **Gemini** - Dangerous content.
65
+ HARM_CATEGORY_CIVIC_INTEGRITY (11):
66
+ **Gemini** - Content that may be used to harm civic
67
+ integrity.
68
+ """
69
+ HARM_CATEGORY_UNSPECIFIED = 0
70
+ HARM_CATEGORY_DEROGATORY = 1
71
+ HARM_CATEGORY_TOXICITY = 2
72
+ HARM_CATEGORY_VIOLENCE = 3
73
+ HARM_CATEGORY_SEXUAL = 4
74
+ HARM_CATEGORY_MEDICAL = 5
75
+ HARM_CATEGORY_DANGEROUS = 6
76
+ HARM_CATEGORY_HARASSMENT = 7
77
+ HARM_CATEGORY_HATE_SPEECH = 8
78
+ HARM_CATEGORY_SEXUALLY_EXPLICIT = 9
79
+ HARM_CATEGORY_DANGEROUS_CONTENT = 10
80
+ HARM_CATEGORY_CIVIC_INTEGRITY = 11
81
+
82
+
83
+ class SafetyRating(proto.Message):
84
+ r"""Safety rating for a piece of content.
85
+
86
+ The safety rating contains the category of harm and the harm
87
+ probability level in that category for a piece of content.
88
+ Content is classified for safety across a number of harm
89
+ categories and the probability of the harm classification is
90
+ included here.
91
+
92
+ Attributes:
93
+ category (google.ai.generativelanguage_v1.types.HarmCategory):
94
+ Required. The category for this rating.
95
+ probability (google.ai.generativelanguage_v1.types.SafetyRating.HarmProbability):
96
+ Required. The probability of harm for this
97
+ content.
98
+ blocked (bool):
99
+ Was this content blocked because of this
100
+ rating?
101
+ """
102
+
103
+ class HarmProbability(proto.Enum):
104
+ r"""The probability that a piece of content is harmful.
105
+
106
+ The classification system gives the probability of the content
107
+ being unsafe. This does not indicate the severity of harm for a
108
+ piece of content.
109
+
110
+ Values:
111
+ HARM_PROBABILITY_UNSPECIFIED (0):
112
+ Probability is unspecified.
113
+ NEGLIGIBLE (1):
114
+ Content has a negligible chance of being
115
+ unsafe.
116
+ LOW (2):
117
+ Content has a low chance of being unsafe.
118
+ MEDIUM (3):
119
+ Content has a medium chance of being unsafe.
120
+ HIGH (4):
121
+ Content has a high chance of being unsafe.
122
+ """
123
+ HARM_PROBABILITY_UNSPECIFIED = 0
124
+ NEGLIGIBLE = 1
125
+ LOW = 2
126
+ MEDIUM = 3
127
+ HIGH = 4
128
+
129
+ category: "HarmCategory" = proto.Field(
130
+ proto.ENUM,
131
+ number=3,
132
+ enum="HarmCategory",
133
+ )
134
+ probability: HarmProbability = proto.Field(
135
+ proto.ENUM,
136
+ number=4,
137
+ enum=HarmProbability,
138
+ )
139
+ blocked: bool = proto.Field(
140
+ proto.BOOL,
141
+ number=5,
142
+ )
143
+
144
+
145
+ class SafetySetting(proto.Message):
146
+ r"""Safety setting, affecting the safety-blocking behavior.
147
+
148
+ Passing a safety setting for a category changes the allowed
149
+ probability that content is blocked.
150
+
151
+ Attributes:
152
+ category (google.ai.generativelanguage_v1.types.HarmCategory):
153
+ Required. The category for this setting.
154
+ threshold (google.ai.generativelanguage_v1.types.SafetySetting.HarmBlockThreshold):
155
+ Required. Controls the probability threshold
156
+ at which harm is blocked.
157
+ """
158
+
159
+ class HarmBlockThreshold(proto.Enum):
160
+ r"""Block at and beyond a specified harm probability.
161
+
162
+ Values:
163
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED (0):
164
+ Threshold is unspecified.
165
+ BLOCK_LOW_AND_ABOVE (1):
166
+ Content with NEGLIGIBLE will be allowed.
167
+ BLOCK_MEDIUM_AND_ABOVE (2):
168
+ Content with NEGLIGIBLE and LOW will be
169
+ allowed.
170
+ BLOCK_ONLY_HIGH (3):
171
+ Content with NEGLIGIBLE, LOW, and MEDIUM will
172
+ be allowed.
173
+ BLOCK_NONE (4):
174
+ All content will be allowed.
175
+ OFF (5):
176
+ Turn off the safety filter.
177
+ """
178
+ HARM_BLOCK_THRESHOLD_UNSPECIFIED = 0
179
+ BLOCK_LOW_AND_ABOVE = 1
180
+ BLOCK_MEDIUM_AND_ABOVE = 2
181
+ BLOCK_ONLY_HIGH = 3
182
+ BLOCK_NONE = 4
183
+ OFF = 5
184
+
185
+ category: "HarmCategory" = proto.Field(
186
+ proto.ENUM,
187
+ number=3,
188
+ enum="HarmCategory",
189
+ )
190
+ threshold: HarmBlockThreshold = proto.Field(
191
+ proto.ENUM,
192
+ number=4,
193
+ enum=HarmBlockThreshold,
194
+ )
195
+
196
+
197
+ __all__ = tuple(sorted(__protobuf__.manifest))
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (408 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/async_client.cpython-311.pyc ADDED
Binary file (30.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/__pycache__/client.cpython-311.pyc ADDED
Binary file (47.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__init__.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ from collections import OrderedDict
17
+ from typing import Dict, Type
18
+
19
+ from .base import DiscussServiceTransport
20
+ from .grpc import DiscussServiceGrpcTransport
21
+ from .grpc_asyncio import DiscussServiceGrpcAsyncIOTransport
22
+ from .rest import DiscussServiceRestInterceptor, DiscussServiceRestTransport
23
+
24
+ # Compile a registry of transports.
25
+ _transport_registry = OrderedDict() # type: Dict[str, Type[DiscussServiceTransport]]
26
+ _transport_registry["grpc"] = DiscussServiceGrpcTransport
27
+ _transport_registry["grpc_asyncio"] = DiscussServiceGrpcAsyncIOTransport
28
+ _transport_registry["rest"] = DiscussServiceRestTransport
29
+
30
+ __all__ = (
31
+ "DiscussServiceTransport",
32
+ "DiscussServiceGrpcTransport",
33
+ "DiscussServiceGrpcAsyncIOTransport",
34
+ "DiscussServiceRestTransport",
35
+ "DiscussServiceRestInterceptor",
36
+ )
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (907 Bytes). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/base.cpython-311.pyc ADDED
Binary file (9.01 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/grpc.cpython-311.pyc ADDED
Binary file (19.3 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/grpc_asyncio.cpython-311.pyc ADDED
Binary file (21.9 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/rest.cpython-311.pyc ADDED
Binary file (36 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/__pycache__/rest_base.cpython-311.pyc ADDED
Binary file (12.8 kB). View file
 
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/base.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import abc
17
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
18
+
19
+ import google.api_core
20
+ from google.api_core import exceptions as core_exceptions
21
+ from google.api_core import gapic_v1
22
+ from google.api_core import retry as retries
23
+ import google.auth # type: ignore
24
+ from google.auth import credentials as ga_credentials # type: ignore
25
+ from google.longrunning import operations_pb2 # type: ignore
26
+ from google.oauth2 import service_account # type: ignore
27
+
28
+ from google.ai.generativelanguage_v1beta import gapic_version as package_version
29
+ from google.ai.generativelanguage_v1beta.types import discuss_service
30
+
31
# Default client metadata: reports this GAPIC library's version in the
# user-agent string attached to every request.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)


class DiscussServiceTransport(abc.ABC):
    """Abstract transport class for DiscussService."""

    # No OAuth scopes are requested by default for this service.
    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "generativelanguage.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to (default: 'generativelanguage.googleapis.com').
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Audience applied via
                ``with_gdch_audience`` on credentials that support it;
                falls back to ``host`` when not provided.
        """

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes
        # Concrete transports may set ``_ignore_credentials`` *before*
        # calling ``super().__init__`` (e.g. when a pre-built channel is
        # supplied), so only default it when unset.
        if not hasattr(self, "_ignore_credentials"):
            self._ignore_credentials: bool = False

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None and not self._ignore_credentials:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
            # Don't apply audience if the credentials file passed from user.
            if hasattr(credentials, "with_gdch_audience"):
                credentials = credentials.with_gdch_audience(
                    api_audience if api_audience else host
                )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

    @property
    def host(self):
        # Hostname (including port) this transport connects to.
        return self._host

    def _prep_wrapped_messages(self, client_info):
        """Precompute wrapped RPC methods with their default retry/timeout policies."""
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.generate_message: gapic_v1.method.wrap_method(
                self.generate_message,
                default_retry=retries.Retry(
                    initial=1.0,
                    maximum=10.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.count_message_tokens: gapic_v1.method.wrap_method(
                self.count_message_tokens,
                default_retry=retries.Retry(
                    initial=1.0,
                    maximum=10.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=60.0,
                ),
                default_timeout=60.0,
                client_info=client_info,
            ),
            self.get_operation: gapic_v1.method.wrap_method(
                self.get_operation,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_operations: gapic_v1.method.wrap_method(
                self.list_operations,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # The properties below are abstract RPC hooks; each concrete transport
    # (gRPC, gRPC-asyncio, REST) returns a callable that performs the RPC.
    @property
    def generate_message(
        self,
    ) -> Callable[
        [discuss_service.GenerateMessageRequest],
        Union[
            discuss_service.GenerateMessageResponse,
            Awaitable[discuss_service.GenerateMessageResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def count_message_tokens(
        self,
    ) -> Callable[
        [discuss_service.CountMessageTokensRequest],
        Union[
            discuss_service.CountMessageTokensResponse,
            Awaitable[discuss_service.CountMessageTokensResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def kind(self) -> str:
        # Short transport identifier, e.g. "grpc" / "grpc_asyncio" / "rest".
        raise NotImplementedError()


__all__ = ("DiscussServiceTransport",)
.venv/lib/python3.11/site-packages/google/ai/generativelanguage_v1beta/services/discuss_service/transports/grpc_asyncio.py ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright 2024 Google LLC
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ import inspect
17
+ import json
18
+ import logging as std_logging
19
+ import pickle
20
+ from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
21
+ import warnings
22
+
23
+ from google.api_core import exceptions as core_exceptions
24
+ from google.api_core import gapic_v1, grpc_helpers_async
25
+ from google.api_core import retry_async as retries
26
+ from google.auth import credentials as ga_credentials # type: ignore
27
+ from google.auth.transport.grpc import SslCredentials # type: ignore
28
+ from google.longrunning import operations_pb2 # type: ignore
29
+ from google.protobuf.json_format import MessageToJson
30
+ import google.protobuf.message
31
+ import grpc # type: ignore
32
+ from grpc.experimental import aio # type: ignore
33
+ import proto # type: ignore
34
+
35
+ from google.ai.generativelanguage_v1beta.types import discuss_service
36
+
37
+ from .base import DEFAULT_CLIENT_INFO, DiscussServiceTransport
38
+ from .grpc import DiscussServiceGrpcTransport
39
+
40
# Feature-probe: ``client_logging`` only exists in newer google-api-core
# releases; when it is missing, debug request/response logging is disabled.
try:
    from google.api_core import client_logging  # type: ignore

    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
except ImportError:  # pragma: NO COVER
    CLIENT_LOGGING_SUPPORTED = False

_LOGGER = std_logging.getLogger(__name__)
48
+
49
+
50
class _LoggingClientAIOInterceptor(
    grpc.aio.UnaryUnaryClientInterceptor
):  # pragma: NO COVER
    """Async gRPC interceptor that DEBUG-logs each unary-unary request/response."""

    @staticmethod
    def _to_payload(message):
        # Render a message as JSON where possible; otherwise fall back to a
        # pickled representation so something loggable is always produced.
        if isinstance(message, proto.Message):
            return type(message).to_json(message)
        if isinstance(message, google.protobuf.message.Message):
            return MessageToJson(message)
        return f"{type(message).__name__}: {pickle.dumps(message)}"

    async def intercept_unary_unary(self, continuation, client_call_details, request):
        should_log = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(
            std_logging.DEBUG
        )
        if should_log:  # pragma: NO COVER
            # Decode any bytes-valued metadata entries for readable output.
            decoded_metadata = {
                key: value.decode("utf-8") if isinstance(value, bytes) else value
                for key, value in client_call_details.metadata
            }
            grpc_request = {
                "payload": self._to_payload(request),
                "requestMethod": "grpc",
                "metadata": dict(decoded_metadata),
            }
            _LOGGER.debug(
                f"Sending request for {client_call_details.method}",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta.DiscussService",
                    "rpcName": str(client_call_details.method),
                    "request": grpc_request,
                    "metadata": grpc_request["metadata"],
                },
            )
        response = await continuation(client_call_details, request)
        if should_log:  # pragma: NO COVER
            trailing = await response.trailing_metadata()
            # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to a plain dict
            metadata = {k: str(v) for k, v in trailing} if trailing else None
            result = await response
            grpc_response = {
                "payload": self._to_payload(result),
                "metadata": metadata,
                "status": "OK",
            }
            _LOGGER.debug(
                f"Received response to rpc {client_call_details.method}.",
                extra={
                    "serviceName": "google.ai.generativelanguage.v1beta.DiscussService",
                    "rpcName": str(client_call_details.method),
                    "response": grpc_response,
                    "metadata": grpc_response["metadata"],
                },
            )
        return response
+
116
+
117
+ class DiscussServiceGrpcAsyncIOTransport(DiscussServiceTransport):
118
+ """gRPC AsyncIO backend transport for DiscussService.
119
+
120
+ An API for using Generative Language Models (GLMs) in dialog
121
+ applications.
122
+ Also known as large language models (LLMs), this API provides
123
+ models that are trained for multi-turn dialog.
124
+
125
+ This class defines the same methods as the primary client, so the
126
+ primary client can load the underlying transport implementation
127
+ and call it.
128
+
129
+ It sends protocol buffers over the wire using gRPC (which is built on
130
+ top of HTTP/2); the ``grpcio`` package must be installed.
131
+ """
132
+
133
+ _grpc_channel: aio.Channel
134
+ _stubs: Dict[str, Callable] = {}
135
+
136
+ @classmethod
137
+ def create_channel(
138
+ cls,
139
+ host: str = "generativelanguage.googleapis.com",
140
+ credentials: Optional[ga_credentials.Credentials] = None,
141
+ credentials_file: Optional[str] = None,
142
+ scopes: Optional[Sequence[str]] = None,
143
+ quota_project_id: Optional[str] = None,
144
+ **kwargs,
145
+ ) -> aio.Channel:
146
+ """Create and return a gRPC AsyncIO channel object.
147
+ Args:
148
+ host (Optional[str]): The host for the channel to use.
149
+ credentials (Optional[~.Credentials]): The
150
+ authorization credentials to attach to requests. These
151
+ credentials identify this application to the service. If
152
+ none are specified, the client will attempt to ascertain
153
+ the credentials from the environment.
154
+ credentials_file (Optional[str]): A file with credentials that can
155
+ be loaded with :func:`google.auth.load_credentials_from_file`.
156
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
157
+ service. These are only used when credentials are not specified and
158
+ are passed to :func:`google.auth.default`.
159
+ quota_project_id (Optional[str]): An optional project to use for billing
160
+ and quota.
161
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
162
+ channel creation.
163
+ Returns:
164
+ aio.Channel: A gRPC AsyncIO channel object.
165
+ """
166
+
167
+ return grpc_helpers_async.create_channel(
168
+ host,
169
+ credentials=credentials,
170
+ credentials_file=credentials_file,
171
+ quota_project_id=quota_project_id,
172
+ default_scopes=cls.AUTH_SCOPES,
173
+ scopes=scopes,
174
+ default_host=cls.DEFAULT_HOST,
175
+ **kwargs,
176
+ )
177
+
178
+ def __init__(
179
+ self,
180
+ *,
181
+ host: str = "generativelanguage.googleapis.com",
182
+ credentials: Optional[ga_credentials.Credentials] = None,
183
+ credentials_file: Optional[str] = None,
184
+ scopes: Optional[Sequence[str]] = None,
185
+ channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None,
186
+ api_mtls_endpoint: Optional[str] = None,
187
+ client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
188
+ ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
189
+ client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
190
+ quota_project_id: Optional[str] = None,
191
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
192
+ always_use_jwt_access: Optional[bool] = False,
193
+ api_audience: Optional[str] = None,
194
+ ) -> None:
195
+ """Instantiate the transport.
196
+
197
+ Args:
198
+ host (Optional[str]):
199
+ The hostname to connect to (default: 'generativelanguage.googleapis.com').
200
+ credentials (Optional[google.auth.credentials.Credentials]): The
201
+ authorization credentials to attach to requests. These
202
+ credentials identify the application to the service; if none
203
+ are specified, the client will attempt to ascertain the
204
+ credentials from the environment.
205
+ This argument is ignored if a ``channel`` instance is provided.
206
+ credentials_file (Optional[str]): A file with credentials that can
207
+ be loaded with :func:`google.auth.load_credentials_from_file`.
208
+ This argument is ignored if a ``channel`` instance is provided.
209
+ scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
210
+ service. These are only used when credentials are not specified and
211
+ are passed to :func:`google.auth.default`.
212
+ channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]):
213
+ A ``Channel`` instance through which to make calls, or a Callable
214
+ that constructs and returns one. If set to None, ``self.create_channel``
215
+ is used to create the channel. If a Callable is given, it will be called
216
+ with the same arguments as used in ``self.create_channel``.
217
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
218
+ If provided, it overrides the ``host`` argument and tries to create
219
+ a mutual TLS channel with client SSL credentials from
220
+ ``client_cert_source`` or application default SSL credentials.
221
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
222
+ Deprecated. A callback to provide client SSL certificate bytes and
223
+ private key bytes, both in PEM format. It is ignored if
224
+ ``api_mtls_endpoint`` is None.
225
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
226
+ for the grpc channel. It is ignored if a ``channel`` instance is provided.
227
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
228
+ A callback to provide client certificate bytes and private key bytes,
229
+ both in PEM format. It is used to configure a mutual TLS channel. It is
230
+ ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided.
231
+ quota_project_id (Optional[str]): An optional project to use for billing
232
+ and quota.
233
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
234
+ The client info used to send a user-agent string along with
235
+ API requests. If ``None``, then default info will be used.
236
+ Generally, you only need to set this if you're developing
237
+ your own client library.
238
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
239
+ be used for service account credentials.
240
+
241
+ Raises:
242
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
243
+ creation failed for any reason.
244
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
245
+ and ``credentials_file`` are passed.
246
+ """
247
+ self._grpc_channel = None
248
+ self._ssl_channel_credentials = ssl_channel_credentials
249
+ self._stubs: Dict[str, Callable] = {}
250
+
251
+ if api_mtls_endpoint:
252
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
253
+ if client_cert_source:
254
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
255
+
256
+ if isinstance(channel, aio.Channel):
257
+ # Ignore credentials if a channel was passed.
258
+ credentials = None
259
+ self._ignore_credentials = True
260
+ # If a channel was explicitly provided, set it.
261
+ self._grpc_channel = channel
262
+ self._ssl_channel_credentials = None
263
+ else:
264
+ if api_mtls_endpoint:
265
+ host = api_mtls_endpoint
266
+
267
+ # Create SSL credentials with client_cert_source or application
268
+ # default SSL credentials.
269
+ if client_cert_source:
270
+ cert, key = client_cert_source()
271
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
272
+ certificate_chain=cert, private_key=key
273
+ )
274
+ else:
275
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
276
+
277
+ else:
278
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
279
+ cert, key = client_cert_source_for_mtls()
280
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
281
+ certificate_chain=cert, private_key=key
282
+ )
283
+
284
+ # The base transport sets the host, credentials and scopes
285
+ super().__init__(
286
+ host=host,
287
+ credentials=credentials,
288
+ credentials_file=credentials_file,
289
+ scopes=scopes,
290
+ quota_project_id=quota_project_id,
291
+ client_info=client_info,
292
+ always_use_jwt_access=always_use_jwt_access,
293
+ api_audience=api_audience,
294
+ )
295
+
296
+ if not self._grpc_channel:
297
+ # initialize with the provided callable or the default channel
298
+ channel_init = channel or type(self).create_channel
299
+ self._grpc_channel = channel_init(
300
+ self._host,
301
+ # use the credentials which are saved
302
+ credentials=self._credentials,
303
+ # Set ``credentials_file`` to ``None`` here as
304
+ # the credentials that we saved earlier should be used.
305
+ credentials_file=None,
306
+ scopes=self._scopes,
307
+ ssl_credentials=self._ssl_channel_credentials,
308
+ quota_project_id=quota_project_id,
309
+ options=[
310
+ ("grpc.max_send_message_length", -1),
311
+ ("grpc.max_receive_message_length", -1),
312
+ ],
313
+ )
314
+
315
+ self._interceptor = _LoggingClientAIOInterceptor()
316
+ self._grpc_channel._unary_unary_interceptors.append(self._interceptor)
317
+ self._logged_channel = self._grpc_channel
318
+ self._wrap_with_kind = (
319
+ "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters
320
+ )
321
+ # Wrap messages. This must be done after self._logged_channel exists
322
+ self._prep_wrapped_messages(client_info)
323
+
324
+ @property
325
+ def grpc_channel(self) -> aio.Channel:
326
+ """Create the channel designed to connect to this service.
327
+
328
+ This property caches on the instance; repeated calls return
329
+ the same channel.
330
+ """
331
+ # Return the channel from cache.
332
+ return self._grpc_channel
333
+
334
+ @property
335
+ def generate_message(
336
+ self,
337
+ ) -> Callable[
338
+ [discuss_service.GenerateMessageRequest],
339
+ Awaitable[discuss_service.GenerateMessageResponse],
340
+ ]:
341
+ r"""Return a callable for the generate message method over gRPC.
342
+
343
+ Generates a response from the model given an input
344
+ ``MessagePrompt``.
345
+
346
+ Returns:
347
+ Callable[[~.GenerateMessageRequest],
348
+ Awaitable[~.GenerateMessageResponse]]:
349
+ A function that, when called, will call the underlying RPC
350
+ on the server.
351
+ """
352
+ # Generate a "stub function" on-the-fly which will actually make
353
+ # the request.
354
+ # gRPC handles serialization and deserialization, so we just need
355
+ # to pass in the functions for each.
356
+ if "generate_message" not in self._stubs:
357
+ self._stubs["generate_message"] = self._logged_channel.unary_unary(
358
+ "/google.ai.generativelanguage.v1beta.DiscussService/GenerateMessage",
359
+ request_serializer=discuss_service.GenerateMessageRequest.serialize,
360
+ response_deserializer=discuss_service.GenerateMessageResponse.deserialize,
361
+ )
362
+ return self._stubs["generate_message"]
363
+
364
+ @property
365
+ def count_message_tokens(
366
+ self,
367
+ ) -> Callable[
368
+ [discuss_service.CountMessageTokensRequest],
369
+ Awaitable[discuss_service.CountMessageTokensResponse],
370
+ ]:
371
+ r"""Return a callable for the count message tokens method over gRPC.
372
+
373
+ Runs a model's tokenizer on a string and returns the
374
+ token count.
375
+
376
+ Returns:
377
+ Callable[[~.CountMessageTokensRequest],
378
+ Awaitable[~.CountMessageTokensResponse]]:
379
+ A function that, when called, will call the underlying RPC
380
+ on the server.
381
+ """
382
+ # Generate a "stub function" on-the-fly which will actually make
383
+ # the request.
384
+ # gRPC handles serialization and deserialization, so we just need
385
+ # to pass in the functions for each.
386
+ if "count_message_tokens" not in self._stubs:
387
+ self._stubs["count_message_tokens"] = self._logged_channel.unary_unary(
388
+ "/google.ai.generativelanguage.v1beta.DiscussService/CountMessageTokens",
389
+ request_serializer=discuss_service.CountMessageTokensRequest.serialize,
390
+ response_deserializer=discuss_service.CountMessageTokensResponse.deserialize,
391
+ )
392
+ return self._stubs["count_message_tokens"]
393
+
394
+ def _prep_wrapped_messages(self, client_info):
395
+ """Precompute the wrapped methods, overriding the base class method to use async wrappers."""
396
+ self._wrapped_methods = {
397
+ self.generate_message: self._wrap_method(
398
+ self.generate_message,
399
+ default_retry=retries.AsyncRetry(
400
+ initial=1.0,
401
+ maximum=10.0,
402
+ multiplier=1.3,
403
+ predicate=retries.if_exception_type(
404
+ core_exceptions.ServiceUnavailable,
405
+ ),
406
+ deadline=60.0,
407
+ ),
408
+ default_timeout=60.0,
409
+ client_info=client_info,
410
+ ),
411
+ self.count_message_tokens: self._wrap_method(
412
+ self.count_message_tokens,
413
+ default_retry=retries.AsyncRetry(
414
+ initial=1.0,
415
+ maximum=10.0,
416
+ multiplier=1.3,
417
+ predicate=retries.if_exception_type(
418
+ core_exceptions.ServiceUnavailable,
419
+ ),
420
+ deadline=60.0,
421
+ ),
422
+ default_timeout=60.0,
423
+ client_info=client_info,
424
+ ),
425
+ self.get_operation: self._wrap_method(
426
+ self.get_operation,
427
+ default_timeout=None,
428
+ client_info=client_info,
429
+ ),
430
+ self.list_operations: self._wrap_method(
431
+ self.list_operations,
432
+ default_timeout=None,
433
+ client_info=client_info,
434
+ ),
435
+ }
436
+
437
+ def _wrap_method(self, func, *args, **kwargs):
438
+ if self._wrap_with_kind: # pragma: NO COVER
439
+ kwargs["kind"] = self.kind
440
+ return gapic_v1.method_async.wrap_method(func, *args, **kwargs)
441
+
442
+ def close(self):
443
+ return self._logged_channel.close()
444
+
445
+ @property
446
+ def kind(self) -> str:
447
+ return "grpc_asyncio"
448
+
449
+ @property
450
+ def get_operation(
451
+ self,
452
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
453
+ r"""Return a callable for the get_operation method over gRPC."""
454
+ # Generate a "stub function" on-the-fly which will actually make
455
+ # the request.
456
+ # gRPC handles serialization and deserialization, so we just need
457
+ # to pass in the functions for each.
458
+ if "get_operation" not in self._stubs:
459
+ self._stubs["get_operation"] = self._logged_channel.unary_unary(
460
+ "/google.longrunning.Operations/GetOperation",
461
+ request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
462
+ response_deserializer=operations_pb2.Operation.FromString,
463
+ )
464
+ return self._stubs["get_operation"]
465
+
466
+ @property
467
+ def list_operations(
468
+ self,
469
+ ) -> Callable[
470
+ [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
471
+ ]:
472
+ r"""Return a callable for the list_operations method over gRPC."""
473
+ # Generate a "stub function" on-the-fly which will actually make
474
+ # the request.
475
+ # gRPC handles serialization and deserialization, so we just need
476
+ # to pass in the functions for each.
477
+ if "list_operations" not in self._stubs:
478
+ self._stubs["list_operations"] = self._logged_channel.unary_unary(
479
+ "/google.longrunning.Operations/ListOperations",
480
+ request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
481
+ response_deserializer=operations_pb2.ListOperationsResponse.FromString,
482
+ )
483
+ return self._stubs["list_operations"]
484
+
485
+
486
+ __all__ = ("DiscussServiceGrpcAsyncIOTransport",)