diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__init__.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4db32a4cb0afe86c0302184f7e8b2c7c053ff5f3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__init__.py @@ -0,0 +1,40 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Package for interacting with the google.longrunning.operations meta-API.""" + +from google.api_core.operations_v1.abstract_operations_client import AbstractOperationsClient +from google.api_core.operations_v1.operations_async_client import OperationsAsyncClient +from google.api_core.operations_v1.operations_client import OperationsClient +from google.api_core.operations_v1.transports.rest import OperationsRestTransport + +__all__ = [ + "AbstractOperationsClient", + "OperationsAsyncClient", + "OperationsClient", + "OperationsRestTransport" +] + +try: + from google.api_core.operations_v1.transports.rest_asyncio import ( + AsyncOperationsRestTransport, + ) + from google.api_core.operations_v1.operations_rest_client_async import AsyncOperationsRestClient + + __all__ += ["AsyncOperationsRestClient", "AsyncOperationsRestTransport"] +except ImportError: + # This import requires the `async_rest` extra. 
+ # Don't raise an exception if `AsyncOperationsRestTransport` cannot be imported + # as other transports are still available. + pass diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_base_client.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_base_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7ab2e5b0cf29cc4ebe4e6b28c5d470efca87b56 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_base_client.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_client.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e3fe91e87db8749985ba37cecf6c13ec4865b76 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/abstract_operations_client.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_async_client.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_async_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0eb1cad7458bbd41e758c7ebf44387198205158 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_async_client.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b31dcf998d8cb69d310efc24ad2b9f61e8bd33c3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client_config.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client_config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2953d294bde6ad48e8607b3d712631681aaa0516 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_client_config.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_rest_client_async.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_rest_client_async.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f397d737dea28f01b5fc0351ab2432c53b9a2d1 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/operations_rest_client_async.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2c215a49c9b3b78f9a2fb4546f7bde6521c6b5d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_async.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_async.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f5b70c8eaef9814efc39deac5feb015eb3e66b06 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_async.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_base.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_base.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6aa5aa3e0f2040f628572357e5f07decbd5daf5 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/__pycache__/pagers_base.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/abstract_operations_base_client.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/abstract_operations_base_client.py new file mode 100644 index 0000000000000000000000000000000000000000..160c2a88f45cb1856e5e7b76b05132adeef59d59 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/abstract_operations_base_client.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Type, Union + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core.operations_v1.transports.base import ( + DEFAULT_CLIENT_INFO, + OperationsTransport, +) +from google.api_core.operations_v1.transports.rest import OperationsRestTransport + +try: + from google.api_core.operations_v1.transports.rest_asyncio import ( + AsyncOperationsRestTransport, + ) + + HAS_ASYNC_REST_DEPENDENCIES = True +except ImportError as e: + HAS_ASYNC_REST_DEPENDENCIES = False + ASYNC_REST_EXCEPTION = e + +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.auth.transport import mtls # type: ignore + + +class AbstractOperationsBaseClientMeta(type): + """Metaclass for the Operations Base client. + + This provides base class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[OperationsTransport]] + _transport_registry["rest"] = OperationsRestTransport + if HAS_ASYNC_REST_DEPENDENCIES: + _transport_registry["rest_asyncio"] = AsyncOperationsRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[OperationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if ( + label == "rest_asyncio" and not HAS_ASYNC_REST_DEPENDENCIES + ): # pragma: NO COVER + raise ASYNC_REST_EXCEPTION + + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AbstractOperationsBaseClient(metaclass=AbstractOperationsBaseClientMeta): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be + designed to return [Operation][google.api_core.operations_v1.Operation] to the + client, and the client can use this interface to receive the real + response asynchronously by polling the operation resource, or pass + the operation resource to another API (such as Google Cloud Pub/Sub + API) to receive the response. Any API service that returns + long-running operations should implement the ``Operations`` + interface so developers can have a consistent client experience. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "longrunning.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """ + This class method should be overridden by the subclasses. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Raises: + NotImplementedError: If the method is called on the base class. + """ + raise NotImplementedError("`from_service_account_info` is not implemented.") + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """ + This class method should be overridden by the subclasses. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Raises: + NotImplementedError: If the method is called on the base class. + """ + raise NotImplementedError("`from_service_account_file` is not implemented.") + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> OperationsTransport: + """Returns the transport used by the client instance. + + Returns: + OperationsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + 
) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, OperationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the operations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, OperationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + client_cert_source_func = None + is_mtls = False + if use_client_cert == "true": + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, OperationsTransport): + # transport is a OperationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/operations_client.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/operations_client.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d3fd55c783fefb0af9795728e0b0ab76c325ad --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/operations_client.py @@ -0,0 +1,378 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""A client for the google.longrunning.operations meta-API. + +This is a client that deals with long-running operations that follow the +pattern outlined by the `Google API Style Guide`_. + +When an API method normally takes long time to complete, it can be designed to +return ``Operation`` to the client, and the client can use this interface to +receive the real response asynchronously by polling the operation resource to +receive the response. + +It is not a separate service, but rather an interface implemented by a larger +service. The protocol-level definition is available at +`google/longrunning/operations.proto`_. Typically, this will be constructed +automatically by another client class to deal with operations. + +.. _Google API Style Guide: + https://cloud.google.com/apis/design/design_pattern + s#long_running_operations +.. _google/longrunning/operations.proto: + https://github.com/googleapis/googleapis/blob/master/google/longrunning + /operations.proto +""" + +import functools + +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import page_iterator +from google.api_core import retry as retries +from google.api_core import timeout as timeouts +from google.longrunning import operations_pb2 +from grpc import Compression + + +class OperationsClient(object): + """Client for interacting with long-running operations within a service. + + Args: + channel (grpc.Channel): The gRPC channel associated with the service + that implements the ``google.longrunning.operations`` interface. + client_config (dict): + A dictionary of call options for each method. If not specified + the default configuration is used. + """ + + def __init__(self, channel, client_config=None): + # Create the gRPC client stub. 
+ self.operations_stub = operations_pb2.OperationsStub(channel) + + default_retry = retries.Retry( + initial=0.1, # seconds + maximum=60.0, # seconds + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + timeout=600.0, # seconds + ) + default_timeout = timeouts.TimeToDeadlineTimeout(timeout=600.0) + + default_compression = Compression.NoCompression + + self._get_operation = gapic_v1.method.wrap_method( + self.operations_stub.GetOperation, + default_retry=default_retry, + default_timeout=default_timeout, + default_compression=default_compression, + ) + + self._list_operations = gapic_v1.method.wrap_method( + self.operations_stub.ListOperations, + default_retry=default_retry, + default_timeout=default_timeout, + default_compression=default_compression, + ) + + self._cancel_operation = gapic_v1.method.wrap_method( + self.operations_stub.CancelOperation, + default_retry=default_retry, + default_timeout=default_timeout, + default_compression=default_compression, + ) + + self._delete_operation = gapic_v1.method.wrap_method( + self.operations_stub.DeleteOperation, + default_retry=default_retry, + default_timeout=default_timeout, + default_compression=default_compression, + ) + + # Service calls + def get_operation( + self, + name, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + compression=gapic_v1.method.DEFAULT, + metadata=None, + ): + """Gets the latest state of a long-running operation. + + Clients can use this method to poll the operation result at intervals + as recommended by the API service. + + Example: + >>> from google.api_core import operations_v1 + >>> api = operations_v1.OperationsClient() + >>> name = '' + >>> response = api.get_operation(name) + + Args: + name (str): The name of the operation resource. + retry (google.api_core.retry.Retry): The retry strategy to use + when invoking the RPC. 
If unspecified, the default retry from + the client configuration will be used. If ``None``, then this + method will not retry the RPC at all. + timeout (float): The amount of time in seconds to wait for the RPC + to complete. Note that if ``retry`` is used, this timeout + applies to each individual attempt and the overall time it + takes for this method to complete may be longer. If + unspecified, the the default timeout in the client + configuration is used. If ``None``, then the RPC method will + not time out. + compression (grpc.Compression): An element of grpc.compression + e.g. grpc.compression.Gzip. + metadata (Optional[List[Tuple[str, str]]]): + Additional gRPC metadata. + + Returns: + google.longrunning.operations_pb2.Operation: The state of the + operation. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If an error occurred + while invoking the RPC, the appropriate ``GoogleAPICallError`` + subclass will be raised. + """ + request = operations_pb2.GetOperationRequest(name=name) + + # Add routing header + metadata = metadata or [] + metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name})) + + return self._get_operation( + request, + retry=retry, + timeout=timeout, + compression=compression, + metadata=metadata, + ) + + def list_operations( + self, + name, + filter_, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + compression=gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists operations that match the specified filter in the request. 
+ + Example: + >>> from google.api_core import operations_v1 + >>> api = operations_v1.OperationsClient() + >>> name = '' + >>> + >>> # Iterate over all results + >>> for operation in api.list_operations(name): + >>> # process operation + >>> pass + >>> + >>> # Or iterate over results one page at a time + >>> iter = api.list_operations(name) + >>> for page in iter.pages: + >>> for operation in page: + >>> # process operation + >>> pass + + Args: + name (str): The name of the operation collection. + filter_ (str): The standard list filter. + retry (google.api_core.retry.Retry): The retry strategy to use + when invoking the RPC. If unspecified, the default retry from + the client configuration will be used. If ``None``, then this + method will not retry the RPC at all. + timeout (float): The amount of time in seconds to wait for the RPC + to complete. Note that if ``retry`` is used, this timeout + applies to each individual attempt and the overall time it + takes for this method to complete may be longer. If + unspecified, the the default timeout in the client + configuration is used. If ``None``, then the RPC method will + not time out. + compression (grpc.Compression): An element of grpc.compression + e.g. grpc.compression.Gzip. + metadata (Optional[List[Tuple[str, str]]]): Additional gRPC + metadata. + + Returns: + google.api_core.page_iterator.Iterator: An iterator that yields + :class:`google.longrunning.operations_pb2.Operation` instances. + + Raises: + google.api_core.exceptions.MethodNotImplemented: If the server + does not support this method. Services are not required to + implement this method. + google.api_core.exceptions.GoogleAPICallError: If an error occurred + while invoking the RPC, the appropriate ``GoogleAPICallError`` + subclass will be raised. + """ + # Create the request object. 
+ request = operations_pb2.ListOperationsRequest(name=name, filter=filter_) + + # Add routing header + metadata = metadata or [] + metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name})) + + # Create the method used to fetch pages + method = functools.partial( + self._list_operations, + retry=retry, + timeout=timeout, + compression=compression, + metadata=metadata, + ) + + iterator = page_iterator.GRPCIterator( + client=None, + method=method, + request=request, + items_field="operations", + request_token_field="page_token", + response_token_field="next_page_token", + ) + + return iterator + + def cancel_operation( + self, + name, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + compression=gapic_v1.method.DEFAULT, + metadata=None, + ): + """Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success is + not guaranteed. Clients can use :meth:`get_operation` or service- + specific methods to check whether the cancellation succeeded or whether + the operation completed despite cancellation. On successful + cancellation, the operation is not deleted; instead, it becomes an + operation with an ``Operation.error`` value with a + ``google.rpc.Status.code`` of ``1``, corresponding to + ``Code.CANCELLED``. + + Example: + >>> from google.api_core import operations_v1 + >>> api = operations_v1.OperationsClient() + >>> name = '' + >>> api.cancel_operation(name) + + Args: + name (str): The name of the operation resource to be cancelled. + retry (google.api_core.retry.Retry): The retry strategy to use + when invoking the RPC. If unspecified, the default retry from + the client configuration will be used. If ``None``, then this + method will not retry the RPC at all. + timeout (float): The amount of time in seconds to wait for the RPC + to complete. 
Note that if ``retry`` is used, this timeout + applies to each individual attempt and the overall time it + takes for this method to complete may be longer. If + unspecified, the the default timeout in the client + configuration is used. If ``None``, then the RPC method will + not time out. + compression (grpc.Compression): An element of grpc.compression + e.g. grpc.compression.Gzip. + metadata (Optional[List[Tuple[str, str]]]): Additional gRPC + metadata. + + Raises: + google.api_core.exceptions.MethodNotImplemented: If the server + does not support this method. Services are not required to + implement this method. + google.api_core.exceptions.GoogleAPICallError: If an error occurred + while invoking the RPC, the appropriate ``GoogleAPICallError`` + subclass will be raised. + """ + # Create the request object. + request = operations_pb2.CancelOperationRequest(name=name) + + # Add routing header + metadata = metadata or [] + metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name})) + + self._cancel_operation( + request, + retry=retry, + timeout=timeout, + compression=compression, + metadata=metadata, + ) + + def delete_operation( + self, + name, + retry=gapic_v1.method.DEFAULT, + timeout=gapic_v1.method.DEFAULT, + compression=gapic_v1.method.DEFAULT, + metadata=None, + ): + """Deletes a long-running operation. + + This method indicates that the client is no longer interested in the + operation result. It does not cancel the operation. + + Example: + >>> from google.api_core import operations_v1 + >>> api = operations_v1.OperationsClient() + >>> name = '' + >>> api.delete_operation(name) + + Args: + name (str): The name of the operation resource to be deleted. + retry (google.api_core.retry.Retry): The retry strategy to use + when invoking the RPC. If unspecified, the default retry from + the client configuration will be used. If ``None``, then this + method will not retry the RPC at all. 
+ timeout (float): The amount of time in seconds to wait for the RPC + to complete. Note that if ``retry`` is used, this timeout + applies to each individual attempt and the overall time it + takes for this method to complete may be longer. If + unspecified, the the default timeout in the client + configuration is used. If ``None``, then the RPC method will + not time out. + compression (grpc.Compression): An element of grpc.compression + e.g. grpc.compression.Gzip. + metadata (Optional[List[Tuple[str, str]]]): Additional gRPC + metadata. + + Raises: + google.api_core.exceptions.MethodNotImplemented: If the server + does not support this method. Services are not required to + implement this method. + google.api_core.exceptions.GoogleAPICallError: If an error occurred + while invoking the RPC, the appropriate ``GoogleAPICallError`` + subclass will be raised. + """ + # Create the request object. + request = operations_pb2.DeleteOperationRequest(name=name) + + # Add routing header + metadata = metadata or [] + metadata.append(gapic_v1.routing_header.to_grpc_metadata({"name": name})) + + self._delete_operation( + request, + retry=retry, + timeout=timeout, + compression=compression, + metadata=metadata, + ) diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__init__.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c24ce6efa236a846f7315d1ef9febc6b1a27751 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import cast, Dict, Tuple + +from .base import OperationsTransport +from .rest import OperationsRestTransport + +# Compile a registry of transports. +_transport_registry: Dict[str, OperationsTransport] = OrderedDict() +_transport_registry["rest"] = cast(OperationsTransport, OperationsRestTransport) + +__all__: Tuple[str, ...] = ("OperationsTransport", "OperationsRestTransport") + +try: + from .rest_asyncio import AsyncOperationsRestTransport + + __all__ += ("AsyncOperationsRestTransport",) + _transport_registry["rest_asyncio"] = cast( + OperationsTransport, AsyncOperationsRestTransport + ) +except ImportError: + # This import requires the `async_rest` extra. + # Don't raise an exception if `AsyncOperationsRestTransport` cannot be imported + # as other transports are still available. 
+ pass diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..490d38afc1109f380db767893bb3498d4dd005da Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/base.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..557a796ba934ed10d3e9383bb70dbccf33f1af00 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/base.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4eda880120d7bf46f42feaa9828d1cfc4357af1 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest_asyncio.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest_asyncio.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70422a5d802369c73eeb9941b479a9f2ac95025e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/__pycache__/rest_asyncio.cpython-311.pyc differ diff 
--git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/base.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/base.py new file mode 100644 index 0000000000000000000000000000000000000000..50e137612cfaa7c050653e29fa0002b7994d0814 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/base.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +import re +from typing import Awaitable, Callable, Optional, Sequence, Union + +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import version +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account # type: ignore +import google.protobuf +from google.protobuf import empty_pb2, json_format # type: ignore +from grpc import Compression + + +PROTOBUF_VERSION = google.protobuf.__version__ + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=version.__version__, +) + + +class OperationsTransport(abc.ABC): + """Abstract transport class for Operations.""" + + AUTH_SCOPES = () + + DEFAULT_HOST: str = "longrunning.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + # TODO(https://github.com/googleapis/python-api-core/issues/709): update type hint for credentials to include `google.auth.aio.Credentials`. + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme="https", + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" # pragma: NO COVER + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_retry=retries.Retry( + initial=0.5, + maximum=10.0, + multiplier=2.0, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=10.0, + ), + default_timeout=10.0, + default_compression=Compression.NoCompression, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_retry=retries.Retry( + initial=0.5, + maximum=10.0, + multiplier=2.0, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=10.0, + ), + default_timeout=10.0, + default_compression=Compression.NoCompression, + client_info=client_info, + ), + self.delete_operation: gapic_v1.method.wrap_method( + self.delete_operation, + default_retry=retries.Retry( + initial=0.5, + maximum=10.0, + multiplier=2.0, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=10.0, + ), + default_timeout=10.0, + 
default_compression=Compression.NoCompression, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_retry=retries.Retry( + initial=0.5, + maximum=10.0, + multiplier=2.0, + predicate=retries.if_exception_type( + core_exceptions.ServiceUnavailable, + ), + deadline=10.0, + ), + default_timeout=10.0, + default_compression=Compression.NoCompression, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + def _convert_protobuf_message_to_dict( + self, message: google.protobuf.message.Message + ): + r"""Converts protobuf message to a dictionary. + + When the dictionary is encoded to JSON, it conforms to proto3 JSON spec. + + Args: + message(google.protobuf.message.Message): The protocol buffers message + instance to serialize. + + Returns: + A dict representation of the protocol buffer message. + """ + # TODO(https://github.com/googleapis/python-api-core/issues/643): For backwards compatibility + # with protobuf 3.x 4.x, Remove once support for protobuf 3.x and 4.x is dropped. 
+ if PROTOBUF_VERSION[0:2] in ["3.", "4."]: + result = json_format.MessageToDict( + message, + preserving_proto_field_name=True, + including_default_value_fields=True, # type: ignore # backward compatibility + ) + else: + result = json_format.MessageToDict( + message, + preserving_proto_field_name=True, + always_print_fields_with_no_presence=True, + ) + + return result + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[ + [operations_pb2.DeleteOperationRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[ + [operations_pb2.CancelOperationRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("OperationsTransport",) diff --git a/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/rest.py b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/rest.py new file mode 100644 index 0000000000000000000000000000000000000000..766a6685bcae7f0e5b9fda49c689d963560477e5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/api_core/operations_v1/transports/rest.py @@ -0,0 +1,473 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from requests import __version__ as requests_version + +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import path_template # type: ignore +from google.api_core import rest_helpers # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.requests import AuthorizedSession # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import json_format # type: ignore +import google.protobuf + +import grpc +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, OperationsTransport + +PROTOBUF_VERSION = google.protobuf.__version__ + +OptionalRetry = Union[retries.Retry, object] + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=f"requests@{requests_version}", +) + + +class OperationsRestTransport(OperationsTransport): + """REST backend transport for Operations. + + Manages long-running operations with an API service. 
+ + When an API method normally takes long time to complete, it can be + designed to return [Operation][google.api_core.operations_v1.Operation] to the + client, and the client can use this interface to receive the real + response asynchronously by polling the operation resource, or pass + the operation resource to another API (such as Google Cloud Pub/Sub + API) to receive the response. Any API service that returns + long-running operations should implement the ``Operations`` + interface so developers can have a consistent client experience. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "longrunning.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + http_options: Optional[Dict] = None, + path_prefix: str = "v1", + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. 
This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + http_options: a dictionary of http_options for transcoding, to override + the defaults from operations.proto. Each method has an entry + with the corresponding http rules as value. + path_prefix: path prefix (usually represents API version). Set to + "v1" by default. + + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + # TODO(https://github.com/googleapis/python-api-core/issues/720): Add wrap logic directly to the property methods for callables. 
+ self._prep_wrapped_messages(client_info) + self._http_options = http_options or {} + self._path_prefix = path_prefix + + def _list_operations( + self, + request: operations_pb2.ListOperationsRequest, + *, + # TODO(https://github.com/googleapis/python-api-core/issues/723): Leverage `retry` + # to allow configuring retryable error codes. + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + compression: Optional[grpc.Compression] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Call the list operations method over HTTP. + + Args: + request (~.operations_pb2.ListOperationsRequest): + The request object. The request message for + [Operations.ListOperations][google.api_core.operations_v1.Operations.ListOperations]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.ListOperationsResponse: + The response message for + [Operations.ListOperations][google.api_core.operations_v1.Operations.ListOperations]. 
+ + """ + + http_options = [ + { + "method": "get", + "uri": "/{}/{{name=**}}/operations".format(self._path_prefix), + }, + ] + if "google.longrunning.Operations.ListOperations" in self._http_options: + http_options = self._http_options[ + "google.longrunning.Operations.ListOperations" + ] + + request_kwargs = self._convert_protobuf_message_to_dict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params_request = operations_pb2.ListOperationsRequest() + json_format.ParseDict(transcoded_request["query_params"], query_params_request) + query_params = json_format.MessageToDict( + query_params_request, + preserving_proto_field_name=False, + use_integers_for_enums=False, + ) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + # TODO(https://github.com/googleapis/python-api-core/issues/721): Update incorrect use of `uri`` variable name. + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + api_response = operations_pb2.ListOperationsResponse() + json_format.Parse(response.content, api_response, ignore_unknown_fields=False) + return api_response + + def _get_operation( + self, + request: operations_pb2.GetOperationRequest, + *, + # TODO(https://github.com/googleapis/python-api-core/issues/723): Leverage `retry` + # to allow configuring retryable error codes. 
+ retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + compression: Optional[grpc.Compression] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the get operation method over HTTP. + + Args: + request (~.operations_pb2.GetOperationRequest): + The request object. The request message for + [Operations.GetOperation][google.api_core.operations_v1.Operations.GetOperation]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a long- + running operation that is the result of a + network API call. + + """ + + http_options = [ + { + "method": "get", + "uri": "/{}/{{name=**/operations/*}}".format(self._path_prefix), + }, + ] + if "google.longrunning.Operations.GetOperation" in self._http_options: + http_options = self._http_options[ + "google.longrunning.Operations.GetOperation" + ] + + request_kwargs = self._convert_protobuf_message_to_dict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params_request = operations_pb2.GetOperationRequest() + json_format.ParseDict(transcoded_request["query_params"], query_params_request) + query_params = json_format.MessageToDict( + query_params_request, + preserving_proto_field_name=False, + use_integers_for_enums=False, + ) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + # TODO(https://github.com/googleapis/python-api-core/issues/721): Update incorrect use of `uri`` variable name. 
+ response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + api_response = operations_pb2.Operation() + json_format.Parse(response.content, api_response, ignore_unknown_fields=False) + return api_response + + def _delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + *, + # TODO(https://github.com/googleapis/python-api-core/issues/723): Leverage `retry` + # to allow configuring retryable error codes. + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + compression: Optional[grpc.Compression] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> empty_pb2.Empty: + r"""Call the delete operation method over HTTP. + + Args: + request (~.operations_pb2.DeleteOperationRequest): + The request object. The request message for + [Operations.DeleteOperation][google.api_core.operations_v1.Operations.DeleteOperation]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options = [ + { + "method": "delete", + "uri": "/{}/{{name=**/operations/*}}".format(self._path_prefix), + }, + ] + if "google.longrunning.Operations.DeleteOperation" in self._http_options: + http_options = self._http_options[ + "google.longrunning.Operations.DeleteOperation" + ] + + request_kwargs = self._convert_protobuf_message_to_dict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params_request = operations_pb2.DeleteOperationRequest() + json_format.ParseDict(transcoded_request["query_params"], query_params_request) + query_params = json_format.MessageToDict( + query_params_request, + preserving_proto_field_name=False, + use_integers_for_enums=False, + ) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + # TODO(https://github.com/googleapis/python-api-core/issues/721): Update incorrect use of `uri`` variable name. + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return empty_pb2.Empty() + + def _cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + *, + # TODO(https://github.com/googleapis/python-api-core/issues/723): Leverage `retry` + # to allow configuring retryable error codes. + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + compression: Optional[grpc.Compression] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> empty_pb2.Empty: + r"""Call the cancel operation method over HTTP. 
+ + Args: + request (~.operations_pb2.CancelOperationRequest): + The request object. The request message for + [Operations.CancelOperation][google.api_core.operations_v1.Operations.CancelOperation]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options = [ + { + "method": "post", + "uri": "/{}/{{name=**/operations/*}}:cancel".format(self._path_prefix), + "body": "*", + }, + ] + if "google.longrunning.Operations.CancelOperation" in self._http_options: + http_options = self._http_options[ + "google.longrunning.Operations.CancelOperation" + ] + + request_kwargs = self._convert_protobuf_message_to_dict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + # Jsonify the request body + body_request = operations_pb2.CancelOperationRequest() + json_format.ParseDict(transcoded_request["body"], body_request) + body = json_format.MessageToDict( + body_request, + preserving_proto_field_name=False, + use_integers_for_enums=False, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params_request = operations_pb2.CancelOperationRequest() + json_format.ParseDict(transcoded_request["query_params"], query_params_request) + query_params = json_format.MessageToDict( + query_params_request, + preserving_proto_field_name=False, + use_integers_for_enums=False, + ) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + # TODO(https://github.com/googleapis/python-api-core/issues/721): Update incorrect use of `uri`` variable name. 
# -*- coding: utf-8 -*-
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Asynchronous REST transport for the google.longrunning.Operations API."""

import json
from typing import Any, Callable, Coroutine, Dict, Optional, Sequence, Tuple

from google.auth import __version__ as auth_version

try:
    from google.auth.aio.transport.sessions import AsyncAuthorizedSession  # type: ignore
except ImportError as e:  # pragma: NO COVER
    raise ImportError(
        "The `async_rest` extra of `google-api-core` is required to use long-running operations. Install it by running "
        "`pip install google-api-core[async_rest]`."
    ) from e

from google.api_core import exceptions as core_exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import path_template  # type: ignore
from google.api_core import rest_helpers  # type: ignore
from google.api_core import retry_async as retries_async  # type: ignore
from google.auth.aio import credentials as ga_credentials_async  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.protobuf import empty_pb2  # type: ignore
from google.protobuf import json_format  # type: ignore

from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, OperationsTransport

DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=f"google-auth@{auth_version}",
)


class AsyncOperationsRestTransport(OperationsTransport):
    """Asynchronous REST backend transport for Operations.

    Manages async long-running operations with an API service.

    When an API method normally takes long time to complete, it can be
    designed to return [Operation][google.api_core.operations_v1.Operation] to the
    client, and the client can use this interface to receive the real
    response asynchronously by polling the operation resource, or pass
    the operation resource to another API (such as Google Cloud Pub/Sub
    API) to receive the response. Any API service that returns
    long-running operations should implement the ``Operations``
    interface so developers can have a consistent client experience.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(
        self,
        *,
        host: str = "longrunning.googleapis.com",
        credentials: Optional[ga_credentials_async.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        http_options: Optional[Dict] = None,
        path_prefix: str = "v1",
        # TODO(https://github.com/googleapis/python-api-core/issues/715): Add docstring for `credentials_file` to async REST transport.
        # TODO(https://github.com/googleapis/python-api-core/issues/716): Add docstring for `scopes` to async REST transport.
        # TODO(https://github.com/googleapis/python-api-core/issues/717): Add docstring for `quota_project_id` to async REST transport.
        # TODO(https://github.com/googleapis/python-api-core/issues/718): Add docstring for `client_cert_source` to async REST transport.
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.aio.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers, "http" can be
                specified.
            http_options: a dictionary of http_options for transcoding, to
                override the defaults from operations.proto. Each method has
                an entry with the corresponding http rules as value.
            path_prefix: path prefix (usually represents API version). Set to
                "v1" by default.

        Raises:
            core_exceptions.AsyncRestUnsupportedParameterError: if any of the
                parameters not yet supported by the async REST transport
                (``credentials_file``, ``scopes``, ``quota_project_id``,
                ``client_cert_source_for_mtls``) is provided.
        """
        # NOTE: the duplicate `client_cert_source` key present in an earlier
        # revision of this dict literal has been removed — duplicate dict keys
        # silently overwrite each other and hide copy-paste errors.
        unsupported_params = {
            # TODO(https://github.com/googleapis/python-api-core/issues/715): Add support for `credentials_file` to async REST transport.
            "google.api_core.client_options.ClientOptions.credentials_file": credentials_file,
            # TODO(https://github.com/googleapis/python-api-core/issues/716): Add support for `scopes` to async REST transport.
            "google.api_core.client_options.ClientOptions.scopes": scopes,
            # TODO(https://github.com/googleapis/python-api-core/issues/717): Add support for `quota_project_id` to async REST transport.
            "google.api_core.client_options.ClientOptions.quota_project_id": quota_project_id,
            # TODO(https://github.com/googleapis/python-api-core/issues/718): Add support for `client_cert_source` to async REST transport.
            "google.api_core.client_options.ClientOptions.client_cert_source": client_cert_source_for_mtls,
        }
        provided_unsupported_params = [
            name for name, value in unsupported_params.items() if value is not None
        ]
        if provided_unsupported_params:
            raise core_exceptions.AsyncRestUnsupportedParameterError(
                f"The following provided parameters are not supported for `transport=rest_asyncio`: {', '.join(provided_unsupported_params)}"
            )

        super().__init__(
            host=host,
            # TODO(https://github.com/googleapis/python-api-core/issues/709): Remove `type: ignore` when the linked issue is resolved.
            credentials=credentials,  # type: ignore
            client_info=client_info,
            # TODO(https://github.com/googleapis/python-api-core/issues/725): Set always_use_jwt_access token when supported.
            always_use_jwt_access=False,
        )
        # TODO(https://github.com/googleapis/python-api-core/issues/708): add support for
        # `default_host` in AsyncAuthorizedSession for feature parity with the synchronous
        # code.
        # TODO(https://github.com/googleapis/python-api-core/issues/709): Remove `type: ignore` when the linked issue is resolved.
        self._session = AsyncAuthorizedSession(self._credentials)  # type: ignore
        # TODO(https://github.com/googleapis/python-api-core/issues/720): Add wrap logic directly to the property methods for callables.
        self._prep_wrapped_messages(client_info)
        self._http_options = http_options or {}
        self._path_prefix = path_prefix

    def _prep_wrapped_messages(self, client_info):
        """Precompute the wrapped methods.

        All four operations RPCs share identical retry/timeout defaults, so
        the configuration is built once per method in a loop instead of being
        copy-pasted four times.
        """

        def _default_retry():
            # A fresh AsyncRetry per method, mirroring the previous behavior of
            # one independent retry object per wrapped RPC.
            return retries_async.AsyncRetry(
                initial=0.5,
                maximum=10.0,
                multiplier=2.0,
                predicate=retries_async.if_exception_type(
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=10.0,
            )

        self._wrapped_methods = {
            rpc: gapic_v1.method_async.wrap_method(
                rpc,
                default_retry=_default_retry(),
                default_timeout=10.0,
                client_info=client_info,
                kind="rest_asyncio",
            )
            for rpc in (
                self.list_operations,
                self.get_operation,
                self.delete_operation,
                self.cancel_operation,
            )
        }

    # ---- shared request plumbing -------------------------------------------

    def _resolved_http_options(self, rpc_full_name, default_options):
        """Return the user-supplied http_options for *rpc_full_name*, else defaults."""
        return self._http_options.get(rpc_full_name, default_options)

    @staticmethod
    def _to_json_fields(message_cls, fields):
        """Round-trip *fields* through proto *message_cls* to canonical JSON names.

        ParseDict validates the field names; MessageToDict re-emits them using
        lowerCamelCase JSON names with enums as strings, matching what the REST
        endpoint expects.
        """
        message = message_cls()
        json_format.ParseDict(fields, message)
        return json_format.MessageToDict(
            message,
            preserving_proto_field_name=False,
            use_integers_for_enums=False,
        )

    async def _send_request(self, method, uri, query_params, metadata, timeout, body=None):
        """Issue one HTTP request via the authorized session and return the response."""
        headers = dict(metadata)
        headers["Content-Type"] = "application/json"
        extra = {}
        if body is not None:
            extra["data"] = body
        # TODO(https://github.com/googleapis/python-api-core/issues/721): Update incorrect use of `uri` variable name.
        return await getattr(self._session, method)(
            "{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            **extra,
        )

    def _raise_http_error(self, response, method, uri, content):
        """Raise the appropriate core_exceptions.GoogleAPICallError subclass."""
        payload = json.loads(content.decode("utf-8"))
        request_url = "{host}{uri}".format(host=self._host, uri=uri)
        raise core_exceptions.format_http_response_error(response, method, request_url, payload)  # type: ignore

    # ---- RPC implementations -----------------------------------------------

    async def _list_operations(
        self,
        request: operations_pb2.ListOperationsRequest,
        *,
        # TODO(https://github.com/googleapis/python-api-core/issues/722): Leverage `retry`
        # to allow configuring retryable error codes.
        retry=gapic_v1.method_async.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.ListOperationsResponse:
        r"""Asynchronously call the list operations method over HTTP.

        Args:
            request (~.operations_pb2.ListOperationsRequest):
                The request object. The request message for
                [Operations.ListOperations][google.api_core.operations_v1.Operations.ListOperations].
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.operations_pb2.ListOperationsResponse:
                The response message for
                [Operations.ListOperations][google.api_core.operations_v1.Operations.ListOperations].
        """
        http_options = self._resolved_http_options(
            "google.longrunning.Operations.ListOperations",
            [
                {
                    "method": "get",
                    "uri": "/{}/{{name=**}}/operations".format(self._path_prefix),
                },
            ],
        )
        request_kwargs = self._convert_protobuf_message_to_dict(request)
        transcoded_request = path_template.transcode(http_options, **request_kwargs)

        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        query_params = self._to_json_fields(
            operations_pb2.ListOperationsRequest, transcoded_request["query_params"]
        )

        response = await self._send_request(method, uri, query_params, metadata, timeout)
        content = await response.read()
        if response.status_code >= 400:
            self._raise_http_error(response, method, uri, content)

        api_response = operations_pb2.ListOperationsResponse()
        json_format.Parse(content, api_response, ignore_unknown_fields=False)
        return api_response

    async def _get_operation(
        self,
        request: operations_pb2.GetOperationRequest,
        *,
        # TODO(https://github.com/googleapis/python-api-core/issues/722): Leverage `retry`
        # to allow configuring retryable error codes.
        retry=gapic_v1.method_async.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operations_pb2.Operation:
        r"""Asynchronously call the get operation method over HTTP.

        Args:
            request (~.operations_pb2.GetOperationRequest):
                The request object. The request message for
                [Operations.GetOperation][google.api_core.operations_v1.Operations.GetOperation].
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.operations_pb2.Operation:
                This resource represents a long-running operation that is the
                result of a network API call.
        """
        http_options = self._resolved_http_options(
            "google.longrunning.Operations.GetOperation",
            [
                {
                    "method": "get",
                    "uri": "/{}/{{name=**/operations/*}}".format(self._path_prefix),
                },
            ],
        )
        request_kwargs = self._convert_protobuf_message_to_dict(request)
        transcoded_request = path_template.transcode(http_options, **request_kwargs)

        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        query_params = self._to_json_fields(
            operations_pb2.GetOperationRequest, transcoded_request["query_params"]
        )

        response = await self._send_request(method, uri, query_params, metadata, timeout)
        content = await response.read()
        if response.status_code >= 400:
            self._raise_http_error(response, method, uri, content)

        api_response = operations_pb2.Operation()
        json_format.Parse(content, api_response, ignore_unknown_fields=False)
        return api_response

    async def _delete_operation(
        self,
        request: operations_pb2.DeleteOperationRequest,
        *,
        # TODO(https://github.com/googleapis/python-api-core/issues/722): Leverage `retry`
        # to allow configuring retryable error codes.
        retry=gapic_v1.method_async.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> empty_pb2.Empty:
        r"""Asynchronously call the delete operation method over HTTP.

        Args:
            request (~.operations_pb2.DeleteOperationRequest):
                The request object. The request message for
                [Operations.DeleteOperation][google.api_core.operations_v1.Operations.DeleteOperation].
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.empty_pb2.Empty: an empty message on success.
        """
        http_options = self._resolved_http_options(
            "google.longrunning.Operations.DeleteOperation",
            [
                {
                    "method": "delete",
                    "uri": "/{}/{{name=**/operations/*}}".format(self._path_prefix),
                },
            ],
        )
        request_kwargs = self._convert_protobuf_message_to_dict(request)
        transcoded_request = path_template.transcode(http_options, **request_kwargs)

        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        query_params = self._to_json_fields(
            operations_pb2.DeleteOperationRequest, transcoded_request["query_params"]
        )

        response = await self._send_request(method, uri, query_params, metadata, timeout)
        # The body is only needed for the error payload; success returns Empty.
        if response.status_code >= 400:
            content = await response.read()
            self._raise_http_error(response, method, uri, content)

        return empty_pb2.Empty()

    async def _cancel_operation(
        self,
        request: operations_pb2.CancelOperationRequest,
        *,
        # TODO(https://github.com/googleapis/python-api-core/issues/722): Leverage `retry`
        # to allow configuring retryable error codes.
        retry=gapic_v1.method_async.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> empty_pb2.Empty:
        r"""Asynchronously call the cancel operation method over HTTP.

        Args:
            request (~.operations_pb2.CancelOperationRequest):
                The request object. The request message for
                [Operations.CancelOperation][google.api_core.operations_v1.Operations.CancelOperation].
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.empty_pb2.Empty: an empty message on success.
        """
        http_options = self._resolved_http_options(
            "google.longrunning.Operations.CancelOperation",
            [
                {
                    "method": "post",
                    "uri": "/{}/{{name=**/operations/*}}:cancel".format(self._path_prefix),
                    "body": "*",
                },
            ],
        )
        request_kwargs = self._convert_protobuf_message_to_dict(request)
        transcoded_request = path_template.transcode(http_options, **request_kwargs)

        # Cancel is the only RPC here with a request body.
        body = self._to_json_fields(
            operations_pb2.CancelOperationRequest, transcoded_request["body"]
        )
        uri = transcoded_request["uri"]
        method = transcoded_request["method"]
        query_params = self._to_json_fields(
            operations_pb2.CancelOperationRequest, transcoded_request["query_params"]
        )

        response = await self._send_request(
            method, uri, query_params, metadata, timeout, body=body
        )
        if response.status_code >= 400:
            content = await response.read()
            self._raise_http_error(response, method, uri, content)

        return empty_pb2.Empty()

    # ---- public callables --------------------------------------------------

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Coroutine[Any, Any, operations_pb2.ListOperationsResponse],
    ]:
        return self._list_operations

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Coroutine[Any, Any, operations_pb2.Operation],
    ]:
        return self._get_operation

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest], Coroutine[Any, Any, empty_pb2.Empty]
    ]:
        return self._delete_operation

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest], Coroutine[Any, Any, empty_pb2.Empty]
    ]:
        return self._cancel_operation


__all__ = ("AsyncOperationsRestTransport",)
a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/caching.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/caching.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..040cfbb819163f09acce09a51787cab11094dd4a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/caching.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/embedding.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/embedding.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04222bbe4bcd01328106a3b462237560ba76be53 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/embedding.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/operations.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/operations.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8538a7edb05645363fc8e3814658dc72eb725614 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/operations.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/permission.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/permission.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31fe2a947bf5eff3d09cbb780910f975c4389505 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/permission.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/retriever.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/retriever.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5109c383279d0ccd3a9faf22e53167b5c9d8d404 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/retriever.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/version.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/version.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b02d35e97e5f16323d6eeb1a0c89a07bab796988 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/__pycache__/version.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a91bcd0835da02fcec682d2d662fec71c2712351 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/argument_parser.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/argument_parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69dab1f78e683dbf01a073637e5b3d27f02eedb0 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/argument_parser.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/cmd_line_parser.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/cmd_line_parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2eb84fa9cf74758c470bfbf82315dcf0a4d6663 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/cmd_line_parser.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/command_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/command_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0af6661c62b5c338932b8bd239d3f9190a991cb4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/command_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/compile_cmd.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/compile_cmd.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65ffc16cf0c554490a2896bdcbf59ccb0ee20906 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/compile_cmd.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/gspread_client.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/gspread_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..412fc228e8c8a66daa3886a28baa64203597b12f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/gspread_client.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/html_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/html_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ac7c76efd17cc2651e097552a75241d45600697 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/html_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23420c0df686e35fb5c120ecd98eef5e044a67b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env_impl.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env_impl.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61fa7760a63dc40bd5df76bc163266dc5356846a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/ipython_env_impl.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/magics.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/magics.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..768de9db541f2f134127d43cd3a2071b2e5af4f3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/magics.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/model_registry.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/model_registry.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71280e4ca4b66a3e6c7e2b6a80f59b53259a6de8 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/model_registry.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/output_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/output_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b24ff971695fe94cb25067868aff26f1ba9c85db Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/output_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d62309723cecc9892d061097e4d69b8fb36f555 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils_test_helper.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils_test_helper.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f0b7682804ae2595b367a3f697a30d98a00f78c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/post_process_utils_test_helper.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/py_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/py_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02cea862a1ad6faa401b27da4d741206fdfe5e9d Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/py_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/sheets_sanitize_url.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/sheets_sanitize_url.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..262bc05fa332999c53d8d71b3d082d95a538e2ed Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/sheets_sanitize_url.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/text_model.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/text_model.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..065f03dc295b208dfd0a064a403070b3ffa705af Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/__pycache__/text_model.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/argument_parser.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/argument_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..e3b56949894e599de0ac4c7f6511aaf25ca7c8f2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/argument_parser.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Customized ArgumentParser. + +The default behvaior of argparse.ArgumentParser's parse_args() method is to +exit with a SystemExit exception in the following cases: +1. When the user requests a help message (with the --help or -h flags), or +2. When there's a parsing error (e.g. missing required flags or mistyped flags) + +To make the errors more user-friendly, this class customizes +argparse.ArgumentParser and raises either ParserNormalExit for (1) or +ParserError for (2); this way the caller has control over how to display them +to the user. +""" +from __future__ import annotations + +import abc +import argparse +from typing import Sequence +from google.generativeai.notebook import ipython_env + + +# pylint: disable-next=g-bad-exception-name +class _ParserBaseException(RuntimeError, metaclass=abc.ABCMeta): + """Base class for parser exceptions including normal exit.""" + + def __init__(self, msgs: Sequence[str], *args, **kwargs): + super().__init__("".join(msgs), *args, **kwargs) + self._msgs = msgs + self._ipython_env: ipython_env.IPythonEnv | None = None + + def set_ipython_env(self, env: ipython_env.IPythonEnv) -> None: + self._ipython_env = env + + def _ipython_display_(self): + self.display(self._ipython_env) + + def msgs(self) -> Sequence[str]: + return self._msgs + + @abc.abstractmethod + def display(self, env: ipython_env.IPythonEnv | None) -> None: + """Display this exception on an IPython console.""" + + +# ParserNormalExit is not an error: it's a way for ArgumentParser to indicate +# that the user has entered a special request (e.g. "--help") instead of a +# runnable command. +# pylint: disable-next=g-bad-exception-name +class ParserNormalExit(_ParserBaseException): + """Exception thrown when the parser exits normally. + + This is usually thrown when the user requests the help message. 
+ """ + + def display(self, env: ipython_env.IPythonEnv | None) -> None: + for msg in self._msgs: + print(msg) + + +class ParserError(_ParserBaseException): + """Exception thrown when there is an error.""" + + def display(self, env: ipython_env.IPythonEnv | None) -> None: + for msg in self._msgs: + print(msg) + if env is not None: + # Highlight to the user that an error has occurred. + env.display_html("ERROR") + + +class ArgumentParser(argparse.ArgumentParser): + """Customized ArgumentParser for LLM Magics. + + This class overrides the parent argparse.ArgumentParser's error-handling + methods to avoid side-effects like printing to stderr. The messages are + accumulated and passed into the raised exceptions for the caller to + handle them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._messages: list[str] = [] + + def _print_message(self, message, file=None): + """Override ArgumentParser's _print_message() method.""" + del file + self._messages.append(message) + + def exit(self, status=0, message=None): + """Override ArgumentParser's exit() method.""" + if message: + self._print_message(message) + + msgs = self._messages + self._messages = [] + if status == 0: + raise ParserNormalExit(msgs=msgs) + else: + raise ParserError(msgs=msgs) diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/command_utils.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/command_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..355592c213d5a7cf71875ecedbc00c80272c7e03 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/command_utils.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for Commands. + +Common methods for Commands such as RunCommand and CompileCommand. +""" +from __future__ import annotations + +from typing import AbstractSet, Any, Callable, Sequence + +from google.generativeai.notebook import ipython_env +from google.generativeai.notebook import model_registry +from google.generativeai.notebook import parsed_args_lib +from google.generativeai.notebook import post_process_utils +from google.generativeai.notebook.lib import llm_function +from google.generativeai.notebook.lib import llmfn_input_utils +from google.generativeai.notebook.lib import llmfn_output_row +from google.generativeai.notebook.lib import llmfn_outputs +from google.generativeai.notebook.lib import unique_fn + + +class _GroundTruthLLMFunction(llm_function.LLMFunction): + """LLMFunction that returns pre-generated ground truth data.""" + + def __init__(self, data: Sequence[str]): + super().__init__(outputs_ipython_display_fn=None) + self._data = data + + def get_placeholders(self) -> AbstractSet[str]: + # Ground truth is fixed and thus has no placeholders. 
+ return frozenset({}) + + def _call_impl( + self, inputs: llmfn_input_utils.LLMFunctionInputs | None + ) -> Sequence[llmfn_outputs.LLMFnOutputEntry]: + normalized_inputs = llmfn_input_utils.to_normalized_inputs(inputs) + if len(self._data) != len(normalized_inputs): + raise RuntimeError( + "Ground truth should have same number of entries as inputs: {} vs {}".format( + len(self._data), len(normalized_inputs) + ) + ) + + outputs: list[llmfn_outputs.LLMFnOutputEntry] = [] + for idx, (value, prompt_vars) in enumerate(zip(self._data, normalized_inputs)): + output_row = llmfn_output_row.LLMFnOutputRow( + data={ + llmfn_outputs.ColumnNames.RESULT_NUM: 0, + llmfn_outputs.ColumnNames.TEXT_RESULT: value, + }, + result_type=str, + ) + outputs.append( + llmfn_outputs.LLMFnOutputEntry( + prompt_num=0, + input_num=idx, + prompt_vars=prompt_vars, + output_rows=[output_row], + ) + ) + return outputs + + +def _get_ipython_display_fn( + env: ipython_env.IPythonEnv, +) -> Callable[[llmfn_outputs.LLMFnOutputs], None]: + return lambda x: env.display(x.as_pandas_dataframe()) + + +def create_llm_function( + models: model_registry.ModelRegistry, + env: ipython_env.IPythonEnv | None, + parsed_args: parsed_args_lib.ParsedArgs, + cell_content: str, + post_processing_fns: Sequence[post_process_utils.ParsedPostProcessExpr], +) -> llm_function.LLMFunction: + """Creates an LLMFunction from Command.execute() arguments.""" + prompts: list[str] = [cell_content] + + llmfn_outputs_display_fn = _get_ipython_display_fn(env) if env else None + + llm_fn = llm_function.LLMFunctionImpl( + model=models.get_model(parsed_args.model_type), + model_args=parsed_args.model_args, + prompts=prompts, + outputs_ipython_display_fn=llmfn_outputs_display_fn, + ) + if parsed_args.unique: + llm_fn = llm_fn.add_post_process_reorder_fn(name="unique", fn=unique_fn.unique_fn) + for fn in post_processing_fns: + llm_fn = fn.add_to_llm_function(llm_fn) + + return llm_fn + + +def _convert_simple_compare_fn( + name_and_simple_fn: 
tuple[str, Callable[[str, str], Any]] +) -> tuple[str, llm_function.CompareFn]: + simple_fn = name_and_simple_fn[1] + new_fn = lambda x, y: simple_fn(x.result_value(), y.result_value()) + return name_and_simple_fn[0], new_fn + + +def create_llm_compare_function( + env: ipython_env.IPythonEnv | None, + parsed_args: parsed_args_lib.ParsedArgs, + post_processing_fns: Sequence[post_process_utils.ParsedPostProcessExpr], +) -> llm_function.LLMFunction: + """Creates an LLMCompareFunction from Command.execute() arguments.""" + llmfn_outputs_display_fn = _get_ipython_display_fn(env) if env else None + + llm_cmp_fn = llm_function.LLMCompareFunction( + lhs_name_and_fn=parsed_args.lhs_name_and_fn, + rhs_name_and_fn=parsed_args.rhs_name_and_fn, + compare_name_and_fns=[_convert_simple_compare_fn(x) for x in parsed_args.compare_fn], + outputs_ipython_display_fn=llmfn_outputs_display_fn, + ) + for fn in post_processing_fns: + llm_cmp_fn = fn.add_to_llm_function(llm_cmp_fn) + + return llm_cmp_fn + + +def create_llm_eval_function( + models: model_registry.ModelRegistry, + env: ipython_env.IPythonEnv | None, + parsed_args: parsed_args_lib.ParsedArgs, + cell_content: str, + post_processing_fns: Sequence[post_process_utils.ParsedPostProcessExpr], +) -> llm_function.LLMFunction: + """Creates an LLMCompareFunction from Command.execute() arguments.""" + llmfn_outputs_display_fn = _get_ipython_display_fn(env) if env else None + + # First construct a regular LLMFunction from the cell contents. + llm_fn = create_llm_function( + models=models, + env=env, + parsed_args=parsed_args, + cell_content=cell_content, + post_processing_fns=post_processing_fns, + ) + + # Next create a LLMCompareFunction. 
+ ground_truth_fn = _GroundTruthLLMFunction(data=parsed_args.ground_truth) + llm_cmp_fn = llm_function.LLMCompareFunction( + lhs_name_and_fn=("actual", llm_fn), + rhs_name_and_fn=("ground_truth", ground_truth_fn), + compare_name_and_fns=[_convert_simple_compare_fn(x) for x in parsed_args.compare_fn], + outputs_ipython_display_fn=llmfn_outputs_display_fn, + ) + + return llm_cmp_fn diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/ipython_env.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/ipython_env.py new file mode 100644 index 0000000000000000000000000000000000000000..9d5b153598d6710b75b27df4a91f017756816c35 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/ipython_env.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Abstract IPythonEnv base class. + +This module provides a layer of abstraction to address the following problems: +1. Sometimes the code needs to run in an environment where IPython is not +available, e.g. inside a unittest. +2. We want to limit dependencies on IPython to code that deals directly with +the notebook environment. 
+""" +from __future__ import annotations + +import abc +from typing import Any + + +class IPythonEnv(abc.ABC): + """Abstract base class that provides a wrapper around IPython methods.""" + + @abc.abstractmethod + def display(self, x: Any) -> None: + """Wrapper around IPython.core.display.display().""" + + @abc.abstractmethod + def display_html(self, x: str) -> None: + """Wrapper to display HTML. + + This method is equivalent to calling: + display.display(display.HTML(x)) + + display() and HTML() are combined into a single method because + display.HTML() returns an object, which would be complicated to model with + this abstract interface. + + Args: + x: An HTML string to be displayed. + """ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__init__.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..35a14b7b86015f564928dc6967f0a7c00fbc42f8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1be66bdcbdccaf43fa5b4222469cba5310afba9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llm_function.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llm_function.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c771bd704a9e80131f292aaa355ff838b6bfa7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llm_function.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_input_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_input_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ea3e006b45db85b8e9ed93a17fbcf88fd1894b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_input_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_inputs_source.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_inputs_source.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a868309f76e2ebc28d1566562c5660d1f5e4c35 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_inputs_source.cpython-311.pyc differ diff --git 
a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_output_row.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_output_row.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c5f1b124d136844a8ad4eda8f888f27b0cff49e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_output_row.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_outputs.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_outputs.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b2ed896d247f8e3d80b24953ca2494d8b02d7f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_outputs.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c246a8ba78a2445742c91b09c466d9a747b08140 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process_cmds.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process_cmds.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebd198e3d401458345553408a5d6b98c4bc3bdd0 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/llmfn_post_process_cmds.cpython-311.pyc 
differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/model.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/model.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd6b4a659089fe03611411283956d4dfdaeb3497 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/model.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/prompt_utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/prompt_utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc55ba758303baa0faeecc58394c4757ababc721 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/prompt_utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/unique_fn.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/unique_fn.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..753df6ec17ad594d3edc88228414326739e5df15 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/__pycache__/unique_fn.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llm_function.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llm_function.py new file mode 100644 index 0000000000000000000000000000000000000000..c3eb7b52de5170ceeba87372cd259f8aa40931c5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llm_function.py @@ -0,0 +1,468 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use 
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""LLMFunction.""" +from __future__ import annotations + +import abc +import dataclasses +from typing import ( + AbstractSet, + Any, + Callable, + Iterable, + Mapping, + Optional, + Sequence, + Union, +) + +from google.generativeai.notebook.lib import llmfn_input_utils +from google.generativeai.notebook.lib import llmfn_output_row +from google.generativeai.notebook.lib import llmfn_outputs +from google.generativeai.notebook.lib import llmfn_post_process +from google.generativeai.notebook.lib import llmfn_post_process_cmds +from google.generativeai.notebook.lib import model as model_lib +from google.generativeai.notebook.lib import prompt_utils + + +# In the same spirit as post-processing functions (see: llmfn_post_process.py), +# we keep the LLM functions more flexible by providing the entire left- and +# right-hand side rows to the user-defined comparison function. +# +# Possible use-cases include adding a scoring function as a post-process +# command, then comparing the scores. 
+CompareFn = Callable[ + [llmfn_output_row.LLMFnOutputRowView, llmfn_output_row.LLMFnOutputRowView], + Any, +] + + +def _is_equal_fn( + lhs: llmfn_output_row.LLMFnOutputRowView, + rhs: llmfn_output_row.LLMFnOutputRowView, +) -> bool: + """Default function used when comparing outputs.""" + return lhs.result_value() == rhs.result_value() + + +def _convert_compare_fn_to_batch_add_fn( + fn: Callable[ + [ + llmfn_output_row.LLMFnOutputRowView, + llmfn_output_row.LLMFnOutputRowView, + ], + Any, + ] +) -> llmfn_post_process.LLMCompareFnPostProcessBatchAddFn: + """Vectorize a single-row-based comparison function.""" + + def _fn( + lhs_and_rhs_rows: Sequence[ + tuple[ + llmfn_output_row.LLMFnOutputRowView, + llmfn_output_row.LLMFnOutputRowView, + ] + ] + ) -> Sequence[Any]: + return [fn(lhs, rhs) for lhs, rhs in lhs_and_rhs_rows] + + return _fn + + +@dataclasses.dataclass +class _PromptInfo: + prompt_num: int + prompt: str + input_num: int + prompt_vars: Mapping[str, str] + model_input: str + + +def _generate_prompts( + prompts: Sequence[str], inputs: llmfn_input_utils.LLMFunctionInputs | None +) -> Iterable[_PromptInfo]: + """Generate a tuple of fields needed for processing prompts. + + Args: + prompts: A list of prompts, with optional keyword placeholders. + inputs: A list of key/value pairs to substitute into placeholders in + `prompts`. + + Yields: + A _PromptInfo instance. + """ + normalized_inputs: Sequence[Mapping[str, str]] = [] + if inputs is not None: + normalized_inputs = llmfn_input_utils.to_normalized_inputs(inputs) + + # Must have at least one entry so that we execute the prompt at least once. + if not normalized_inputs: + normalized_inputs = [{}] + + for prompt_num, prompt in enumerate(prompts): + for input_num, prompt_vars in enumerate(normalized_inputs): + # Perform keyword substitution on the prompt based on `prompt_vars`. 
+ model_input = prompt.format(**prompt_vars) + yield _PromptInfo( + prompt_num=prompt_num, + prompt=prompt, + input_num=input_num, + prompt_vars=prompt_vars, + model_input=model_input, + ) + + +class LLMFunction( + Callable[ + [Union[llmfn_input_utils.LLMFunctionInputs, None]], + llmfn_outputs.LLMFnOutputs, + ], + metaclass=abc.ABCMeta, +): + """Base class for LLMFunctionImpl and LLMCompareFunction.""" + + def __init__( + self, + outputs_ipython_display_fn: Callable[[llmfn_outputs.LLMFnOutputs], None] | None = None, + ): + """Constructor. + + Args: + outputs_ipython_display_fn: Optional function that will be used to + override how the outputs of this LLMFunction will be displayed in a + notebook (See further documentation in LLMFnOutputs.__init__().) + """ + self._post_process_cmds: list[llmfn_post_process_cmds.LLMFnPostProcessCommand] = [] + self._outputs_ipython_display_fn = outputs_ipython_display_fn + + @abc.abstractmethod + def get_placeholders(self) -> AbstractSet[str]: + """Returns the placeholders that should be present in inputs for this function.""" + + @abc.abstractmethod + def _call_impl( + self, inputs: llmfn_input_utils.LLMFunctionInputs | None + ) -> Sequence[llmfn_outputs.LLMFnOutputEntry]: + """Concrete implementation of __call__().""" + + def __call__( + self, inputs: llmfn_input_utils.LLMFunctionInputs | None = None + ) -> llmfn_outputs.LLMFnOutputs: + """Runs and returns results based on `inputs`.""" + outputs = self._call_impl(inputs) + + return llmfn_outputs.LLMFnOutputs( + outputs=outputs, ipython_display_fn=self._outputs_ipython_display_fn + ) + + def add_post_process_reorder_fn( + self, name: str, fn: llmfn_post_process.LLMFnPostProcessBatchReorderFn + ) -> LLMFunction: + self._post_process_cmds.append( + llmfn_post_process_cmds.LLMFnPostProcessReorderCommand(name=name, fn=fn) + ) + return self + + def add_post_process_add_fn( + self, + name: str, + fn: llmfn_post_process.LLMFnPostProcessBatchAddFn, + ) -> LLMFunction: + 
self._post_process_cmds.append( + llmfn_post_process_cmds.LLMFnPostProcessAddCommand(name=name, fn=fn) + ) + return self + + def add_post_process_replace_fn( + self, + name: str, + fn: llmfn_post_process.LLMFnPostProcessBatchReplaceFn, + ) -> LLMFunction: + self._post_process_cmds.append( + llmfn_post_process_cmds.LLMFnPostProcessReplaceCommand(name=name, fn=fn) + ) + return self + + +class LLMFunctionImpl(LLMFunction): + """Callable class that executes the contents of a Magics cell. + + An LLMFunction is constructed from the Magics command line and cell contents + specified by the user. It is defined by: + - A model instance, + - Model arguments + - A prompt template (e.g. "the opposite of hot is {word}") with an optional + keyword placeholder. + + The LLMFunction takes as its input a sequence of dictionaries containing + values for keyword replacement, e.g. [{"word": "hot"}, {"word": "tall"}]. + + This will cause the model to be executed with the following prompts: + "The opposite of hot is" + "The opposite of tall is" + + The results will be returned in a LLMFnOutputs instance. + """ + + def __init__( + self, + model: model_lib.AbstractModel, + prompts: Sequence[str], + model_args: model_lib.ModelArguments | None = None, + outputs_ipython_display_fn: Callable[[llmfn_outputs.LLMFnOutputs], None] | None = None, + ): + """Constructor. + + Args: + model: The model that the prompts will execute on. + prompts: A sequence of prompt templates with optional placeholders. The + placeholders will be replaced by the inputs passed into this function. + model_args: Optional set of model arguments to configure how the model + executes the prompts. + outputs_ipython_display_fn: See documentation in LLMFunction.__init__(). + """ + super().__init__(outputs_ipython_display_fn=outputs_ipython_display_fn) + self._model = model + self._prompts = prompts + self._model_args = model_lib.ModelArguments() if model_args is None else model_args + + # Compute placeholders. 
+ self._placeholders = frozenset({}) + for prompt in self._prompts: + self._placeholders = self._placeholders.union(prompt_utils.get_placeholders(prompt)) + + def _run_post_processing_cmds( + self, results: Sequence[llmfn_output_row.LLMFnOutputRow] + ) -> Sequence[llmfn_output_row.LLMFnOutputRow]: + """Runs post-processing commands over `results`.""" + for cmd in self._post_process_cmds: + try: + if isinstance(cmd, llmfn_post_process_cmds.LLMFnImplPostProcessCommand): + results = cmd.run(results) + else: + raise llmfn_post_process.PostProcessExecutionError( + "Unsupported post-process command type: {}".format(type(cmd)) + ) + except llmfn_post_process.PostProcessExecutionError: + raise + except RuntimeError as e: + raise llmfn_post_process.PostProcessExecutionError( + 'Error executing "{}", got {}: {}'.format(cmd.name(), type(e).__name__, e) + ) + return results + + def get_placeholders(self) -> AbstractSet[str]: + return self._placeholders + + def _call_impl( + self, inputs: llmfn_input_utils.LLMFunctionInputs | None + ) -> Sequence[llmfn_outputs.LLMFnOutputEntry]: + results: list[llmfn_outputs.LLMFnOutputEntry] = [] + for info in _generate_prompts(prompts=self._prompts, inputs=inputs): + model_results = self._model.call_model( + model_input=info.model_input, model_args=self._model_args + ) + output_rows: list[llmfn_output_row.LLMFnOutputRow] = [] + for result_num, text_result in enumerate(model_results.text_results): + output_rows.append( + llmfn_output_row.LLMFnOutputRow( + data={ + llmfn_outputs.ColumnNames.RESULT_NUM: result_num, + llmfn_outputs.ColumnNames.TEXT_RESULT: text_result, + }, + result_type=str, + ) + ) + results.append( + llmfn_outputs.LLMFnOutputEntry( + prompt_num=info.prompt_num, + input_num=info.input_num, + prompt=info.prompt, + prompt_vars=info.prompt_vars, + model_input=info.model_input, + model_results=model_results, + output_rows=self._run_post_processing_cmds(output_rows), + ) + ) + return results + + +class 
LLMCompareFunction(LLMFunction): + """LLMFunction for comparisons. + + LLMCompareFunction runs an input over a pair of LLMFunctions and compares the + result. + """ + + def __init__( + self, + lhs_name_and_fn: tuple[str, LLMFunction], + rhs_name_and_fn: tuple[str, LLMFunction], + compare_name_and_fns: Sequence[tuple[str, CompareFn]] | None = None, + outputs_ipython_display_fn: Callable[[llmfn_outputs.LLMFnOutputs], None] | None = None, + ): + """Constructor. + + Args: + lhs_name_and_fn: Name and function for the left-hand side of the + comparison. + rhs_name_and_fn: Name and function for the right-hand side of the + comparison. + compare_name_and_fns: Optional names and functions for comparing the + results of the left- and right-hand sides. + outputs_ipython_display_fn: See documentation in LLMFunction.__init__(). + """ + super().__init__(outputs_ipython_display_fn=outputs_ipython_display_fn) + self._lhs_name: str = lhs_name_and_fn[0] + self._lhs_fn: LLMFunction = lhs_name_and_fn[1] + self._rhs_name: str = rhs_name_and_fn[0] + self._rhs_fn: LLMFunction = rhs_name_and_fn[1] + self._placeholders = frozenset(self._lhs_fn.get_placeholders()).union( + self._rhs_fn.get_placeholders() + ) + + if not compare_name_and_fns: + self._result_name = "is_equal" + self._result_compare_fn = _is_equal_fn + else: + # Assume the last entry in `compare_name_and_fns` is the one that + # produces value for the result cell. + name, fn = compare_name_and_fns[-1] + self._result_name = name + self._result_compare_fn = fn + + # Treat the other compare_fns as post-processing operators. 
+ for name, cmp_fn in compare_name_and_fns[:-1]: + self.add_compare_post_process_add_fn( + name=name, fn=_convert_compare_fn_to_batch_add_fn(cmp_fn) + ) + + def _run_post_processing_cmds( + self, + lhs_output_rows: Sequence[llmfn_output_row.LLMFnOutputRow], + rhs_output_rows: Sequence[llmfn_output_row.LLMFnOutputRow], + results: Sequence[llmfn_output_row.LLMFnOutputRow], + ) -> Sequence[llmfn_output_row.LLMFnOutputRow]: + """Runs post-processing commands over `results`.""" + for cmd in self._post_process_cmds: + try: + if isinstance(cmd, llmfn_post_process_cmds.LLMFnImplPostProcessCommand): + results = cmd.run(results) + elif isinstance(cmd, llmfn_post_process_cmds.LLMCompareFnPostProcessCommand): + results = cmd.run(list(zip(lhs_output_rows, rhs_output_rows, results))) + else: + raise RuntimeError( + "Unsupported post-process command type: {}".format(type(cmd)) + ) + except llmfn_post_process.PostProcessExecutionError: + raise + except RuntimeError as e: + raise llmfn_post_process.PostProcessExecutionError( + 'Error executing "{}", got {}: {}'.format(cmd.name(), type(e).__name__, e) + ) + return results + + def get_placeholders(self) -> AbstractSet[str]: + return self._placeholders + + def _call_impl( + self, inputs: llmfn_input_utils.LLMFunctionInputs | None + ) -> Sequence[llmfn_outputs.LLMFnOutputEntry]: + lhs_results = self._lhs_fn(inputs) + rhs_results = self._rhs_fn(inputs) + + # Combine the results. 
+ outputs: list[llmfn_outputs.LLMFnOutputEntry] = [] + for lhs_entry, rhs_entry in zip(lhs_results, rhs_results): + if lhs_entry.prompt_num != rhs_entry.prompt_num: + raise RuntimeError( + "Prompt num mismatch: {} vs {}".format( + lhs_entry.prompt_num, rhs_entry.prompt_num + ) + ) + if lhs_entry.input_num != rhs_entry.input_num: + raise RuntimeError( + "Input num mismatch: {} vs {}".format(lhs_entry.input_num, rhs_entry.input_num) + ) + if lhs_entry.prompt_vars != rhs_entry.prompt_vars: + raise RuntimeError( + "Prompt vars mismatch: {} vs {}".format( + lhs_entry.prompt_vars, rhs_entry.prompt_vars + ) + ) + + # The two functions may have different numbers of results due to + # options like candidate_count, so we can only compare up to the + # minimum of the two. + num_output_rows = min(len(lhs_entry.output_rows), len(rhs_entry.output_rows)) + lhs_output_rows = lhs_entry.output_rows[:num_output_rows] + rhs_output_rows = rhs_entry.output_rows[:num_output_rows] + output_rows: list[llmfn_output_row.LLMFnOutputRow] = [] + for result_num, lhs_and_rhs_output_row in enumerate( + zip(lhs_output_rows, rhs_output_rows) + ): + lhs_output_row, rhs_output_row = lhs_and_rhs_output_row + + # Combine cells from lhs_output_row and rhs_output_row into a + # single row. + # Although it is possible for RESULT_NUM (the index of each + # text_result if a prompt produces multiple text_results) to be + # different between the left and right sides, we ignore their + # RESULT_NUM entries and write our own. + row_data: dict[str, Any] = { + llmfn_outputs.ColumnNames.RESULT_NUM: result_num, + self._result_name: self._result_compare_fn(lhs_output_row, rhs_output_row), + } + output_row = llmfn_output_row.LLMFnOutputRow(data=row_data, result_type=Any) + + # Add the prompt vars. + output_row.add(llmfn_outputs.ColumnNames.PROMPT_VARS, lhs_entry.prompt_vars) + + # Add the results from the left-hand side and right-hand side. 
+ for name, row in [ + (self._lhs_name, lhs_output_row), + (self._rhs_name, rhs_output_row), + ]: + for k, v in row.items(): + if k != llmfn_outputs.ColumnNames.RESULT_NUM: + # We use LLMFnOutputRow.add() because it handles column + # name collisions. + output_row.add("{}_{}".format(name, k), v) + + output_rows.append(output_row) + + outputs.append( + llmfn_outputs.LLMFnOutputEntry( + prompt_num=lhs_entry.prompt_num, + input_num=lhs_entry.input_num, + prompt_vars=lhs_entry.prompt_vars, + output_rows=self._run_post_processing_cmds( + lhs_output_rows=lhs_output_rows, + rhs_output_rows=rhs_output_rows, + results=output_rows, + ), + ) + ) + return outputs + + def add_compare_post_process_add_fn( + self, + name: str, + fn: llmfn_post_process.LLMCompareFnPostProcessBatchAddFn, + ) -> LLMFunction: + self._post_process_cmds.append( + llmfn_post_process_cmds.LLMCompareFnPostProcessAddCommand(name=name, fn=fn) + ) + return self diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_input_utils.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_input_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1112f182658f03ae190206d7578da16ef9912ed4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_input_utils.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Utilities for handling input variables."""

from typing import Any, Mapping, Sequence, Union

from google.generativeai.notebook.lib import llmfn_inputs_source

# One row per dict of (placeholder -> value) pairs.
_NormalizedInputsList = llmfn_inputs_source.NormalizedInputsList

# Column-major inputs: {"column name": [value, value, ...], ...}.
_ColumnOrderValuesList = Mapping[str, Sequence[str]]

# Everything an LLMFunction accepts as its inputs argument.
LLMFunctionInputs = Union[_ColumnOrderValuesList, llmfn_inputs_source.LLMFnInputsSource]


def _is_column_order_values_list(inputs: Any) -> bool:
    """Checks whether `inputs` has the shape {"key1": ["val1", "val2", ...]}.

    This matches the format produced by pandas.DataFrame.to_dict(orient="list").

    Args:
        inputs: The inputs passed into an LLMFunction.

    Returns:
        True iff `inputs` is a mapping whose values are all non-string,
        non-bytes sequences. Strings and bytes are excluded because iterating
        them yields single characters rather than whole words.
    """
    if not isinstance(inputs, Mapping):
        return False
    return all(
        isinstance(column, Sequence) and not isinstance(column, (str, bytes))
        for column in inputs.values()
    )


# TODO(b/273688393): Perform stricter validation on `inputs`.
def _normalize_column_order_values_list(
    inputs: _ColumnOrderValuesList,
) -> _NormalizedInputsList:
    """Transposes column-major prompt inputs into a list of per-row dicts."""
    columns = list(inputs.keys())
    if not columns:
        return []
    # Row count is taken from the first column; shorter columns will raise
    # IndexError on access, matching the original behavior pending b/273688393.
    num_rows = len(inputs[columns[0]])
    return [{key: inputs[key][row] for key in columns} for row in range(num_rows)]


def to_normalized_inputs(inputs: LLMFunctionInputs) -> _NormalizedInputsList:
    """Dispatches on the type of `inputs` and returns a normalized row list.

    Args:
        inputs: Either an LLMFnInputsSource or a column-major mapping.

    Returns:
        A list of (placeholder -> value) dictionaries, one per row.

    Raises:
        ValueError: If `inputs` is neither supported form.
    """
    if isinstance(inputs, llmfn_inputs_source.LLMFnInputsSource):
        return list(inputs.to_normalized_inputs())
    if _is_column_order_values_list(inputs):
        return list(_normalize_column_order_values_list(inputs))
    raise ValueError("Unsupported input type {!r}".format(inputs))
+# See the License for the specific language governing permissions and +# limitations under the License. +"""LLMFnInputsSource.""" +from __future__ import annotations + +import abc +from typing import Callable, Mapping, Sequence + + +NormalizedInputsList = Sequence[Mapping[str, str]] + + +class LLMFnInputsSource(abc.ABC): + """Abstract class representing a source of inputs for LLMFunction. + + This class could be extended with concrete implementations that read data + from external sources, such as Google Sheets. + """ + + def __init__(self): + self._cached_inputs: NormalizedInputsList | None = None + self._display_status_fn: Callable[[], None] = lambda: None + + def to_normalized_inputs(self, suppress_status_msgs: bool = False) -> NormalizedInputsList: + """Returns a sequence of normalized inputs. + + The return value is a sequence of dictionaries of (placeholder, value) + pairs, e.g. [{"word": "hot"}, {"word: "cold"}, ....] + + These are used for keyword-substitution for prompts in LLMFunctions. + + Args: + suppress_status_msgs: If True, suppress status messages regarding the + input being read. + + Returns: + A sequence of normalized inputs. + """ + if self._cached_inputs is None: + ( + self._cached_inputs, + self._display_status_fn, + ) = self._to_normalized_inputs_impl() + if not suppress_status_msgs: + self._display_status_fn() + return self._cached_inputs + + @abc.abstractmethod + def _to_normalized_inputs_impl( + self, + ) -> tuple[NormalizedInputsList, Callable[[], None]]: + """Returns a tuple of NormalizedInputsList and a display function. + + The display function displays some status about the input (e.g. where + it is read from). This way the status continues to be displayed + even though the results are cached. 
+ """ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_output_row.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_output_row.py new file mode 100644 index 0000000000000000000000000000000000000000..e53730fdca6f9ca4eb16d2b328118451cfa5869c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/llmfn_output_row.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""LLMFnOutputRow.""" +from __future__ import annotations + +import abc +from typing import Any, Iterator, Mapping + + +# The type of value stored in a cell. 
# Any Python value may be stored in a cell.
_CELLVALUETYPE = Any


def _get_name_of_type(x: "type[Any]") -> str:
    """Returns a printable name for the type `x`."""
    try:
        return x.__name__
    except AttributeError:
        # Typing constructs such as `Any` have no `__name__` attribute.
        return str(x)


def _validate_is_result_type(value: Any, result_type: "type[Any]") -> None:
    """Raises ValueError unless `value` is an instance of `result_type`.

    `Any` acts as a wildcard: every value is accepted.
    """
    if result_type == Any or isinstance(value, result_type):
        return
    raise ValueError(
        'Value of last entry must be of type "{}", got "{}"'.format(
            _get_name_of_type(result_type),
            _get_name_of_type(type(value)),
        )
    )


class LLMFnOutputRowView(Mapping[str, _CELLVALUETYPE], metaclass=abc.ABCMeta):
    """Immutable view of LLMFnOutputRow."""

    @abc.abstractmethod
    def __contains__(self, k: str) -> bool:
        """Supports `k in view`."""

    @abc.abstractmethod
    def __str__(self) -> str:
        """Supports `str(view)`."""

    @abc.abstractmethod
    def result_type(self) -> "type[Any]":
        """Returns the type enforced for the result cell."""

    @abc.abstractmethod
    def result_value(self) -> Any:
        """Returns the value of the result cell."""

    @abc.abstractmethod
    def result_key(self) -> str:
        """Returns the key (column name) of the result cell."""


class LLMFnOutputRow(LLMFnOutputRowView):
    """Dict-like container for one row of an output table.

    Keys are column names; values are cell contents. Exactly one cell — kept
    as the rightmost (most recently inserted) key as a class invariant — is
    the "result": the model output after any user-specified post-processing.

    Beyond the Mapping interface, this adds:
      - reading the result cell (`result_value` / `result_key`),
      - replacing the result value, optionally renaming its key
        (`set_result_value`),
      - appending non-result cells with collision-safe keys (`add`).
    """

    def __init__(self, data: Mapping[str, _CELLVALUETYPE], result_type: "type[Any]"):
        """Initializer.

        Args:
            data: Initial cells; must be non-empty. The last entry becomes the
                result cell and its value must satisfy `result_type`.
            result_type: Type of the result cell, enforced at runtime.
        """
        self._data: "dict[str, _CELLVALUETYPE]" = dict(data)
        if not self._data:
            raise ValueError("Must provide non-empty data")
        self._result_type = result_type
        _validate_is_result_type(self.result_value(), self._result_type)

    # --- Mapping interface -------------------------------------------------
    def __iter__(self) -> Iterator[str]:
        return iter(self._data)

    def __len__(self) -> int:
        return len(self._data)

    def __getitem__(self, k: str) -> _CELLVALUETYPE:
        return self._data[k]

    def __contains__(self, k: str) -> bool:
        return k in self._data

    def __str__(self) -> str:
        return "LLMFnOutputRow: {}".format(self._data)

    # --- Result-cell accessors ---------------------------------------------
    def result_type(self) -> "type[Any]":
        return self._result_type

    def result_value(self) -> Any:
        return self._data[self.result_key()]

    def result_key(self) -> str:
        # Invariant: the result cell is always the rightmost (last) key.
        return next(reversed(self._data))

    # --- Mutators ----------------------------------------------------------
    def set_result_value(self, value: Any, key: "str | None" = None) -> None:
        """Replaces the result value, optionally renaming its key.

        Args:
            value: New result value; must satisfy the row's result type.
            key: If given and different from the current result key, the
                result cell is re-keyed (re-inserting keeps it rightmost).
        """
        _validate_is_result_type(value, self._result_type)
        old_key = self.result_key()
        if key is None or key == old_key:
            self._data[old_key] = value
        else:
            # Deleting then inserting under the new key keeps the result
            # cell in the rightmost position.
            del self._data[old_key]
            self._data[key] = value

    def add(self, key: str, value: _CELLVALUETYPE) -> None:
        """Appends a non-result cell; the result cell is unaffected.

        On a key collision, the key is suffixed with `_1`, `_2`, ... until it
        is unique.

        Args:
            key: The key of the new cell to add.
            value: The value of the new cell to add.
        """
        unique_key, suffix = key, 1
        while unique_key in self._data:
            unique_key = "{}_{}".format(key, suffix)
            suffix += 1
        # Pop and re-append the result cell so it stays rightmost.
        result_key = self.result_key()
        result_value = self._data.pop(result_key)
        self._data[unique_key] = value
        self._data[result_key] = result_value
"""Output of LLMFunction."""
from __future__ import annotations

import abc
import dataclasses
from typing import (
    overload,
    Any,
    Callable,
    Iterable,
    Iterator,
    Mapping,
    Sequence,
)

from google.generativeai.notebook.lib import llmfn_output_row
from google.generativeai.notebook.lib import model as model_lib
import pandas


class ColumnNames:
    """Names of columns that are used to represent output."""

    PROMPT_NUM = "Prompt Num"
    INPUT_NUM = "Input Num"
    RESULT_NUM = "Result Num"
    # In the code we refer to "model_input" as the full keyword-substituted prompt
    # and "prompt" as the template with placeholders.
    # When displaying the results however we use "prompt" since "model_input" is
    # an internal name.
    MODEL_INPUT = "Prompt"
    PROMPT_VARS = "Prompt vars"
    TEXT_RESULT = "text_result"


@dataclasses.dataclass
class LLMFnOutputEntry:
    """The output of a single model input from LLMFunction.

    A model input is a prompt where the keyword placeholders have been
    substituted (by `prompt_vars`).

    E.g. If we have:
      prompt: "the opposite of {word} is"
      prompt_vars: {"word": "hot"}
    Then we will have the following model input:
      model_input: "the opposite of hot is"

    Note: The model may produce one-or-more results for a given model_input
    (e.g. via candidate_count). This is represented by the sequence
    `output_rows`.
    """

    prompt_num: int
    input_num: int
    prompt_vars: Mapping[str, str]
    output_rows: Sequence[llmfn_output_row.LLMFnOutputRow]

    # The following fields are optional; they are only populated when the
    # entry was produced by actually running a model (as opposed to, e.g.,
    # being synthesized by LLMCompareFunction).
    prompt: str | None = None
    model_input: str | None = None
    model_results: model_lib.ModelResults | None = None


def _has_model_input_field(outputs: Iterable[LLMFnOutputEntry]) -> bool:
    """Returns True if any entry has its `model_input` field populated."""
    for entry in outputs:
        if entry.model_input is not None:
            return True
    return False


class LLMFnOutputsBase(Sequence[LLMFnOutputEntry]):
    """Parent class for LLMFnOutputs.

    This class exists mainly to avoid a circular dependency between LLMFnOutputs
    and LLMFnOutputsSink. Most users should use LLMFnOutputs directly instead.
    """

    def __init__(
        self,
        outputs: Iterable[LLMFnOutputEntry] | None = None,
    ):
        """Constructor.

        Args:
            outputs: The contents of this LLMFnOutputs instance.
        """
        # Copy into a private list so later mutation of `outputs` by the
        # caller cannot affect this instance.
        self._outputs: list[LLMFnOutputEntry] = list(outputs) if outputs is not None else []

    # Needed for Iterable[LLMFnOutputEntry].
    def __iter__(self) -> Iterator[LLMFnOutputEntry]:
        return self._outputs.__iter__()

    # Needed for Sequence[LLMFnOutputEntry].
    def __len__(self) -> int:
        return self._outputs.__len__()

    # Needed for Sequence[LLMFnOutputEntry].
    @overload
    def __getitem__(self, x: int) -> LLMFnOutputEntry: ...

    @overload
    def __getitem__(self, x: slice) -> Sequence[LLMFnOutputEntry]: ...

    def __getitem__(self, x: int | slice) -> LLMFnOutputEntry | Sequence[LLMFnOutputEntry]:
        return self._outputs.__getitem__(x)

    # Convenience methods.
    def __bool__(self) -> bool:
        return bool(self._outputs)

    def __str__(self) -> str:
        return self.as_pandas_dataframe().__str__()

    # Own methods
    def as_dict(self) -> Mapping[str, Sequence[Any]]:
        """Formats returned results as a column-major dictionary.

        Returns:
            A mapping of column name -> list of cell values, suitable for
            constructing a pandas DataFrame. Rows that lack a value for some
            column are padded with None.
        """

        # `data` is a table in column order, with the columns listed from left to
        # right.
        data = {
            ColumnNames.PROMPT_NUM: [],
            ColumnNames.INPUT_NUM: [],
            # RESULT_NUM is special: each LLMFnOutputRow in self._outputs is
            # expected to have a RESULT_NUM key.
            ColumnNames.RESULT_NUM: [],
        }
        # The "Prompt" column is only emitted when at least one entry carries
        # a model_input (i.e. a model was actually run).
        if _has_model_input_field(self._outputs):
            data[ColumnNames.MODEL_INPUT] = []

        if not self._outputs:
            return data

        # Add column names of added data.
        # The last key in LLMFnOutputRow is special as it is considered
        # the result. To preserve order in the (unlikely) event of inconsistent
        # keys across rows, we first add all-but-the-last key to `total_keys_set`,
        # then the last key.
        # Note: `total_keys_set` is a Python dictionary instead of a Python set
        # because Python dictionaries preserve the order in which entries are
        # added, whereas Python sets do not.
        total_keys_set: dict[str, None] = {k: None for k in data.keys()}
        for output in self._outputs:
            for result in output.output_rows:
                for key in list(result.keys())[:-1]:
                    total_keys_set[key] = None
        # Second pass: result keys go last so the result column ends up
        # rightmost in the table.
        for output in self._outputs:
            for result in output.output_rows:
                total_keys_set[list(result.keys())[-1]] = None

        # `data` represents the table as a dictionary of:
        # column names -> list of values
        for key in total_keys_set:
            data[key] = []

        # `next_num_rows` is the expected length of every column after the
        # current row has been appended; any column still shorter than this is
        # padded with None (the row had no value for that column).
        next_num_rows = 1
        for output in self._outputs:
            for result in output.output_rows:
                data[ColumnNames.PROMPT_NUM].append(output.prompt_num)
                data[ColumnNames.INPUT_NUM].append(output.input_num)
                if ColumnNames.MODEL_INPUT in data:
                    data[ColumnNames.MODEL_INPUT].append(output.model_input)

                for key, value in result.items():
                    data[key].append(value)

                # Look for empty cells and pad them with None.
                for column in data.values():
                    if len(column) < next_num_rows:
                        column.append(None)

                next_num_rows += 1

        return data

    def as_pandas_dataframe(self) -> pandas.DataFrame:
        """Returns the outputs as a pandas DataFrame (one row per result)."""
        return pandas.DataFrame(self.as_dict())


class LLMFnOutputsSink(abc.ABC):
    """Abstract class representing an exporter for the output of LLMFunction.

    This class could be extended to write to external documents, such as
    Google Sheets.
    """

    # NOTE(review): this method is not marked @abstractmethod even though the
    # class is abstract — the default implementation silently does nothing.
    # Presumably intentional (best-effort sink); confirm before changing.
    def write_outputs(self, outputs: LLMFnOutputsBase) -> None:
        """Writes `outputs` to some destination."""


class LLMFnOutputs(LLMFnOutputsBase):
    """A sequence of LLMFnOutputEntry instances.

    Notes:
      - Each LLMFnOutputEntry represents the results of running one model
        input (see documentation for LLMFnOutputEntry for what "model input"
        means.)
      - A single model input may produce more-than-one text results.
    """

    def __init__(
        self,
        outputs: Iterable[LLMFnOutputEntry] | None = None,
        ipython_display_fn: Callable[[LLMFnOutputs], None] | None = None,
    ):
        """Constructor.

        Args:
            outputs: The contents of this LLMFnOutputs instance.
            ipython_display_fn: An optional function for pretty-printing this
                instance when it is the output of a cell in a notebook. If this
                argument is not None, the _ipython_display_ method will be
                defined which will in turn invoke this function.
        """
        super().__init__(outputs=outputs)

        if ipython_display_fn:
            self._ipython_display_fn = ipython_display_fn
            # We define the _ipython_display_ method only when `ipython_display_fn`
            # is set. This lets us fall back to a default implementation defined by
            # the notebook when `ipython_display_fn` is not set, instead of having to
            # provide our own default implementation.
            setattr(
                self,
                "_ipython_display_",
                getattr(self, "_ipython_display_impl"),
            )

    def _ipython_display_impl(self):
        """Actual implementation of _ipython_display_.

        This method should only be invoked if self._ipython_display_fn is set.
        """
        self._ipython_display_fn(self)

    def export(self, sink: LLMFnOutputsSink) -> None:
        """Export contents to `sink`."""
        sink.write_outputs(self)
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Signatures for post-processing functions and other common definitions."""
from __future__ import annotations

from typing import Any, Callable, Sequence, Tuple

from google.generativeai.notebook.lib import llmfn_output_row


class PostProcessExecutionError(RuntimeError):
    """An error while executing a post-processing command."""


# A batch-process function takes a batch of rows, and returns a sequence of
# indices representing which rows to keep (in the returned order).
# This can be used to implement operations such as filtering and sorting.
#
# Requires:
# - Indices must be in the range [0, len(input rows)).
# - Indices must be unique (duplicates are rejected at execution time).
LLMFnPostProcessBatchReorderFn = Callable[
    [Sequence[llmfn_output_row.LLMFnOutputRowView]],
    Sequence[int],
]

# An add function takes a batch of rows and returns a sequence of values to
# be added as new columns (one value per row, in row order).
#
# Requires:
# - Output sequence must be exactly the same length as number of rows.
LLMFnPostProcessBatchAddFn = Callable[
    [Sequence[llmfn_output_row.LLMFnOutputRowView]], Sequence[Any]
]

# A replace function takes a batch of rows and returns a sequence of values
# to replace the existing results (one value per row, in row order).
#
# Requires:
# - Output sequence must be exactly the same length as number of rows.
# - Return type must match the result_type of LLMFnOutputRow.
LLMFnPostProcessBatchReplaceFn = Callable[
    [Sequence[llmfn_output_row.LLMFnOutputRowView]], Sequence[Any]
]

# An add function takes a batch of pairs of (left-hand side, right-hand side)
# rows and returns a sequence of values to be added as new columns.
#
# This is used for LLMCompareFunction.
#
# Requires:
# - Output sequence must be exactly the same length as number of rows.
LLMCompareFnPostProcessBatchAddFn = Callable[
    [
        Sequence[
            Tuple[
                llmfn_output_row.LLMFnOutputRowView,
                llmfn_output_row.LLMFnOutputRowView,
            ]
        ]
    ],
    Sequence[Any],
]

# ---------------------------------------------------------------------------
# File boundary: llmfn_post_process_cmds.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal representation of post-process commands for LLMFunction.

This module is internal to LLMFunction and should only be used by
llm_function.py.
"""
from __future__ import annotations

import abc
from typing import Sequence

from google.generativeai.notebook.lib import llmfn_output_row
from google.generativeai.notebook.lib import llmfn_post_process


def _convert_view_to_output_row(
    row: llmfn_output_row.LLMFnOutputRowView,
) -> llmfn_output_row.LLMFnOutputRow:
    """Convenience method to convert a LLMFnOutputRowView to LLMFnOutputRow.

    If `row` is already a LLMFnOutputRow, it is returned as-is for efficiency.
    This could potentially break encapsulation as it could let code modify
    a LLMFnOutputRowView that was intended to be immutable, so it should be
    used with care.

    Args:
        row: An instance of LLMFnOutputRowView.

    Returns:
        An instance of LLMFnOutputRow. May be the same instance as `row` if
        `row` is already an instance of LLMFnOutputRow.
    """
    if isinstance(row, llmfn_output_row.LLMFnOutputRow):
        return row
    return llmfn_output_row.LLMFnOutputRow(data=row, result_type=row.result_type())


class LLMFnPostProcessCommand(abc.ABC):
    """Abstract class representing post-processing commands."""

    @abc.abstractmethod
    def name(self) -> str:
        """Returns the name of this post-processing command."""


class LLMFnImplPostProcessCommand(LLMFnPostProcessCommand):
    """Post-processing commands for LLMFunctionImpl."""

    @abc.abstractmethod
    def run(
        self, rows: Sequence[llmfn_output_row.LLMFnOutputRowView]
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Processes a batch of results and returns a new batch.

        Args:
            rows: The rows in a batch. Note that `rows` are not guaranteed to
                remain unmodified.

        Returns:
            A new set of rows that should replace the batch.
        """


class LLMFnPostProcessReorderCommand(LLMFnImplPostProcessCommand):
    """A batch command that reorders/filters a set of results at once.

    Note that a "batch" represents a set of results coming from a single prompt,
    as the model may produce more-than-one result for a prompt.
    """

    def __init__(self, name: str, fn: llmfn_post_process.LLMFnPostProcessBatchReorderFn):
        # `name` is used in error messages to identify the failing command.
        self._name = name
        self._fn = fn

    def name(self) -> str:
        return self._name

    def run(
        self,
        rows: Sequence[llmfn_output_row.LLMFnOutputRowView],
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Applies `fn` and validates the returned indices before reordering.

        Raises:
            llmfn_post_process.PostProcessExecutionError: If `fn` returns
                duplicate or out-of-range indices.
        """
        new_row_indices = self._fn(rows)
        # Duplicate indices would silently duplicate rows; reject up front.
        if len(set(new_row_indices)) != len(new_row_indices):
            raise llmfn_post_process.PostProcessExecutionError(
                'Error executing "{}": returned indices should be unique'.format(self._name)
            )

        new_rows: list[llmfn_output_row.LLMFnOutputRow] = []
        for idx in new_row_indices:
            if idx < 0:
                raise llmfn_post_process.PostProcessExecutionError(
                    'Error executing "{}": returned indices must be greater than or'
                    " equal to zero, got {}".format(self._name, idx)
                )
            if idx >= len(rows):
                raise llmfn_post_process.PostProcessExecutionError(
                    'Error executing "{}": returned indices must be less than length of'
                    " rows (={}), got {}".format(self._name, len(rows), idx)
                )
            new_rows.append(_convert_view_to_output_row(rows[idx]))
        return new_rows


class LLMFnPostProcessAddCommand(LLMFnImplPostProcessCommand):
    """A command that adds a new column to each row.

    The new column is named after the command. This does not change the value
    of the results cell.
    """

    def __init__(self, name: str, fn: llmfn_post_process.LLMFnPostProcessBatchAddFn):
        # `name` doubles as the key of the added column.
        self._name = name
        self._fn = fn

    def name(self) -> str:
        return self._name

    def run(
        self,
        rows: Sequence[llmfn_output_row.LLMFnOutputRowView],
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Applies `fn` and appends its values as a new column, one per row.

        Raises:
            llmfn_post_process.PostProcessExecutionError: If `fn` does not
                return exactly one value per input row.
        """
        new_values = self._fn(rows)
        if len(new_values) != len(rows):
            raise llmfn_post_process.PostProcessExecutionError(
                'Error executing "{}": returned length ({}) != number of input rows'
                " ({})".format(self._name, len(new_values), len(rows))
            )

        new_rows: list[llmfn_output_row.LLMFnOutputRow] = []
        for new_value, row in zip(new_values, rows):
            new_row = _convert_view_to_output_row(row)
            # LLMFnOutputRow.add() handles column-name collisions.
            new_row.add(key=self._name, value=new_value)
            new_rows.append(new_row)

        return new_rows


class LLMFnPostProcessReplaceCommand(LLMFnImplPostProcessCommand):
    """A command that replaces the result value in each row."""

    def __init__(self, name: str, fn: llmfn_post_process.LLMFnPostProcessBatchReplaceFn):
        self._name = name
        self._fn = fn

    def name(self) -> str:
        return self._name

    def run(
        self,
        rows: Sequence[llmfn_output_row.LLMFnOutputRowView],
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Applies `fn` and writes its values into each row's result cell.

        Raises:
            llmfn_post_process.PostProcessExecutionError: If `fn` does not
                return exactly one value per input row.
        """
        new_values = self._fn(rows)
        if len(new_values) != len(rows):
            raise llmfn_post_process.PostProcessExecutionError(
                'Error executing "{}": returned length ({}) != number of input rows'
                " ({})".format(self._name, len(new_values), len(rows))
            )

        new_rows: list[llmfn_output_row.LLMFnOutputRow] = []
        for new_value, row in zip(new_values, rows):
            new_row = _convert_view_to_output_row(row)
            # set_result_value enforces the row's result_type at runtime.
            new_row.set_result_value(value=new_value)
            new_rows.append(new_row)

        return new_rows


class LLMCompareFnPostProcessCommand(LLMFnPostProcessCommand):
    """Post-processing commands for LLMCompareFunction."""

    @abc.abstractmethod
    def run(
        self,
        rows: Sequence[
            tuple[
                llmfn_output_row.LLMFnOutputRowView,
                llmfn_output_row.LLMFnOutputRowView,
                llmfn_output_row.LLMFnOutputRowView,
            ]
        ],
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Processes a batch of left- and right-hand side results.

        Args:
            rows: The rows in a batch. Each row is a three-tuple containing:
                - The left-hand side results,
                - The right-hand side results, and
                - The current combined results.

        Returns:
            A new set of rows that should replace the combined results.
        """


class LLMCompareFnPostProcessAddCommand(LLMCompareFnPostProcessCommand):
    """A command that adds a new column to each combined row.

    The user-supplied function sees only the (lhs, rhs) pair; its value is
    added to the combined row. This does not change the value of the results
    cell.
    """

    def __init__(
        self,
        name: str,
        fn: llmfn_post_process.LLMCompareFnPostProcessBatchAddFn,
    ):
        # `name` doubles as the key of the added column.
        self._name = name
        self._fn = fn

    def name(self) -> str:
        return self._name

    def run(
        self,
        rows: Sequence[
            tuple[
                llmfn_output_row.LLMFnOutputRowView,
                llmfn_output_row.LLMFnOutputRowView,
                llmfn_output_row.LLMFnOutputRowView,
            ]
        ],
    ) -> Sequence[llmfn_output_row.LLMFnOutputRow]:
        """Applies `fn` to (lhs, rhs) pairs; adds values to the combined rows.

        Raises:
            llmfn_post_process.PostProcessExecutionError: If `fn` does not
                return exactly one value per input row.
        """
        # The user function only sees the (lhs, rhs) pair, not the combined row.
        new_values = self._fn([(lhs, rhs) for lhs, rhs, _ in rows])
        if len(new_values) != len(rows):
            raise llmfn_post_process.PostProcessExecutionError(
                'Error executing "{}": returned length ({}) != number of input rows'
                " ({})".format(self._name, len(new_values), len(rows))
            )

        new_rows: list[llmfn_output_row.LLMFnOutputRow] = []
        for new_value, row in zip(new_values, [combined for _, _, combined in rows]):
            new_row = _convert_view_to_output_row(row)
            new_row.add(key=self._name, value=new_value)
            new_rows.append(new_row)

        return new_rows
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract interface for models."""

import abc
import dataclasses
from typing import Sequence


@dataclasses.dataclass(frozen=True)
class ModelArguments:
    """Common arguments accepted by all models.

    Attributes:
        model: The model string to use. If None a default model will be
            selected.
        temperature: The temperature. Must be greater-than-or-equal-to zero.
        candidate_count: Number of candidates to return.
    """

    model: "str | None" = None
    temperature: "float | None" = None
    candidate_count: "int | None" = None


@dataclasses.dataclass
class ModelResults:
    """Results from calling AbstractModel.call_model().

    Attributes:
        model_input: The exact input string that was sent to the model.
        text_results: The text candidates the model produced for that input.
    """

    model_input: str
    text_results: Sequence[str]


class AbstractModel(abc.ABC):
    """Interface implemented by all model backends."""

    @abc.abstractmethod
    def call_model(
        self, model_input: str, model_args: "ModelArguments | None" = None
    ) -> ModelResults:
        """Executes the model."""


class EchoModel(AbstractModel):
    """Model that returns the original input.

    This is primarily used for testing.
    """

    def call_model(
        self, model_input: str, model_args: "ModelArguments | None" = None
    ) -> ModelResults:
        # Default to a single candidate when no count was requested.
        num_candidates = 1
        if model_args is not None and model_args.candidate_count is not None:
            num_candidates = model_args.candidate_count
        return ModelResults(
            model_input=model_input,
            text_results=[model_input] * num_candidates,
        )
+ """ + placeholders: list[str] = [] + for _, field_name, _, _ in string.Formatter().parse(prompt): + if field_name is not None: + placeholders.append(field_name) + return frozenset(placeholders) diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/unique_fn.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/unique_fn.py new file mode 100644 index 0000000000000000000000000000000000000000..3492130f55860a8e46e4531a3c9cc0e1cfb652b5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/lib/unique_fn.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Function for de-duping results.""" +from __future__ import annotations + +from typing import Sequence +from google.generativeai.notebook.lib import llmfn_output_row + + +def unique_fn( + rows: Sequence[llmfn_output_row.LLMFnOutputRowView], +) -> Sequence[int]: + """Returns a list of indices with duplicates removed. + + E.g. if rows has results ["hello", "hello", "world"], the return value would + be [0, 2], indicating that the results at index 1 is a duplicate and should be + removed. + + Args: + rows: The input rows + + Returns: + A sequence of indices indicating which entries have unique results. 
+ """ + indices: list[int] = [] + seen_entries = set() + for idx, row in enumerate(rows): + value = row.result_value() + if value in seen_entries: + continue + + seen_entries.add(value) + indices.append(idx) + + return indices diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/output_utils.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/output_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bcc3c5abf15a605051c520da6f208652e5de3e37 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/output_utils.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for exporting outputs from LLMFunctions.""" +from __future__ import annotations + +import copy + +from google.generativeai.notebook import parsed_args_lib +from google.generativeai.notebook import py_utils +from google.generativeai.notebook.lib import llmfn_outputs + + +class _PyVarOutputsSink(llmfn_outputs.LLMFnOutputsSink): + """Sink that writes results to a Python variable.""" + + def __init__(self, var_name: str): + self._var_name = var_name + + def write_outputs(self, outputs: llmfn_outputs.LLMFnOutputsBase) -> None: + # Clone our results so that they are all independent. 
+ py_utils.set_py_var(self._var_name, copy.deepcopy(outputs)) + + +def get_outputs_sink_from_py_var( + var_name: str, +) -> llmfn_outputs.LLMFnOutputsSink: + # The output variable `var_name` will be created if it does not already + # exist. + if py_utils.has_py_var(var_name): + data = py_utils.get_py_var(var_name) + if isinstance(data, llmfn_outputs.LLMFnOutputsSink): + return data + return _PyVarOutputsSink(var_name) + + +def write_to_outputs( + results: llmfn_outputs.LLMFnOutputs, + parsed_args: parsed_args_lib.ParsedArgs, +) -> None: + """Writes `results` to the sinks provided. + + Args: + results: The results to export. + parsed_args: Arguments parsed from the command line. + """ + for sink in parsed_args.outputs: + results.export(sink) + for sink in parsed_args.sheets_output_names: + results.export(sink) diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/post_process_utils.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/post_process_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c01e83d36bb7e9108a1324481ad16975f783f4b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/post_process_utils.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Utilities for working with post-processing tokens.""" +from __future__ import annotations + +import abc +from typing import Any, Callable, Sequence + +from google.generativeai.notebook import py_utils +from google.generativeai.notebook.lib import llm_function +from google.generativeai.notebook.lib import llmfn_output_row +from google.generativeai.notebook.lib import llmfn_post_process + + +class PostProcessParseError(RuntimeError): + """An error parsing the post-processing tokens.""" + + +class ParsedPostProcessExpr(abc.ABC): + """A post-processing expression parsed from the command line.""" + + @abc.abstractmethod + def name(self) -> str: + """Returns the name of this expression.""" + + @abc.abstractmethod + def add_to_llm_function(self, llm_fn: llm_function.LLMFunction) -> llm_function.LLMFunction: + """Adds this parsed expression to `llm_fn` as a post-processing command.""" + + +class _ParsedPostProcessAddExpr( + ParsedPostProcessExpr, llmfn_post_process.LLMFnPostProcessBatchAddFn +): + """An expression that returns the value of a new column to add to a row.""" + + def __init__(self, name: str, fn: Callable[[str], Any]): + """Constructor. + + Args: + name: The name of the expression. The name of the new column will be + derived from this. + fn: A function that takes the result of a row and returns a new value to + add as a new column in the row. 
+ """ + self._name = name + self._fn = fn + + def name(self) -> str: + return self._name + + def __call__(self, rows: Sequence[llmfn_output_row.LLMFnOutputRowView]) -> Sequence[Any]: + return [self._fn(row.result_value()) for row in rows] + + def add_to_llm_function(self, llm_fn: llm_function.LLMFunction) -> llm_function.LLMFunction: + return llm_fn.add_post_process_add_fn(name=self._name, fn=self) + + +class _ParsedPostProcessReplaceExpr( + ParsedPostProcessExpr, llmfn_post_process.LLMFnPostProcessBatchReplaceFn +): + """An expression that returns the new result value for a row.""" + + def __init__(self, name: str, fn: Callable[[str], str]): + """Constructor. + + Args: + name: The name of the expression. + fn: A function that takes the result of a row and returns the new result. + """ + self._name = name + self._fn = fn + + def name(self) -> str: + return self._name + + def __call__(self, rows: Sequence[llmfn_output_row.LLMFnOutputRowView]) -> Sequence[str]: + return [self._fn(row.result_value()) for row in rows] + + def add_to_llm_function(self, llm_fn: llm_function.LLMFunction) -> llm_function.LLMFunction: + return llm_fn.add_post_process_replace_fn(name=self._name, fn=self) + + +# Decorator functions. 
+def post_process_add_fn(fn: Callable[[str], Any]): + return _ParsedPostProcessAddExpr(name=fn.__name__, fn=fn) + + +def post_process_replace_fn(fn: Callable[[str], str]): + return _ParsedPostProcessReplaceExpr(name=fn.__name__, fn=fn) + + +def validate_one_post_processing_expression( + tokens: Sequence[str], +) -> None: + if not tokens: + raise PostProcessParseError("Cannot have empty post-processing expression") + if len(tokens) > 1: + raise PostProcessParseError("Post-processing expression should be a single token") + + +def _resolve_one_post_processing_expression( + tokens: Sequence[str], +) -> tuple[str, Any]: + """Returns name and the resolved expression.""" + validate_one_post_processing_expression(tokens) + + token_parts = tokens[0].split(".") + + current_module = py_utils.get_main_module() + for part_num, part in enumerate(token_parts): + current_module_vars = vars(current_module) + if part not in current_module_vars: + raise PostProcessParseError( + 'Unable to resolve "{}"'.format(".".join(token_parts[: part_num + 1])) + ) + + current_module = current_module_vars[part] + + return (" ".join(tokens), current_module) + + +def resolve_post_processing_tokens( + tokens: Sequence[Sequence[str]], +) -> Sequence[ParsedPostProcessExpr]: + """Resolves post-processing tokens into ParsedPostProcessExprs. + + E.g. Given [["add_length"], ["to_upper"]] as input, this function will return + a sequence of ParsedPostProcessExprs that will execute add_length() and + to_upper() on each entry of the LLM output as post-processing operations. + + Raises: + PostProcessParseError: An error parsing or resolving the tokens. + + Args: + tokens: A sequence of post-processing tokens after splitting. + + Returns: + A sequence of ParsedPostProcessExprs. 
+ """ + results: list[ParsedPostProcessExpr] = [] + for expression in tokens: + expr_name, expr_value = _resolve_one_post_processing_expression(expression) + if isinstance(expr_value, ParsedPostProcessExpr): + results.append(expr_value) + elif isinstance(expr_value, Callable): + # By default, assume that an undecorated function is an "add" function. + results.append(_ParsedPostProcessAddExpr(name=expr_name, fn=expr_value)) + else: + raise PostProcessParseError("{} is not callable".format(expr_name)) + + return results diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/notebook/py_utils.py b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/py_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6800cfeeec43ba5198140fb50e6f2d8d2d3e0e4c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/notebook/py_utils.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Convenience functions for writing to and reading from Python variables.""" +from __future__ import annotations + +import builtins +import keyword +import sys +from typing import Any + + +def validate_var_name(var_name: str) -> None: + """Validates that the variable name is a valid identifier.""" + if not var_name.isidentifier(): + raise ValueError('Invalid Python variable name, got "{}"'.format(var_name)) + if keyword.iskeyword(var_name): + raise ValueError('Cannot use Python keywords, got "{}"'.format(var_name)) + + +def get_main_module(): + return sys.modules["__main__"] + + +def get_py_var(var_name: str) -> Any: + """Retrieves the value of `var_name` from the global environment.""" + validate_var_name(var_name) + g_vars = vars(get_main_module()) + if var_name in g_vars: + return g_vars[var_name] + elif var_name in vars(builtins): + return vars(builtins)[var_name] + raise NameError('"{}" not found'.format(var_name)) + + +def has_py_var(var_name: str) -> bool: + """Returns true if `var_name` is defined in the global environment.""" + try: + validate_var_name(var_name) + _ = get_py_var(var_name) + except ValueError: + return False + except NameError: + return False + + return True + + +def set_py_var(var_name: str, val: Any) -> None: + """Sets the value of `var_name` in the global environment.""" + validate_var_name(var_name) + g_vars = vars(get_main_module()) + g_vars[var_name] = val diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/types/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/google/generativeai/types/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1787002f35d6c8c3bb416b6505a7c6130f53a29 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/google/generativeai/types/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/types/image_types/__init__.py 
b/.venv/lib/python3.11/site-packages/google/generativeai/types/image_types/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6e9d0a3fe1856b699b306488504a92b801fb406b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/types/image_types/__init__.py @@ -0,0 +1 @@ +from google.generativeai.types.image_types._image_types import * diff --git a/.venv/lib/python3.11/site-packages/google/generativeai/types/permission_types.py b/.venv/lib/python3.11/site-packages/google/generativeai/types/permission_types.py new file mode 100644 index 0000000000000000000000000000000000000000..48cc9c132fa34fff68ca50ecd3fa8388afce807c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/google/generativeai/types/permission_types.py @@ -0,0 +1,479 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import dataclasses +from typing import Optional, Union, Any, Iterable, AsyncIterable +import re + +import google.ai.generativelanguage as glm +from google.generativeai import protos + +from google.protobuf import field_mask_pb2 + +from google.generativeai.client import get_default_permission_client +from google.generativeai.client import get_default_permission_async_client +from google.generativeai.utils import flatten_update_paths +from google.generativeai import string_utils + +__all__ = ["Permission", "Permissions"] + +GranteeType = protos.Permission.GranteeType +Role = protos.Permission.Role + +GranteeTypeOptions = Union[str, int, GranteeType] +RoleOptions = Union[str, int, Role] + +_GRANTEE_TYPE: dict[GranteeTypeOptions, GranteeType] = { + GranteeType.GRANTEE_TYPE_UNSPECIFIED: GranteeType.GRANTEE_TYPE_UNSPECIFIED, + 0: GranteeType.GRANTEE_TYPE_UNSPECIFIED, + "grantee_type_unspecified": GranteeType.GRANTEE_TYPE_UNSPECIFIED, + "unspecified": GranteeType.GRANTEE_TYPE_UNSPECIFIED, + GranteeType.USER: GranteeType.USER, + 1: GranteeType.USER, + "user": GranteeType.USER, + GranteeType.GROUP: GranteeType.GROUP, + 2: GranteeType.GROUP, + "group": GranteeType.GROUP, + GranteeType.EVERYONE: GranteeType.EVERYONE, + 3: GranteeType.EVERYONE, + "everyone": GranteeType.EVERYONE, +} + +_ROLE: dict[RoleOptions, Role] = { + Role.ROLE_UNSPECIFIED: Role.ROLE_UNSPECIFIED, + 0: Role.ROLE_UNSPECIFIED, + "role_unspecified": Role.ROLE_UNSPECIFIED, + "unspecified": Role.ROLE_UNSPECIFIED, + Role.OWNER: Role.OWNER, + 1: Role.OWNER, + "owner": Role.OWNER, + Role.WRITER: Role.WRITER, + 2: Role.WRITER, + "writer": Role.WRITER, + Role.READER: Role.READER, + 3: Role.READER, + "reader": Role.READER, +} + +_VALID_PERMISSION_ID = r"permissions/([a-z0-9]+)$" +INVALID_PERMISSION_ID_MSG = """`permission_id` must follow the pattern: `permissions/` and must \ +consist of only alphanumeric characters. 
Got: `{permission_id}` instead.""" + + +def to_grantee_type(x: GranteeTypeOptions) -> GranteeType: + if isinstance(x, str): + x = x.lower() + return _GRANTEE_TYPE[x] + + +def to_role(x: RoleOptions) -> Role: + if isinstance(x, str): + x = x.lower() + return _ROLE[x] + + +def valid_id(name: str) -> bool: + return re.match(_VALID_PERMISSION_ID, name) is not None + + +@string_utils.prettyprint +@dataclasses.dataclass(init=False) +class Permission: + """ + A permission to access a resource. + """ + + name: str + role: Role + grantee_type: Optional[GranteeType] + email_address: Optional[str] = None + + def __init__( + self, + name: str, + role: RoleOptions, + grantee_type: Optional[GranteeTypeOptions] = None, + email_address: Optional[str] = None, + ): + self.name = name + if role is None: + self.role = None + else: + self.role = to_role(role) + if grantee_type is None: + self.grantee_type = None + else: + self.grantee_type = to_grantee_type(grantee_type) + self.email_address = email_address + + def delete( + self, + client: glm.PermissionServiceClient | None = None, + ) -> None: + """ + Delete permission (self). + """ + if client is None: + client = get_default_permission_client() + delete_request = protos.DeletePermissionRequest(name=self.name) + client.delete_permission(request=delete_request) + + async def delete_async( + self, + client: glm.PermissionServiceAsyncClient | None = None, + ) -> None: + """ + This is the async version of `Permission.delete`. + """ + if client is None: + client = get_default_permission_async_client() + delete_request = protos.DeletePermissionRequest(name=self.name) + await client.delete_permission(request=delete_request) + + # TODO (magashe): Add a method to validate update value. 
As of now only `role` is supported as a mask path + def _apply_update(self, path, value): + parts = path.split(".") + for part in parts[:-1]: + self = getattr(self, part) + setattr(self, parts[-1], value) + + def update( + self, + updates: dict[str, Any], + client: glm.PermissionServiceClient | None = None, + ) -> Permission: + """ + Update a list of fields for a specified permission. + + Args: + updates: The list of fields to update. + Currently only `role` is supported as an update path. + + Returns: + `Permission` object with specified updates. + """ + if client is None: + client = get_default_permission_client() + + updates = flatten_update_paths(updates) + for update_path in updates: + if update_path != "role": + raise ValueError( + f"Invalid update path: '{update_path}'. Currently, only the 'role' attribute can be updated for 'Permission'." + ) + field_mask = field_mask_pb2.FieldMask() + + for path in updates.keys(): + field_mask.paths.append(path) + for path, value in updates.items(): + self._apply_update(path, value) + + update_request = protos.UpdatePermissionRequest( + permission=self._to_proto(), update_mask=field_mask + ) + client.update_permission(request=update_request) + return self + + async def update_async( + self, + updates: dict[str, Any], + client: glm.PermissionServiceAsyncClient | None = None, + ) -> Permission: + """ + This is the async version of `Permission.update`. + """ + if client is None: + client = get_default_permission_async_client() + + updates = flatten_update_paths(updates) + for update_path in updates: + if update_path != "role": + raise ValueError( + f"Invalid update path: '{update_path}'. Currently, only the 'role' attribute can be updated for 'Permission'." 
+ ) + field_mask = field_mask_pb2.FieldMask() + + for path in updates.keys(): + field_mask.paths.append(path) + for path, value in updates.items(): + self._apply_update(path, value) + + update_request = protos.UpdatePermissionRequest( + permission=self._to_proto(), update_mask=field_mask + ) + await client.update_permission(request=update_request) + return self + + def _to_proto(self) -> protos.Permission: + return protos.Permission( + name=self.name, + role=self.role, + grantee_type=self.grantee_type, + email_address=self.email_address, + ) + + def to_dict(self) -> dict[str, Any]: + return dataclasses.asdict(self) + + @classmethod + def get( + cls, + name: str, + client: glm.PermissionServiceClient | None = None, + ) -> Permission: + """ + Get information about a specific permission. + + Args: + name: The name of the permission to get. + + Returns: + Requested permission as an instance of `Permission`. + """ + if client is None: + client = get_default_permission_client() + get_perm_request = protos.GetPermissionRequest(name=name) + get_perm_response = client.get_permission(request=get_perm_request) + get_perm_response = type(get_perm_response).to_dict(get_perm_response) + return cls(**get_perm_response) + + @classmethod + async def get_async( + cls, + name: str, + client: glm.PermissionServiceAsyncClient | None = None, + ) -> Permission: + """ + This is the async version of `Permission.get`. 
+ """ + if client is None: + client = get_default_permission_async_client() + get_perm_request = protos.GetPermissionRequest(name=name) + get_perm_response = await client.get_permission(request=get_perm_request) + get_perm_response = type(get_perm_response).to_dict(get_perm_response) + return cls(**get_perm_response) + + +class Permissions: + def __init__(self, parent): + if isinstance(parent, str): + self._parent = parent + else: + self._parent = parent.name + + @property + def parent(self): + return self._parent + + def _make_create_permission_request( + self, + role: RoleOptions, + grantee_type: Optional[GranteeTypeOptions] = None, + email_address: Optional[str] = None, + ) -> protos.CreatePermissionRequest: + role = to_role(role) + + if grantee_type: + grantee_type = to_grantee_type(grantee_type) + + if email_address and grantee_type == GranteeType.EVERYONE: + raise ValueError( + f"Invalid operation: Access cannot be limited for a specific email address ('{email_address}') when 'grantee_type' is set to 'EVERYONE'." + ) + if not email_address and grantee_type != GranteeType.EVERYONE: + raise ValueError( + f"Invalid operation: An 'email_address' must be provided when 'grantee_type' is not set to 'EVERYONE'. Currently, 'grantee_type' is set to '{grantee_type}' and 'email_address' is '{email_address if email_address else 'not provided'}'." 
+ ) + + if email_address and grantee_type is None: + if email_address.endswith("googlegroups.com"): + grantee_type = GranteeType.GROUP + else: + grantee_type = GranteeType.USER + + permission = protos.Permission( + role=role, + grantee_type=grantee_type, + email_address=email_address, + ) + return protos.CreatePermissionRequest( + parent=self.parent, + permission=permission, + ) + + def create( + self, + role: RoleOptions, + grantee_type: Optional[GranteeTypeOptions] = None, + email_address: Optional[str] = None, + client: glm.PermissionServiceClient | None = None, + ) -> Permission: + """ + Create a new permission on a resource (self). + + Args: + parent: The resource name of the parent resource in which the permission will be listed. + role: role that will be granted by the permission. + grantee_type: The type of the grantee for the permission. + email_address: The email address of the grantee. + + Returns: + `Permission` object with specified parent, role, grantee type, and email address. + + Raises: + ValueError: When email_address is specified and grantee_type is set to EVERYONE. + ValueError: When email_address is not specified and grantee_type is not set to EVERYONE. + """ + if client is None: + client = get_default_permission_client() + + request = self._make_create_permission_request( + role=role, grantee_type=grantee_type, email_address=email_address + ) + permission_response = client.create_permission(request=request) + permission_response = type(permission_response).to_dict(permission_response) + return Permission(**permission_response) + + async def create_async( + self, + role: RoleOptions, + grantee_type: Optional[GranteeTypeOptions] = None, + email_address: Optional[str] = None, + client: glm.PermissionServiceAsyncClient | None = None, + ) -> Permission: + """ + This is the async version of `PermissionAdapter.create_permission`. 
+ """ + if client is None: + client = get_default_permission_async_client() + + request = self._make_create_permission_request( + role=role, grantee_type=grantee_type, email_address=email_address + ) + permission_response = await client.create_permission(request=request) + permission_response = type(permission_response).to_dict(permission_response) + return Permission(**permission_response) + + def list( + self, + page_size: Optional[int] = None, + client: glm.PermissionServiceClient | None = None, + ) -> Iterable[Permission]: + """ + List `Permission`s enforced on a resource (self). + + Args: + parent: The resource name of the parent resource in which the permission will be listed. + page_size: The maximum number of permissions to return (per page). The service may return fewer permissions. + + Returns: + Paginated list of `Permission` objects. + """ + if client is None: + client = get_default_permission_client() + + request = protos.ListPermissionsRequest( + parent=self.parent, page_size=page_size # pytype: disable=attribute-error + ) + for permission in client.list_permissions(request): + permission = type(permission).to_dict(permission) + yield Permission(**permission) + + def __iter__(self): + return self.list() + + async def list_async( + self, + page_size: Optional[int] = None, + client: glm.PermissionServiceAsyncClient | None = None, + ) -> AsyncIterable[Permission]: + """ + This is the async version of `PermissionAdapter.list_permissions`. + """ + if client is None: + client = get_default_permission_async_client() + + request = protos.ListPermissionsRequest( + parent=self.parent, page_size=page_size # pytype: disable=attribute-error + ) + async for permission in await client.list_permissions(request): + permission = type(permission).to_dict(permission) + yield Permission(**permission) + + async def __aiter__(self): + return self.list_async() + + @classmethod + def get(cls, name: str) -> Permission: + """ + Get information about a specific permission. 
+
+ Args:
+ name: The name of the permission to get.
+
+ Returns:
+ Requested permission as an instance of `Permission`.
+ """
+ return Permission.get(name)
+
+ @classmethod
+ async def get_async(cls, name: str) -> Permission:
+ """
+ Get information about a specific permission.
+
+ Args:
+ name: The name of the permission to get.
+
+ Returns:
+ Requested permission as an instance of `Permission`.
+ """
+ return await Permission.get_async(name)
+
+ def transfer_ownership(
+ self,
+ email_address: str,
+ client: glm.PermissionServiceClient | None = None,
+ ) -> None:
+ """
+ Transfer ownership of a resource (self) to a new owner.
+
+ Args:
+ name: Name of the resource to transfer ownership.
+ email_address: Email address of the new owner.
+ """
+ if self.parent.startswith("corpora"):
+ raise NotImplementedError("Can't transfer_ownership for a Corpus")
+ if client is None:
+ client = get_default_permission_client()
+ transfer_request = protos.TransferOwnershipRequest(
+ name=self.parent, email_address=email_address # pytype: disable=attribute-error
+ )
+ return client.transfer_ownership(request=transfer_request)
+
+ async def transfer_ownership_async(
+ self,
+ email_address: str,
+ client: glm.PermissionServiceAsyncClient | None = None,
+ ) -> None:
+ """This is the async version of `Permissions.transfer_ownership`."""
+ if self.parent.startswith("corpora"):
+ raise NotImplementedError("Can't transfer_ownership for a Corpus")
+ if client is None:
+ client = get_default_permission_async_client()
+ transfer_request = protos.TransferOwnershipRequest(
+ name=self.parent, email_address=email_address # pytype: disable=attribute-error
+ )
+ return await client.transfer_ownership(request=transfer_request)