Add a "delete" rule to lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_delete_rule]
:end-before: [END add_lifecycle_delete_rule]
:type kw: dict
:params kw: arguments passed to :class:`LifecycleRuleConditions`.
def add_lifecycle_delete_rule(self, **kw):
"""Add a "delete" rule to lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_delete_rule]
:end-before: [END add_lifecycle_delete_rule]
:type kw: dict
:params kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
rules = list(self.lifecycle_rules)
rules.append(LifecycleRuleDelete(**kw))
self.lifecycle_rules = rules
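A minimal usage sketch (the bucket name is hypothetical): keyword arguments become lifecycle conditions, and the staged change is persisted with a follow-up ``patch()`` call.

from google.cloud import storage

client = storage.Client()
bucket = client.get_bucket("my-bucket")  # hypothetical bucket name
# Delete objects more than 30 days old; ``age`` is forwarded to
# LifecycleRuleConditions.
bucket.add_lifecycle_delete_rule(age=30)
bucket.patch()  # send the updated lifecycle configuration to the API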
def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
"""Add a "delete" rule to lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_set_storage_class_rule]
:end-before: [END add_lifecycle_set_storage_class_rule]
:type storage_class: str, one of :attr:`_STORAGE_CLASSES`.
:param storage_class: new storage class to assign to matching items.
:type kw: dict
:params kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
rules = list(self.lifecycle_rules)
rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
self.lifecycle_rules = rules
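The companion method follows the same pattern; a sketch reusing the ``bucket`` handle from the previous example:

# Transition objects older than 90 days to NEARLINE storage.
bucket.add_lifecycle_set_storage_class_rule("NEARLINE", age=90)
bucket.patch()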
def location(self, value):
"""(Deprecated) Set `Bucket.location`
This can only be set at bucket **creation** time.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and
https://cloud.google.com/storage/docs/bucket-locations
.. warning::
Assignment to 'Bucket.location' is deprecated, as it is only
valid before the bucket is created. Instead, pass the location
to `Bucket.create`.
"""
warnings.warn(_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
self._location = value
def enable_logging(self, bucket_name, object_prefix=""):
"""Enable access logging for this bucket.
See https://cloud.google.com/storage/docs/access-logs
:type bucket_name: str
:param bucket_name: name of bucket in which to store access logs
:type object_prefix: str
:param object_prefix: prefix for access log filenames
"""
info = {"logBucket": bucket_name, "logObjectPrefix": object_prefix}
self._patch_property("logging", info)
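A minimal sketch, reusing the ``bucket`` handle from the first example (the log-bucket name is hypothetical); as with other bucket properties, the change is staged locally until ``patch()`` is called:

# Write access logs to a separate, pre-existing bucket.
bucket.enable_logging("my-access-logs", object_prefix="access-log")
bucket.patch()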
def retention_policy_effective_time(self):
"""Retrieve the effective time of the bucket's retention policy.
:rtype: datetime.datetime or ``NoneType``
:returns: point-in-time at which the bucket's retention policy is
effective, or ``None`` if the property is not
set locally.
"""
policy = self._properties.get("retentionPolicy")
if policy is not None:
timestamp = policy.get("effectiveTime")
if timestamp is not None:
return _rfc3339_to_datetime(timestamp)
def retention_period(self):
"""Retrieve or set the retention period for items in the bucket.
:rtype: int or ``NoneType``
:returns: number of seconds to retain items after upload or release
from event-based lock, or ``None`` if the property is not
set locally.
"""
policy = self._properties.get("retentionPolicy")
if policy is not None:
period = policy.get("retentionPeriod")
if period is not None:
return int(period)
def retention_period(self, value):
"""Set the retention period for items in the bucket.
:type value: int
:param value:
number of seconds to retain items after upload or release from
event-based lock.
:raises ValueError: if the bucket's retention policy is locked.
"""
policy = self._properties.setdefault("retentionPolicy", {})
if value is not None:
policy["retentionPeriod"] = str(value)
else:
policy = None
self._patch_property("retentionPolicy", policy)
def storage_class(self, value):
"""Set the storage class for the bucket.
See https://cloud.google.com/storage/docs/storage-classes
:type value: str
:param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE",
"COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY"
"""
if value not in self._STORAGE_CLASSES:
raise ValueError("Invalid storage class: %s" % (value,))
self._patch_property("storageClass", value)
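Setting the class is likewise a staged property assignment; values outside ``_STORAGE_CLASSES`` raise ``ValueError`` before any request is made:

bucket.storage_class = "NEARLINE"
bucket.patch()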
def configure_website(self, main_page_suffix=None, not_found_page=None):
"""Configure website-related properties.
See https://cloud.google.com/storage/docs/hosting-static-website
.. note::
This (apparently) only works
if your bucket name is a domain name
(and to do that, you need to get approved somehow...).
If you want this bucket to host a website, just provide the name
of an index page and a page to use when a blob isn't found:
.. literalinclude:: snippets.py
:start-after: [START configure_website]
:end-before: [END configure_website]
You probably should also make the whole bucket public:
.. literalinclude:: snippets.py
:start-after: [START make_public]
:end-before: [END make_public]
This says: "Make the bucket public, and all the stuff already in
the bucket, and anything else I add to the bucket. Just make it
all public."
:type main_page_suffix: str
:param main_page_suffix: The page to use as the main page
of a directory.
Typically something like index.html.
:type not_found_page: str
:param not_found_page: The file to use when a page isn't found.
"""
data = {"mainPageSuffix": main_page_suffix, "notFoundPage": not_found_page}
self._patch_property("website", data)
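A sketch of the website setup described above, combined with ``make_public()`` as the docstring recommends (the domain-style bucket name is hypothetical):

bucket = storage.Client().get_bucket("www.example.com")  # hypothetical
bucket.configure_website(main_page_suffix="index.html",
                         not_found_page="404.html")
bucket.patch()
bucket.make_public(recursive=True, future=True)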
def get_iam_policy(self, client=None):
"""Retrieve the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``getIamPolicy`` API request.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
info = client._connection.api_request(
method="GET",
path="%s/iam" % (self.path,),
query_params=query_params,
_target_object=None,
)
return Policy.from_api_repr(info)
def set_iam_policy(self, policy, client=None):
"""Update the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type policy: :class:`google.api_core.iam.Policy`
:param policy: policy instance used to update bucket's IAM policy.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``setIamPolicy`` API request.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params["userProject"] = self.user_project
resource = policy.to_api_repr()
resource["resourceId"] = self.path
info = client._connection.api_request(
method="PUT",
path="%s/iam" % (self.path,),
query_params=query_params,
data=resource,
_target_object=None,
)
return Policy.from_api_repr(info)
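A typical read-modify-write cycle over the two IAM methods; the role and member string are illustrative only. :class:`google.api_core.iam.Policy` supports dict-style access, where each role maps to a set of members.

policy = bucket.get_iam_policy()
policy["roles/storage.objectViewer"].add("user:alice@example.com")  # hypothetical member
bucket.set_iam_policy(policy)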
def make_private(self, recursive=False, future=False, client=None):
"""Update bucket's ACL, revoking read access for anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
private as well.
:type future: bool
:param future: If True, this will make all objects created in the
future private as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_private`
for each blob.
"""
self.acl.all().revoke_read()
self.acl.save(client=client)
if future:
doa = self.default_object_acl
if not doa.loaded:
doa.reload(client=client)
doa.all().revoke_read()
doa.save(client=client)
if recursive:
blobs = list(
self.list_blobs(
projection="full",
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client,
)
)
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
"Refusing to make private recursively with more than "
"%d objects. If you actually want to make every object "
"in this bucket private, iterate through the blobs "
"returned by 'Bucket.list_blobs()' and call "
"'make_private' on each one."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
for blob in blobs:
blob.acl.all().revoke_read()
blob.acl.save(client=client)
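Usage mirrors ``make_public()``; a one-line sketch:

# Revoke anonymous read access on the bucket, existing blobs, and future
# objects (raises ValueError above 256 blobs when ``recursive`` is set).
bucket.make_private(recursive=True, future=True)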
def generate_upload_policy(self, conditions, expiration=None, client=None):
"""Create a signed upload policy for uploading objects.
This method generates and signs a policy document. You can use
`policy documents`_ to allow visitors to a website to upload files to
Google Cloud Storage without giving them direct write access.
For example:
.. literalinclude:: snippets.py
:start-after: [START policy_document]
:end-before: [END policy_document]
.. _policy documents:
https://cloud.google.com/storage/docs/xml-api\
/post-object#policydocument
:type expiration: datetime
:param expiration: Optional expiration in UTC. If not specified, the
policy will expire in 1 hour.
:type conditions: list
:param conditions: A list of conditions as described in the
`policy documents`_ documentation.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: dict
:returns: A dictionary of (form field name, form field value) of form
fields that should be added to your HTML upload form in order
to attach the signature.
"""
client = self._require_client(client)
credentials = client._base_connection.credentials
_signing.ensure_signed_credentials(credentials)
if expiration is None:
expiration = _NOW() + datetime.timedelta(hours=1)
conditions = conditions + [{"bucket": self.name}]
policy_document = {
"expiration": _datetime_to_rfc3339(expiration),
"conditions": conditions,
}
encoded_policy_document = base64.b64encode(
json.dumps(policy_document).encode("utf-8")
)
signature = base64.b64encode(credentials.sign_bytes(encoded_policy_document))
fields = {
"bucket": self.name,
"GoogleAccessId": credentials.signer_email,
"policy": encoded_policy_document.decode("utf-8"),
"signature": signature.decode("utf-8"),
}
return fields
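A sketch of rendering the returned fields into an HTML form, per the policy-document flow described above; the condition list and form markup are illustrative:

conditions = [["starts-with", "$key", "uploads/"]]  # hypothetical key prefix
fields = bucket.generate_upload_policy(conditions)
inputs = "\n".join(
    '<input type="hidden" name="{}" value="{}">'.format(name, value)
    for name, value in fields.items()
)
print(inputs)  # embed in a form POSTing to the bucket's upload endpoint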
def lock_retention_policy(self, client=None):
"""Lock the bucket's retention policy.
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked.
"""
if "metageneration" not in self._properties:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
policy = self._properties.get("retentionPolicy")
if policy is None:
raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
if policy.get("isLocked"):
raise ValueError("Bucket's retention policy is already locked.")
client = self._require_client(client)
query_params = {"ifMetagenerationMatch": self.metageneration}
if self.user_project is not None:
query_params["userProject"] = self.user_project
path = "/b/{}/lockRetentionPolicy".format(self.name)
api_response = client._connection.api_request(
method="POST", path=path, query_params=query_params, _target_object=self
)
self._set_properties(api_response)
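Locking is irreversible, and the method needs a current metageneration, so reload first; a sketch:

bucket.reload()  # fetch the metageneration used for ifMetagenerationMatch
bucket.lock_retention_policy()  # permanent: the period can no longer be reduced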
def generate_signed_url(
self,
expiration=None,
api_access_endpoint=_API_ACCESS_ENDPOINT,
method="GET",
headers=None,
query_parameters=None,
client=None,
credentials=None,
version=None,
):
"""Generates a signed URL for this bucket.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL using GCE service account. Follow `Issue 50`_ for updates on
this. If you'd like to be able to generate a signed URL from GCE,
you can use a standard service account from a JSON file rather
than a GCE service account.
.. _Issue 50: https://github.com/GoogleCloudPlatform/\
google-auth-library-python/issues/50
If you have a bucket that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible buckets, but don't want to require users to explicitly
log in.
:type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
:param expiration: Point in time when the signed URL should expire.
:type api_access_endpoint: str
:param api_access_endpoint: Optional URI base.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type headers: dict
:param headers:
(Optional) Additional HTTP headers to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers
Requests using the signed URL *must* pass the specified header
(name and value) with each request for the URL.
:type query_parameters: dict
:param query_parameters:
(Optional) Additional query parameters to be included as part of the
signed URLs. See:
https://cloud.google.com/storage/docs/xml-api/reference-headers#query
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: (Optional) The OAuth2 credentials to use to sign
the URL. Defaults to the credentials stored on the
client used.
:type version: str
:param version: (Optional) The version of signed credential to create.
Must be one of 'v2' | 'v4'.
:raises: :exc:`ValueError` when version is invalid.
:raises: :exc:`TypeError` when expiration is not a valid type.
:raises: :exc:`AttributeError` if credentials is not an instance
of :class:`google.auth.credentials.Signing`.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration.
"""
if version is None:
version = "v2"
elif version not in ("v2", "v4"):
raise ValueError("'version' must be either 'v2' or 'v4'")
resource = "/{bucket_name}".format(bucket_name=self.name)
if credentials is None:
client = self._require_client(client)
credentials = client._credentials
if version == "v2":
helper = generate_signed_url_v2
else:
helper = generate_signed_url_v4
return helper(
credentials,
resource=resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method.upper(),
headers=headers,
query_parameters=query_parameters,
)
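A sketch requesting a v4 URL valid for one hour, reusing the hypothetical ``bucket``; this requires signing-capable credentials (e.g., a service-account JSON key), as the note above explains.

import datetime

url = bucket.generate_signed_url(
    expiration=datetime.timedelta(hours=1),
    method="GET",
    version="v4",
)
print(url)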
def to_microseconds(value):
"""Convert a datetime to microseconds since the unix epoch.
Args:
value (datetime.datetime): The datetime to convert.
Returns:
int: Microseconds since the unix epoch.
"""
if not value.tzinfo:
value = value.replace(tzinfo=pytz.utc)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(pytz.utc)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
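A worked example of the function above; naive datetimes are assumed UTC, matching the ``replace(tzinfo=pytz.utc)`` branch.

import datetime

# One second and one microsecond past the epoch:
stamp = datetime.datetime(1970, 1, 1, 0, 0, 1, 1)
assert to_microseconds(stamp) == 1000001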
def from_rfc3339(value):
"""Convert a microsecond-precision timestamp to datetime.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
"""
return datetime.datetime.strptime(value, _RFC3339_MICROS).replace(tzinfo=pytz.utc)
def from_rfc3339_nanos(value):
"""Convert a nanosecond-precision timestamp to a native datetime.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
Raises:
ValueError: If the timestamp does not match the RFC 3339
regular expression.
"""
with_nanos = _RFC3339_NANOS.match(value)
if with_nanos is None:
raise ValueError(
"Timestamp: {!r}, does not match pattern: {!r}".format(
value, _RFC3339_NANOS.pattern
)
)
bare_seconds = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
micros = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000
return bare_seconds.replace(microsecond=micros, tzinfo=pytz.utc)
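A worked example of the truncation noted above, using the function just defined: nine fractional digits collapse to six.

dt = from_rfc3339_nanos("2019-01-01T00:00:00.123456789Z")
assert dt.microsecond == 123456  # nanoseconds truncated, not rounded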
def rfc3339(self):
"""Return an RFC 3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC 3339 spec.
"""
if self._nanosecond == 0:
return to_rfc3339(self)
nanos = str(self._nanosecond).rjust(9, "0").rstrip("0")
return "{}.{}Z".format(self.strftime(_RFC3339_NO_FRACTION), nanos) |
Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (str): RFC 3339 stamp, with up to nanosecond precision
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp string
Raises:
ValueError: if `stamp` does not match the expected format
def from_rfc3339(cls, stamp):
"""Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (str): RFC 3339 stamp, with up to nanosecond precision
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp string
Raises:
ValueError: if `stamp` does not match the expected format
"""
with_nanos = _RFC3339_NANOS.match(stamp)
if with_nanos is None:
raise ValueError(
"Timestamp: {}, does not match pattern: {}".format(
stamp, _RFC3339_NANOS.pattern
)
)
bare = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
nanos = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=nanos,
tzinfo=pytz.UTC,
)
def timestamp_pb(self):
"""Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
"""
inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
delta = inst - _UTC_EPOCH
seconds = int(delta.total_seconds())
nanos = self._nanosecond or self.microsecond * 1000
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
def from_timestamp_pb(cls, stamp):
"""Parse RFC 3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
"""
microseconds = int(stamp.seconds * 1e6)
bare = from_microseconds(microseconds)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=stamp.nanos,
tzinfo=pytz.UTC,
)
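A sketch of a lossless round trip through the protobuf representation, using the ``timestamp_pb`` and ``from_timestamp_pb`` methods above (``DatetimeWithNanoseconds`` is the class they belong to):

import pytz

dt = DatetimeWithNanoseconds(
    2019, 1, 1, 12, 0, 0, nanosecond=123456789, tzinfo=pytz.UTC
)
pb = dt.timestamp_pb()  # Timestamp(seconds=..., nanos=123456789)
back = DatetimeWithNanoseconds.from_timestamp_pb(pb)
assert back.nanosecond == dt.nanosecond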
def create_cluster(
self,
project_id,
region,
cluster,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, region, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``CreateClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.CreateClusterRequest(
project_id=project_id, region=region, cluster=cluster, request_id=request_id
)
operation = self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def delete_cluster(
self,
project_id,
region,
cluster_name,
cluster_uuid=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.delete_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail
(with error NOT\_FOUND) if cluster with specified UUID does not exist.
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``DeleteClusterRequest`` requests with the same id, then
the second request will be ignored and the first
``google.longrunning.Operation`` created and stored in the backend is
returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`__.
The id must contain only letters (a-z, A-Z), numbers (0-9), underscores
(\_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"delete_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs["DeleteCluster"].retry,
default_timeout=self._method_configs["DeleteCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DeleteClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
)
operation = self._inner_api_calls["delete_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def get_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_clusters(
self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or
``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
to match all values. ``status.state`` can be one of the following:
``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
cluster provided at creation time. Only the logical ``AND`` operator is
supported; space-separated items are treated as having an implicit
``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster AND labels.env =
staging AND labels.starred = \*
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_clusters" not in self._inner_api_calls:
self._inner_api_calls[
"list_clusters"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs["ListClusters"].retry,
default_timeout=self._method_configs["ListClusters"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id, region=region, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_clusters"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="clusters",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def diagnose_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.diagnose_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "diagnose_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"diagnose_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs["DiagnoseCluster"].retry,
default_timeout=self._method_configs["DiagnoseCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
operation = self._inner_api_calls["diagnose_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
)
def commit(self):
"""Actually publish all of the messages on the active batch.
.. note::
This method is non-blocking. It opens a new thread, which calls
:meth:`_commit`, which does block.
This synchronously sets the batch status to "starting", and then opens
a new thread, which handles actually sending the messages to Pub/Sub.
If the current batch is **not** accepting messages, this method
does nothing.
"""
# Set the status to "starting" synchronously, to ensure that
# this batch will necessarily not accept new messages.
with self._state_lock:
if self._status == base.BatchStatus.ACCEPTING_MESSAGES:
self._status = base.BatchStatus.STARTING
else:
return
# Start a new thread to actually handle the commit.
commit_thread = threading.Thread(
name="Thread-CommitBatchPublisher", target=self._commit
)
commit_thread.start()
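The lock-then-spawn shape used here is a generic concurrency pattern; a self-contained sketch of the same state machine, independent of Pub/Sub:

import threading

class Batch:
    def __init__(self):
        self._state_lock = threading.Lock()
        self._status = "accepting"

    def _commit(self):
        pass  # blocking network work happens here

    def commit(self):
        # Flip the status under the lock so no new messages slip in,
        # then hand the blocking work to a background thread.
        with self._state_lock:
            if self._status != "accepting":
                return
            self._status = "starting"
        threading.Thread(target=self._commit, name="commit").start()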
def _commit(self):
"""Actually publish all of the messages on the active batch.
This moves the batch out from being the active batch to an in progress
batch on the publisher, and then the batch is discarded upon
completion.
.. note::
This method blocks. The :meth:`commit` method is the non-blocking
version, which calls this one.
"""
with self._state_lock:
if self._status in _CAN_COMMIT:
self._status = base.BatchStatus.IN_PROGRESS
else:
# If, in the intervening period between when this method was
# called and now, the batch started to be committed, or
# completed a commit, then no-op at this point.
_LOGGER.debug("Batch is already in progress, exiting commit")
return
# Sanity check: If there are no messages, no-op.
if not self._messages:
_LOGGER.debug("No messages to publish, exiting commit")
self._status = base.BatchStatus.SUCCESS
return
# Begin the request to publish these messages.
# Log how long the underlying request takes.
start = time.time()
try:
response = self._client.api.publish(self._topic, self._messages)
except google.api_core.exceptions.GoogleAPIError as exc:
# We failed to publish, set the exception on all futures and
# exit.
self._status = base.BatchStatus.ERROR
for future in self._futures:
future.set_exception(exc)
_LOGGER.exception("Failed to publish %s messages.", len(self._futures))
return
end = time.time()
_LOGGER.debug("gRPC Publish took %s seconds.", end - start)
if len(response.message_ids) == len(self._futures):
# Iterate over the futures on the queue and return the response
# IDs. We are trusting that there is a 1:1 mapping, and raise
# an exception if not.
self._status = base.BatchStatus.SUCCESS
zip_iter = six.moves.zip(response.message_ids, self._futures)
for message_id, future in zip_iter:
future.set_result(message_id)
else:
# Sanity check: If the number of message IDs is not equal to
# the number of futures I have, then something went wrong.
self._status = base.BatchStatus.ERROR
exception = exceptions.PublishError(
"Some messages were not successfully published."
)
for future in self._futures:
future.set_exception(exception)
_LOGGER.error(
"Only %s of %s messages were published.",
len(response.message_ids),
len(self._futures),
) |
Commit this batch after sufficient time has elapsed.
This simply sleeps for ``self._settings.max_latency`` seconds,
and then calls commit unless the batch has already been committed.
def monitor(self):
"""Commit this batch after sufficient time has elapsed.
This simply sleeps for ``self._settings.max_latency`` seconds,
and then calls commit unless the batch has already been committed.
"""
# NOTE: This blocks; it is up to the calling code to call it
# in a separate thread.
# Sleep for however long we should be waiting.
time.sleep(self._settings.max_latency)
_LOGGER.debug("Monitor is waking up")
return self._commit() |
Publish a single message.
Add the given message to this object; this will cause it to be
published once the batch either has enough messages or a sufficient
period of time has elapsed.
This method is called by :meth:`~.PublisherClient.publish`.
Args:
message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.
Returns:
Optional[~google.api_core.future.Future]: An object conforming to
the :class:`~concurrent.futures.Future` interface or :data:`None`.
If :data:`None` is returned, that signals that the batch cannot
accept a message.
def publish(self, message):
"""Publish a single message.
Add the given message to this object; this will cause it to be
published once the batch either has enough messages or a sufficient
period of time has elapsed.
This method is called by :meth:`~.PublisherClient.publish`.
Args:
message (~.pubsub_v1.types.PubsubMessage): The Pub/Sub message.
Returns:
Optional[~google.api_core.future.Future]: An object conforming to
the :class:`~concurrent.futures.Future` interface or :data:`None`.
If :data:`None` is returned, that signals that the batch cannot
accept a message.
"""
# Coerce the type, just in case.
if not isinstance(message, types.PubsubMessage):
message = types.PubsubMessage(**message)
future = None
with self._state_lock:
if not self.will_accept(message):
return future
new_size = self._size + message.ByteSize()
new_count = len(self._messages) + 1
overflow = (
new_size > self._settings.max_bytes
or new_count >= self._settings.max_messages
)
if not self._messages or not overflow:
# Store the actual message in the batch's message queue.
self._messages.append(message)
self._size = new_size
# Track the future on this batch (so that the result of the
# future can be set).
future = futures.Future(completed=threading.Event())
self._futures.append(future)
# Try to commit, but it must be **without** the lock held, since
# ``commit()`` will try to obtain the lock.
if overflow:
self.commit()
return future |
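The batch machinery above is normally driven through the publisher client rather than used directly; a hedged usage sketch (topic and project names are placeholders):
from google.cloud import pubsub_v1

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path("my-project", "my-topic")

# publish() hands the message to the active batch; the returned future
# resolves to the server-assigned message ID once the batch commits.
future = publisher.publish(topic_path, b"payload", origin="example")
print(future.result()) |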
Factory: construct a change set given its API representation
:type resource: dict
:param resource: change set representation returned from the API.
:type zone: :class:`google.cloud.dns.zone.ManagedZone`
:param zone: A zone which holds zero or more change sets.
:rtype: :class:`google.cloud.dns.changes.Changes`
:returns: Change set parsed from ``resource``.
def from_api_repr(cls, resource, zone):
"""Factory: construct a change set given its API representation
:type resource: dict
:param resource: change set representation returned from the API.
:type zone: :class:`google.cloud.dns.zone.ManagedZone`
:param zone: A zone which holds zero or more change sets.
:rtype: :class:`google.cloud.dns.changes.Changes`
:returns: Change set parsed from ``resource``.
"""
changes = cls(zone=zone)
changes._set_properties(resource)
return changes |
Helper method for :meth:`from_api_repr`, :meth:`create`, etc.
:type resource: dict
:param resource: change set representation returned from the API.
def _set_properties(self, resource):
"""Helper method for :meth:`from_api_repr`, :meth:`create`, etc.
:type resource: dict
:param resource: change set representation returned from the API.
"""
resource = resource.copy()
self._additions = tuple(
[
ResourceRecordSet.from_api_repr(added_res, self.zone)
for added_res in resource.pop("additions", ())
]
)
self._deletions = tuple(
[
ResourceRecordSet.from_api_repr(deleted_res, self.zone)
for deleted_res in resource.pop("deletions", ())
]
)
self._properties = resource |
URL path for change set APIs.
:rtype: str
:returns: the path based on project, zone, and change set names.
def path(self):
"""URL path for change set APIs.
:rtype: str
:returns: the path based on project, zone, and change set names.
"""
return "/projects/%s/managedZones/%s/changes/%s" % (
self.zone.project,
self.zone.name,
self.name,
) |
Update name of the change set.
:type value: str
:param value: New name for the changeset.
def name(self, value):
"""Update name of the change set.
:type value: str
:param value: New name for the changeset.
"""
if not isinstance(value, six.string_types):
raise ValueError("Pass a string")
self._properties["id"] = value |
Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
def add_record_set(self, record_set):
"""Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._additions += (record_set,) |
Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
def delete_record_set(self, record_set):
"""Append a record set to the 'deletions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._deletions += (record_set,) |
Generate a resource for ``create``.
def _build_resource(self):
"""Generate a resource for ``create``."""
additions = [
{
"name": added.name,
"type": added.record_type,
"ttl": str(added.ttl),
"rrdatas": added.rrdatas,
}
for added in self.additions
]
deletions = [
{
"name": deleted.name,
"type": deleted.record_type,
"ttl": str(deleted.ttl),
"rrdatas": deleted.rrdatas,
}
for deleted in self.deletions
]
return {"additions": additions, "deletions": deletions} |
API call: create the change set via a POST request.
See
https://cloud.google.com/dns/api/v1/changes/create
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
def create(self, client=None):
"""API call: create the change set via a POST request.
See
https://cloud.google.com/dns/api/v1/changes/create
:type client: :class:`google.cloud.dns.client.Client`
:param client:
(Optional) the client to use. If not passed, falls back to the
``client`` stored on the current zone.
"""
if len(self.additions) == 0 and len(self.deletions) == 0:
raise ValueError("No record sets added or deleted")
client = self._require_client(client)
path = "/projects/%s/managedZones/%s/changes" % (
self.zone.project,
self.zone.name,
)
api_response = client._connection.api_request(
method="POST", path=path, data=self._build_resource()
)
self._set_properties(api_response) |
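A hedged sketch of the change-set workflow these methods support; the project, zone, and record values are placeholders.
from google.cloud import dns

client = dns.Client(project="my-project")
zone = client.zone("my-zone", "example.com.")

record = zone.resource_record_set(
    "www.example.com.", "A", 3600, ["203.0.113.10"]
)
changes = zone.changes()
changes.add_record_set(record)
changes.create()  # POSTs the payload assembled by _build_resource(). |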
Helper for logging-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
def logging_api(self):
"""Helper for logging-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
"""
if self._logging_api is None:
if self._use_grpc:
self._logging_api = _gapic.make_logging_api(self)
else:
self._logging_api = JSONLoggingAPI(self)
return self._logging_api |
Helper for log sink-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
def sinks_api(self):
"""Helper for log sink-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
"""
if self._sinks_api is None:
if self._use_grpc:
self._sinks_api = _gapic.make_sinks_api(self)
else:
self._sinks_api = JSONSinksAPI(self)
return self._sinks_api |
Helper for log metric-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
def metrics_api(self):
"""Helper for log metric-related API calls.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
"""
if self._metrics_api is None:
if self._use_grpc:
self._metrics_api = _gapic.make_metrics_api(self)
else:
self._metrics_api = JSONMetricsAPI(self)
return self._metrics_api |
Return a page of log entries.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_:
a filter expression. See
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size:
Optional. The maximum number of entries in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of entries, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
accessible to the current client.
def list_entries(
self,
projects=None,
filter_=None,
order_by=None,
page_size=None,
page_token=None,
):
"""Return a page of log entries.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_:
a filter expression. See
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size:
Optional. The maximum number of entries in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of entries, using
the value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing
the token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
accessible to the current client.
"""
if projects is None:
projects = [self.project]
return self.logging_api.list_entries(
projects=projects,
filter_=filter_,
order_by=order_by,
page_size=page_size,
page_token=page_token,
) |
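A hedged usage sketch for list_entries; the project ID and filter are illustrative.
from google.cloud import logging as cloud_logging

client = cloud_logging.Client(project="my-project")
# The iterator pages through results transparently.
for entry in client.list_entries(filter_="severity>=ERROR"):
    print(entry.timestamp, entry.payload) |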
Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`google.cloud.logging.sink.Sink`
:returns: Sink created with the current client.
def sink(self, name, filter_=None, destination=None):
"""Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`google.cloud.logging.sink.Sink`
:returns: Sink created with the current client.
"""
return Sink(name, filter_, destination, client=self) |
List sinks for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list
:type page_size: int
:param page_size:
Optional. The maximum number of sinks in each page of results from
this request. Non-positive values are ignored. Defaults to a
sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of sinks, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.sink.Sink`
accessible to the current client.
def list_sinks(self, page_size=None, page_token=None):
"""List sinks for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list
:type page_size: int
:param page_size:
Optional. The maximum number of sinks in each page of results from
this request. Non-positive values are ignored. Defaults to a
sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of sinks, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.sink.Sink`
accessible to the current client.
"""
return self.sinks_api.list_sinks(self.project, page_size, page_token) |
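A hedged sketch tying sink() and list_sinks() together; the sink name and destination are placeholders.
from google.cloud import logging as cloud_logging

client = cloud_logging.Client(project="my-project")
sink = client.sink(
    "error-sink",
    filter_="severity>=ERROR",
    destination="storage.googleapis.com/my-log-bucket",
)
if not sink.exists():
    sink.create()
for existing in client.list_sinks():
    print(existing.name, existing.destination) |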
Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
def metric(self, name, filter_=None, description=""):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description) |
List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type page_size: int
:param page_size:
Optional. The maximum number of metrics in each page of results
from this request. Non-positive values are ignored. Defaults to a
sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of metrics, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.metric.Metric`
accessible to the current client.
def list_metrics(self, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list
:type page_size: int
:param page_size:
Optional. The maximum number of metrics in each page of results
from this request. Non-positive values are ignored. Defaults to a
sensible value set by the API.
:type page_token: str
:param page_token:
Optional. If present, return the next batch of metrics, using the
value, which must correspond to the ``nextPageToken`` value
returned in the previous response. Deprecated: use the ``pages``
property of the returned iterator instead of manually passing the
token.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.metric.Metric`
accessible to the current client.
"""
return self.metrics_api.list_metrics(self.project, page_size, page_token) |
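A hedged sketch pairing metric() with list_metrics(); the metric name and filter are placeholders.
from google.cloud import logging as cloud_logging

client = cloud_logging.Client(project="my-project")
metric = client.metric(
    "not-found-count",
    filter_="httpRequest.status=404",
    description="Count of 404 responses.",
)
if not metric.exists():
    metric.create()
for existing in client.list_metrics():
    print(existing.name, existing.filter_) |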
Return the default logging handler based on the local environment.
:type kw: dict
:param kw: keyword args passed to handler constructor
:rtype: :class:`logging.Handler`
:returns: The default log handler based on the environment
def get_default_handler(self, **kw):
"""Return the default logging handler based on the local environment.
:type kw: dict
:param kw: keyword args passed to handler constructor
:rtype: :class:`logging.Handler`
:returns: The default log handler based on the environment
"""
gke_cluster_name = retrieve_metadata_server(_GKE_CLUSTER_NAME)
if (
_APPENGINE_FLEXIBLE_ENV_VM in os.environ
or _APPENGINE_INSTANCE_ID in os.environ
):
return AppEngineHandler(self, **kw)
elif gke_cluster_name is not None:
return ContainerEngineHandler(**kw)
else:
return CloudLoggingHandler(self, **kw) |
Attach default Stackdriver logging handler to the root logger.
This method uses the default log handler, obtained by
:meth:`~get_default_handler`, and attaches it to the root Python
logger, so that a call such as ``logging.warn``, as well as all child
loggers, will report to Stackdriver logging.
:type log_level: int
:param log_level: (Optional) Python logging log level. Defaults to
:const:`logging.INFO`.
:type excluded_loggers: tuple
:param excluded_loggers: (Optional) The loggers to not attach the
handler to. This will always include the
loggers in the path of the logging client
itself.
:type kw: dict
:param kw: keyword args passed to handler constructor
def setup_logging(
self, log_level=logging.INFO, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, **kw
):
"""Attach default Stackdriver logging handler to the root logger.
This method uses the default log handler, obtained by
:meth:`~get_default_handler`, and attaches it to the root Python
logger, so that a call such as ``logging.warn``, as well as all child
loggers, will report to Stackdriver logging.
:type log_level: int
:param log_level: (Optional) Python logging log level. Defaults to
:const:`logging.INFO`.
:type excluded_loggers: tuple
:param excluded_loggers: (Optional) The loggers to not attach the
handler to. This will always include the
loggers in the path of the logging client
itself.
:type kw: dict
:param kw: keyword args passed to handler constructor
"""
handler = self.get_default_handler(**kw)
setup_logging(handler, log_level=log_level, excluded_loggers=excluded_loggers) |
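A hedged usage sketch: route the standard-library root logger to Stackdriver and emit a record (project ID is a placeholder).
import logging

from google.cloud import logging as cloud_logging

client = cloud_logging.Client(project="my-project")
client.setup_logging(log_level=logging.INFO)
# Records from the standard library now flow to Stackdriver as well.
logging.info("Handled request in %d ms", 42) |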
Return a fully-qualified key_ring string.
def key_ring_path(cls, project, location, key_ring):
"""Return a fully-qualified key_ring string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}",
project=project,
location=location,
key_ring=key_ring,
) |
Return a fully-qualified crypto_key_path string.
def crypto_key_path_path(cls, project, location, key_ring, crypto_key_path):
"""Return a fully-qualified crypto_key_path string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key_path=**}",
project=project,
location=location,
key_ring=key_ring,
crypto_key_path=crypto_key_path,
) |
Return a fully-qualified crypto_key_version string.
def crypto_key_version_path(
cls, project, location, key_ring, crypto_key, crypto_key_version
):
"""Return a fully-qualified crypto_key_version string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}",
project=project,
location=location,
key_ring=key_ring,
crypto_key=crypto_key,
crypto_key_version=crypto_key_version,
) |
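A hedged illustration of the expanded templates; all path segments are placeholders.
from google.cloud import kms_v1

ring = kms_v1.KeyManagementServiceClient.key_ring_path(
    "my-project", "us-east1", "my-ring"
)
# -> "projects/my-project/locations/us-east1/keyRings/my-ring"
version = kms_v1.KeyManagementServiceClient.crypto_key_version_path(
    "my-project", "us-east1", "my-ring", "my-key", "1"
)
# -> ".../keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1" |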
Create a new ``KeyRing`` in a given Project and Location.
Example:
>>> from google.cloud import kms_v1
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `key_ring_id`:
>>> key_ring_id = ''
>>>
>>> # TODO: Initialize `key_ring`:
>>> key_ring = {}
>>>
>>> response = client.create_key_ring(parent, key_ring_id, key_ring)
Args:
parent (str): Required. The resource name of the location associated with the
``KeyRings``, in the format ``projects/*/locations/*``.
key_ring_id (str): Required. It must be unique within a location and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.KeyRing`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.KeyRing` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def create_key_ring(
self,
parent,
key_ring_id,
key_ring,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Create a new ``KeyRing`` in a given Project and Location.
Example:
>>> from google.cloud import kms_v1
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `key_ring_id`:
>>> key_ring_id = ''
>>>
>>> # TODO: Initialize `key_ring`:
>>> key_ring = {}
>>>
>>> response = client.create_key_ring(parent, key_ring_id, key_ring)
Args:
parent (str): Required. The resource name of the location associated with the
``KeyRings``, in the format ``projects/*/locations/*``.
key_ring_id (str): Required. It must be unique within a location and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
key_ring (Union[dict, ~google.cloud.kms_v1.types.KeyRing]): A ``KeyRing`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.KeyRing`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.KeyRing` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_key_ring" not in self._inner_api_calls:
self._inner_api_calls[
"create_key_ring"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_key_ring,
default_retry=self._method_configs["CreateKeyRing"].retry,
default_timeout=self._method_configs["CreateKeyRing"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateKeyRingRequest(
parent=parent, key_ring_id=key_ring_id, key_ring=key_ring
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_key_ring"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
Create a new ``CryptoKey`` within a ``KeyRing``.
``CryptoKey.purpose`` and ``CryptoKey.version_template.algorithm`` are
required.
Example:
>>> from google.cloud import kms_v1
>>> from google.cloud.kms_v1 import enums
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
>>> crypto_key_id = 'my-app-key'
>>> purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
>>> seconds = 2147483647
>>> next_rotation_time = {'seconds': seconds}
>>> seconds_2 = 604800
>>> rotation_period = {'seconds': seconds_2}
>>> crypto_key = {'purpose': purpose, 'next_rotation_time': next_rotation_time, 'rotation_period': rotation_period}
>>>
>>> response = client.create_crypto_key(parent, crypto_key_id, crypto_key)
Args:
parent (str): Required. The ``name`` of the KeyRing associated with the
``CryptoKeys``.
crypto_key_id (str): Required. It must be unique within a KeyRing and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
crypto_key (Union[dict, ~google.cloud.kms_v1.types.CryptoKey]): A ``CryptoKey`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.CryptoKey`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.CryptoKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def create_crypto_key(
self,
parent,
crypto_key_id,
crypto_key,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Create a new ``CryptoKey`` within a ``KeyRing``.
``CryptoKey.purpose`` and ``CryptoKey.version_template.algorithm`` are
required.
Example:
>>> from google.cloud import kms_v1
>>> from google.cloud.kms_v1 import enums
>>>
>>> client = kms_v1.KeyManagementServiceClient()
>>>
>>> parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
>>> crypto_key_id = 'my-app-key'
>>> purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
>>> seconds = 2147483647
>>> next_rotation_time = {'seconds': seconds}
>>> seconds_2 = 604800
>>> rotation_period = {'seconds': seconds_2}
>>> crypto_key = {'purpose': purpose, 'next_rotation_time': next_rotation_time, 'rotation_period': rotation_period}
>>>
>>> response = client.create_crypto_key(parent, crypto_key_id, crypto_key)
Args:
parent (str): Required. The ``name`` of the KeyRing associated with the
``CryptoKeys``.
crypto_key_id (str): Required. It must be unique within a KeyRing and match the regular
expression ``[a-zA-Z0-9_-]{1,63}``
crypto_key (Union[dict, ~google.cloud.kms_v1.types.CryptoKey]): A ``CryptoKey`` with initial field values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.kms_v1.types.CryptoKey`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.kms_v1.types.CryptoKey` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_crypto_key" not in self._inner_api_calls:
self._inner_api_calls[
"create_crypto_key"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_crypto_key,
default_retry=self._method_configs["CreateCryptoKey"].retry,
default_timeout=self._method_configs["CreateCryptoKey"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateCryptoKeyRequest(
parent=parent, crypto_key_id=crypto_key_id, crypto_key=crypto_key
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_crypto_key"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
Return a fully-qualified span string.
def span_path(cls, project, trace, span):
"""Return a fully-qualified span string."""
return google.api_core.path_template.expand(
"projects/{project}/traces/{trace}/spans/{span}",
project=project,
trace=trace,
span=span,
) |
Sends new spans to new or existing traces. You cannot update
existing spans.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `spans`:
>>> spans = []
>>>
>>> client.batch_write_spans(name, spans)
Args:
name (str): Required. The name of the project where the spans belong. The format is
``projects/[PROJECT_ID]``.
spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A list of new spans. The span names must not match existing
spans, or the results are undefined.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Span`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def batch_write_spans(
self,
name,
spans,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends new spans to new or existing traces. You cannot update
existing spans.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `spans`:
>>> spans = []
>>>
>>> client.batch_write_spans(name, spans)
Args:
name (str): Required. The name of the project where the spans belong. The format is
``projects/[PROJECT_ID]``.
spans (list[Union[dict, ~google.cloud.trace_v2.types.Span]]): A list of new spans. The span names must not match existing
spans, or the results are undefined.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Span`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_write_spans" not in self._inner_api_calls:
self._inner_api_calls[
"batch_write_spans"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_write_spans,
default_retry=self._method_configs["BatchWriteSpans"].retry,
default_timeout=self._method_configs["BatchWriteSpans"].timeout,
client_info=self._client_info,
)
request = tracing_pb2.BatchWriteSpansRequest(name=name, spans=spans)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["batch_write_spans"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
Creates a new span.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.span_path('[PROJECT]', '[TRACE]', '[SPAN]')
>>>
>>> # TODO: Initialize `span_id`:
>>> span_id = ''
>>>
>>> # TODO: Initialize `display_name`:
>>> display_name = {}
>>>
>>> # TODO: Initialize `start_time`:
>>> start_time = {}
>>>
>>> # TODO: Initialize `end_time`:
>>> end_time = {}
>>>
>>> response = client.create_span(name, span_id, display_name, start_time, end_time)
Args:
name (str): The resource name of the span in the following format:
::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE\_ID] is a unique identifier for a trace within a project; it is a
32-character hexadecimal encoding of a 16-byte array.
[SPAN\_ID] is a unique identifier for a span within a trace; it is a
16-character hexadecimal encoding of an 8-byte array.
span_id (str): The [SPAN\_ID] portion of the span's resource name.
display_name (Union[dict, ~google.cloud.trace_v2.types.TruncatableString]): A description of the span's operation (up to 128 bytes). Stackdriver
Trace displays the description in the {% dynamic print
site\_values.console\_name %}. For example, the display name can be a
qualified method name or a file name and a line number where the
operation is called. A best practice is to use the same display name
within an application and at the same call point. This makes it easier
to correlate spans in different traces.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TruncatableString`
start_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The start time of the span. On the client side, this is the time kept by
the local machine where the span execution starts. On the server side, this
is the time when the server's application handler starts running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The end time of the span. On the client side, this is the time kept by
the local machine where the span execution ends. On the server side, this
is the time when the server application handler stops running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
parent_span_id (str): The [SPAN\_ID] of this span's parent span. If this is a root span, then
this field must be empty.
attributes (Union[dict, ~google.cloud.trace_v2.types.Attributes]): A set of attributes on the span. You can have up to 32 attributes per
span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Attributes`
stack_trace (Union[dict, ~google.cloud.trace_v2.types.StackTrace]): Stack trace captured at the start of the span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.StackTrace`
time_events (Union[dict, ~google.cloud.trace_v2.types.TimeEvents]): A set of time events. You can have up to 32 annotations and 128 message
events per span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TimeEvents`
links (Union[dict, ~google.cloud.trace_v2.types.Links]): Links associated with the span. You can have up to 128 links per Span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Links`
status (Union[dict, ~google.cloud.trace_v2.types.Status]): An optional final status for this span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Status`
same_process_as_parent_span (Union[dict, ~google.cloud.trace_v2.types.BoolValue]): (Optional) Set this parameter to indicate whether this span is in
the same process as its parent. If you do not set this parameter,
Stackdriver Trace is unable to take advantage of this helpful
information.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.BoolValue`
child_span_count (Union[dict, ~google.cloud.trace_v2.types.Int32Value]): An optional number of child spans that were generated while this span
was active. If set, allows implementation to detect missing child spans.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Int32Value`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.trace_v2.types.Span` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def create_span(
self,
name,
span_id,
display_name,
start_time,
end_time,
parent_span_id=None,
attributes=None,
stack_trace=None,
time_events=None,
links=None,
status=None,
same_process_as_parent_span=None,
child_span_count=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new span.
Example:
>>> from google.cloud import trace_v2
>>>
>>> client = trace_v2.TraceServiceClient()
>>>
>>> name = client.span_path('[PROJECT]', '[TRACE]', '[SPAN]')
>>>
>>> # TODO: Initialize `span_id`:
>>> span_id = ''
>>>
>>> # TODO: Initialize `display_name`:
>>> display_name = {}
>>>
>>> # TODO: Initialize `start_time`:
>>> start_time = {}
>>>
>>> # TODO: Initialize `end_time`:
>>> end_time = {}
>>>
>>> response = client.create_span(name, span_id, display_name, start_time, end_time)
Args:
name (str): The resource name of the span in the following format:
::
projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
[TRACE\_ID] is a unique identifier for a trace within a project; it is a
32-character hexadecimal encoding of a 16-byte array.
[SPAN\_ID] is a unique identifier for a span within a trace; it is a
16-character hexadecimal encoding of an 8-byte array.
span_id (str): The [SPAN\_ID] portion of the span's resource name.
display_name (Union[dict, ~google.cloud.trace_v2.types.TruncatableString]): A description of the span's operation (up to 128 bytes). Stackdriver
Trace displays the description in the {% dynamic print
site\_values.console\_name %}. For example, the display name can be a
qualified method name or a file name and a line number where the
operation is called. A best practice is to use the same display name
within an application and at the same call point. This makes it easier
to correlate spans in different traces.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TruncatableString`
start_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The start time of the span. On the client side, this is the time kept by
the local machine where the span execution starts. On the server side, this
is the time when the server's application handler starts running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v2.types.Timestamp]): The end time of the span. On the client side, this is the time kept by
the local machine where the span execution ends. On the server side, this
is the time when the server application handler stops running.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Timestamp`
parent_span_id (str): The [SPAN\_ID] of this span's parent span. If this is a root span, then
this field must be empty.
attributes (Union[dict, ~google.cloud.trace_v2.types.Attributes]): A set of attributes on the span. You can have up to 32 attributes per
span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Attributes`
stack_trace (Union[dict, ~google.cloud.trace_v2.types.StackTrace]): Stack trace captured at the start of the span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.StackTrace`
time_events (Union[dict, ~google.cloud.trace_v2.types.TimeEvents]): A set of time events. You can have up to 32 annotations and 128 message
events per span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.TimeEvents`
links (Union[dict, ~google.cloud.trace_v2.types.Links]): Links associated with the span. You can have up to 128 links per Span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Links`
status (Union[dict, ~google.cloud.trace_v2.types.Status]): An optional final status for this span.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Status`
same_process_as_parent_span (Union[dict, ~google.cloud.trace_v2.types.BoolValue]): (Optional) Set this parameter to indicate whether this span is in
the same process as its parent. If you do not set this parameter,
Stackdriver Trace is unable to take advantage of this helpful
information.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.BoolValue`
child_span_count (Union[dict, ~google.cloud.trace_v2.types.Int32Value]): An optional number of child spans that were generated while this span
was active. If set, allows implementation to detect missing child spans.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v2.types.Int32Value`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.trace_v2.types.Span` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_span" not in self._inner_api_calls:
self._inner_api_calls[
"create_span"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_span,
default_retry=self._method_configs["CreateSpan"].retry,
default_timeout=self._method_configs["CreateSpan"].timeout,
client_info=self._client_info,
)
request = trace_pb2.Span(
name=name,
span_id=span_id,
display_name=display_name,
start_time=start_time,
end_time=end_time,
parent_span_id=parent_span_id,
attributes=attributes,
stack_trace=stack_trace,
time_events=time_events,
links=links,
status=status,
same_process_as_parent_span=same_process_as_parent_span,
child_span_count=child_span_count,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_span"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
Wraps a user callback so that if an exception occurs the message is
nacked.
Args:
callback (Callable[None, Message]): The user callback.
message (~Message): The Pub/Sub message.
def _wrap_callback_errors(callback, message):
"""Wraps a user callback so that if an exception occurs the message is
nacked.
Args:
callback (Callable[None, Message]): The user callback.
message (~Message): The Pub/Sub message.
"""
try:
callback(message)
except Exception:
# Note: the likelihood of this failing is extremely low. This just adds
# a message to a queue, so if this doesn't work the world is in an
# unrecoverable state and this thread should just bail.
_LOGGER.exception(
"Top-level exception occurred in callback while processing a " "message"
)
message.nack() |
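A hedged illustration of how a manager applies this wrapper; the callback is a stand-in.
import functools

def user_callback(message):
    raise RuntimeError("boom")

wrapped = functools.partial(_wrap_callback_errors, user_callback)
# Calling wrapped(message) logs the exception and nacks the message so the
# server redelivers it, instead of letting the scheduler thread die silently. |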
Return the current ack deadline based on historical time-to-ack.
This method is "sticky". It will only perform the computations to
check on the right ack deadline if the histogram has gained a
significant amount of new information.
Returns:
int: The ack deadline.
def ack_deadline(self):
"""Return the current ack deadline based on historical time-to-ack.
This method is "sticky". It will only perform the computations to
check on the right ack deadline if the histogram has gained a
significant amount of new information.
Returns:
int: The ack deadline.
"""
target = min([self._last_histogram_size * 2, self._last_histogram_size + 100])
if len(self.ack_histogram) > target:
    # Record the histogram size at recompute time so the deadline stays
    # "sticky" until significantly more samples have been collected.
    self._last_histogram_size = len(self.ack_histogram)
    self._ack_deadline = self.ack_histogram.percentile(percent=99)
return self._ack_deadline
Return the current load.
The load is represented as a float, where 1.0 represents having
hit one of the flow control limits, and values between 0.0 and 1.0
represent how close we are to them. (0.5 means we have exactly half
of what the flow control setting allows, for example.)
There are (currently) two flow control settings; this property
computes how close the manager is to each of them, and returns
whichever value is higher. (It does not matter that we have lots of
running room on setting A if setting B is over.)
Returns:
float: The load value.
def load(self):
"""Return the current load.
The load is represented as a float, where 1.0 represents having
hit one of the flow control limits, and values between 0.0 and 1.0
represent how close we are to them. (0.5 means we have exactly half
of what the flow control setting allows, for example.)
There are (currently) two flow control settings; this property
computes how close the manager is to each of them, and returns
whichever value is higher. (It does not matter that we have lots of
running room on setting A if setting B is over.)
Returns:
float: The load value.
"""
if self._leaser is None:
return 0.0
return max(
[
self._leaser.message_count / self._flow_control.max_messages,
self._leaser.bytes / self._flow_control.max_bytes,
]
) |
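A worked example of the load formula; the values are illustrative.
max_messages, max_bytes = 100, 1000000
message_count, byte_count = 50, 900000
load = max(
    message_count / float(max_messages), byte_count / float(max_bytes)
)
# load == 0.9: byte usage dominates, so the consumer is close to pausing. |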
Check the current load and pause the consumer if needed.
def maybe_pause_consumer(self):
"""Check the current load and pause the consumer if needed."""
if self.load >= 1.0:
if self._consumer is not None and not self._consumer.is_paused:
_LOGGER.debug("Message backlog over load at %.2f, pausing.", self.load)
self._consumer.pause() |
Check the current load and resume the consumer if needed.
def maybe_resume_consumer(self):
"""Check the current load and resume the consumer if needed."""
# If we have been paused by flow control, check and see if we are
# back within our limits.
#
# In order to not thrash too much, require us to have passed below
# the resume threshold (80% by default) of each flow control setting
# before restarting.
if self._consumer is None or not self._consumer.is_paused:
return
if self.load < self.flow_control.resume_threshold:
self._consumer.resume()
else:
_LOGGER.debug("Did not resume, current load is %s", self.load) |
Send a request using a separate unary request instead of over the
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests.
def _send_unary_request(self, request):
"""Send a request using a separate unary request instead of over the
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests.
"""
if request.ack_ids:
self._client.acknowledge(
subscription=self._subscription, ack_ids=list(request.ack_ids)
)
if request.modify_deadline_ack_ids:
# Send ack_ids with the same deadline seconds together.
deadline_to_ack_ids = collections.defaultdict(list)
for n, ack_id in enumerate(request.modify_deadline_ack_ids):
deadline = request.modify_deadline_seconds[n]
deadline_to_ack_ids[deadline].append(ack_id)
for deadline, ack_ids in six.iteritems(deadline_to_ack_ids):
self._client.modify_ack_deadline(
subscription=self._subscription,
ack_ids=ack_ids,
ack_deadline_seconds=deadline,
)
_LOGGER.debug("Sent request(s) over unary RPC.") |
Queue a request to be sent to the RPC.
def send(self, request):
"""Queue a request to be sent to the RPC."""
if self._UNARY_REQUESTS:
try:
self._send_unary_request(request)
except exceptions.GoogleAPICallError:
_LOGGER.debug(
"Exception while sending unary RPC. This is typically "
"non-fatal as stream requests are best-effort.",
exc_info=True,
)
else:
self._rpc.send(request) |
Sends an empty request over the streaming pull RPC.
This always sends over the stream, regardless of whether
``self._UNARY_REQUESTS`` is set.
def heartbeat(self):
"""Sends an empty request over the streaming pull RPC.
This always sends over the stream, regardless of whether
``self._UNARY_REQUESTS`` is set.
"""
if self._rpc is not None and self._rpc.is_active:
self._rpc.send(types.StreamingPullRequest()) |
Begin consuming messages.
Args:
callback (Callable[None, google.cloud.pubsub_v1.message.Message]):
A callback that will be called for each message received on the
stream.
def open(self, callback):
"""Begin consuming messages.
Args:
callback (Callable[None, google.cloud.pubsub_v1.message.Message]):
A callback that will be called for each message received on the
stream.
"""
if self.is_active:
raise ValueError("This manager is already open.")
if self._closed:
raise ValueError("This manager has been closed and can not be re-used.")
self._callback = functools.partial(_wrap_callback_errors, callback)
# Create the RPC
self._rpc = bidi.ResumableBidiRpc(
start_rpc=self._client.api.streaming_pull,
initial_request=self._get_initial_request,
should_recover=self._should_recover,
)
self._rpc.add_done_callback(self._on_rpc_done)
# Create references to threads
self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
self._leaser = leaser.Leaser(self)
self._heartbeater = heartbeater.Heartbeater(self)
# Start the thread to pass the requests.
self._dispatcher.start()
# Start consuming messages.
self._consumer.start()
# Start the lease maintainer thread.
self._leaser.start()
# Start the stream heartbeater thread.
self._heartbeater.start() |
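open() is normally invoked through the subscriber client; a hedged usage sketch (project and subscription names are placeholders):
from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path("my-project", "my-sub")

def callback(message):
    print(message.data)
    message.ack()

# subscribe() opens the streaming pull and returns a future that can be
# cancelled to shut the manager down.
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback) |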
Stop consuming messages and shut down all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. This is passed to the callbacks
specified via :meth:`add_close_callback`.
def close(self, reason=None):
"""Stop consuming messages and shutdown all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. This is passed to the callbacks
specified via :meth:`add_close_callback`.
"""
with self._closing:
if self._closed:
return
# Stop consuming messages.
if self.is_active:
_LOGGER.debug("Stopping consumer.")
self._consumer.stop()
self._consumer = None
# Shutdown all helper threads
_LOGGER.debug("Stopping scheduler.")
self._scheduler.shutdown()
self._scheduler = None
_LOGGER.debug("Stopping leaser.")
self._leaser.stop()
self._leaser = None
_LOGGER.debug("Stopping dispatcher.")
self._dispatcher.stop()
self._dispatcher = None
_LOGGER.debug("Stopping heartbeater.")
self._heartbeater.stop()
self._heartbeater = None
self._rpc = None
self._closed = True
_LOGGER.debug("Finished stopping manager.")
for callback in self._close_callbacks:
callback(self, reason) |
Return the initial request for the RPC.
This defines the initial request that must always be sent to Pub/Sub
immediately upon opening the subscription.
Returns:
google.cloud.pubsub_v1.types.StreamingPullRequest: A request
suitable for being the first request on the stream (and not
suitable for any other purpose).
def _get_initial_request(self):
"""Return the initial request for the RPC.
This defines the initial request that must always be sent to Pub/Sub
immediately upon opening the subscription.
Returns:
google.cloud.pubsub_v1.types.StreamingPullRequest: A request
suitable for being the first request on the stream (and not
suitable for any other purpose).
"""
# Any ack IDs that are under lease management need to have their
# deadline extended immediately.
if self._leaser is not None:
# Explicitly copy the list, as it could be modified by another
# thread.
lease_ids = list(self._leaser.ack_ids)
else:
lease_ids = []
# Put the request together.
request = types.StreamingPullRequest(
modify_deadline_ack_ids=list(lease_ids),
modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
stream_ack_deadline_seconds=self.ack_histogram.percentile(99),
subscription=self._subscription,
)
# Return the initial request.
return request |
Process all received Pub/Sub messages.
For each message, send a modify-ack-deadline (modack) request to the
server. This prevents expiration of the message due to buffering by
gRPC or a proxy/firewall. It brings the server and client expiration
timers closer to each other, preventing the message from being
redelivered multiple times.
After the messages have all had their ack deadline updated, execute
the callback for each message using the executor.
def _on_response(self, response):
"""Process all received Pub/Sub messages.
For each message, send a modify-ack-deadline (modack) request to the
server. This prevents expiration of the message due to buffering by
gRPC or a proxy/firewall. It brings the server and client expiration
timers closer to each other, preventing the message from being
redelivered multiple times.
After the messages have all had their ack deadline updated, execute
the callback for each message using the executor.
"""
_LOGGER.debug(
"Scheduling callbacks for %s messages.", len(response.received_messages)
)
# Immediately modack the messages we received, as this tells the server
# that we've received them.
items = [
requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
for message in response.received_messages
]
self._dispatcher.modify_ack_deadline(items)
for received_message in response.received_messages:
message = google.cloud.pubsub_v1.subscriber.message.Message(
received_message.message, received_message.ack_id, self._scheduler.queue
)
# TODO: Immediately lease instead of using the callback queue.
self._scheduler.schedule(self._callback, message) |
Determine if an error on the RPC stream should be recovered.
If the exception is one of the retryable exceptions, this will signal
to the consumer thread that it should "recover" from the failure.
This will cause the stream to exit when it returns :data:`False`.
Returns:
bool: Indicates if the caller should recover or shut down.
Will be :data:`True` if the ``exception`` is "acceptable", i.e.
in a list of retryable / idempotent exceptions.
def _should_recover(self, exception):
"""Determine if an error on the RPC stream should be recovered.
If the exception is one of the retryable exceptions, this will signal
to the consumer thread that it should "recover" from the failure.
This will cause the stream to exit when it returns :data:`False`.
Returns:
bool: Indicates if the caller should recover or shut down.
Will be :data:`True` if the ``exception`` is "acceptable", i.e.
in a list of retryable / idempotent exceptions.
"""
exception = _maybe_wrap_exception(exception)
# If this is in the list of idempotent exceptions, then we want to
# recover.
if isinstance(exception, _RETRYABLE_STREAM_ERRORS):
_LOGGER.info("Observed recoverable stream error %s", exception)
return True
_LOGGER.info("Observed non-recoverable stream error %s", exception)
return False |
Add a "change" to this batch to create a document.
If the document given by ``reference`` already exists, then this
batch will fail when :meth:`commit`-ed.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference to be created in this batch.
document_data (dict): Property names and values to use for
creating a document.
def create(self, reference, document_data):
"""Add a "change" to this batch to create a document.
If the document given by ``reference`` already exists, then this
batch will fail when :meth:`commit`-ed.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference to be created in this batch.
document_data (dict): Property names and values to use for
creating a document.
"""
write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
self._add_write_pbs(write_pbs) |
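For context, a sketch of queuing a create through a batch; the client and document names are illustrative, not taken from this code:
client = firestore.Client()          # assumes google-cloud-firestore
batch = client.batch()
doc_ref = client.document("users", "alovelace")
# Queued locally only; the server sees nothing until batch.commit().
batch.create(doc_ref, {"first": "Ada", "last": "Lovelace"})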
Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``merge`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List[str]]):
If True, apply merging instead of overwriting the state
of the document; if a list of field paths, merge only those fields.
def set(self, reference, document_data, merge=False):
"""Add a "change" to replace a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.set` for
more information on how ``merge`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference):
A document reference that will have values set in this batch.
document_data (dict):
Property names and values to use for replacing a document.
merge (Optional[bool] or Optional[List[str]]):
If True, apply merging instead of overwriting the state
of the document; if a list of field paths, merge only those fields.
"""
if merge is not False:
write_pbs = _helpers.pbs_for_set_with_merge(
reference._document_path, document_data, merge
)
else:
write_pbs = _helpers.pbs_for_set_no_merge(
reference._document_path, document_data
)
self._add_write_pbs(write_pbs) |
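The ``merge`` flag picks between two write shapes, roughly as below (same illustrative ``batch`` and ``doc_ref`` as in the sketch above):
# Overwrite: the document ends up containing exactly these fields.
batch.set(doc_ref, {"first": "Ada", "last": "Lovelace"})
# Merge: only the listed fields change; other fields are left intact.
batch.set(doc_ref, {"born": 1815}, merge=True)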
Add a "change" to update a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.update` for
more information on ``field_updates`` and ``option``.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be updated in this batch.
field_updates (dict): Field names or paths to update and values
to update with.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
def update(self, reference, field_updates, option=None):
"""Add a "change" to update a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.update` for
more information on ``field_updates`` and ``option``.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be updated in this batch.
field_updates (dict): Field names or paths to update and values
to update with.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
"""
if option.__class__.__name__ == "ExistsOption":
raise ValueError("you must not pass an explicit write option to update.")
write_pbs = _helpers.pbs_for_update(
reference._document_path, field_updates, option
)
self._add_write_pbs(write_pbs) |
Add a "change" to delete a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.delete` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be deleted in this batch.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
def delete(self, reference, option=None):
"""Add a "change" to delete a document.
See
:meth:`~.firestore_v1beta1.document.DocumentReference.delete` for
more information on how ``option`` determines how the change is
applied.
Args:
reference (~.firestore_v1beta1.document.DocumentReference): A
document reference that will be deleted in this batch.
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
"""
write_pb = _helpers.pb_for_delete(reference._document_path, option)
self._add_write_pbs([write_pb]) |
Commit the changes accumulated in this batch.
Returns:
List[google.cloud.proto.firestore.v1beta1.\
write_pb2.WriteResult, ...]: The write results corresponding
to the changes committed, returned in the same order as the
changes were applied to this batch. A write result contains an
``update_time`` field.
def commit(self):
"""Commit the changes accumulated in this batch.
Returns:
List[google.cloud.proto.firestore.v1beta1.\
write_pb2.WriteResult, ...]: The write results corresponding
to the changes committed, returned in the same order as the
changes were applied to this batch. A write result contains an
``update_time`` field.
"""
commit_response = self._client._firestore_api.commit(
self._client._database_string,
self._write_pbs,
transaction=None,
metadata=self._client._rpc_metadata,
)
self._write_pbs = []
self.write_results = results = list(commit_response.write_results)
self.commit_time = commit_response.commit_time
return results |
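Putting the batch methods together, a hedged end-to-end sketch (the document paths are illustrative; ``client`` is assumed to be a Firestore client as above):
batch = client.batch()
batch.create(client.document("users", "new-user"), {"name": "A"})
batch.update(client.document("users", "old-user"), {"name": "B"})
batch.delete(client.document("users", "stale-user"))
results = batch.commit()             # one WriteResult per queued change
for write_result in results:
    print(write_result.update_time)  # populated by the server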
Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
def _ensure_tuple_or_list(arg_name, tuple_or_list):
"""Ensures an input is a tuple or list.
This effectively reduces the iterable types allowed to a very short
whitelist: list and tuple.
:type arg_name: str
:param arg_name: Name of argument to use in error message.
:type tuple_or_list: sequence of str
:param tuple_or_list: Sequence to be verified.
:rtype: list of str
:returns: The ``tuple_or_list`` passed in cast to a ``list``.
:raises TypeError: if the ``tuple_or_list`` is not a tuple or list.
"""
if not isinstance(tuple_or_list, (tuple, list)):
raise TypeError(
"Expected %s to be a tuple or list. "
"Received %r" % (arg_name, tuple_or_list)
)
return list(tuple_or_list) |
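Note that strings, although iterable, are rejected, which is usually the point of this check:
_ensure_tuple_or_list("fields", ("name", "age"))   # -> ["name", "age"]
_ensure_tuple_or_list("fields", ["name"])          # -> ["name"]
_ensure_tuple_or_list("fields", "name")            # raises TypeError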
Convert a non-None datetime to microseconds since the Unix epoch.
:type value: :class:`datetime.datetime`
:param value: The timestamp to convert.
:rtype: int
:returns: The timestamp, in microseconds.
def _microseconds_from_datetime(value):
"""Convert non-none datetime to microseconds.
:type value: :class:`datetime.datetime`
:param value: The timestamp to convert.
:rtype: int
:returns: The timestamp, in microseconds.
"""
if not value.tzinfo:
value = value.replace(tzinfo=UTC)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(UTC)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond |
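A quick worked example; note that naive datetimes are interpreted as UTC rather than rejected:
import datetime
naive = datetime.datetime(2020, 1, 1, 12, 30, 0, 250)
_microseconds_from_datetime(naive)
# -> 1577881800000250  (12:30:00.000250 UTC on 2020-01-01)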
Convert a zoneless ISO 8601 time string to a naive datetime time.
:type value: str
:param value: The time string to convert
:rtype: :class:`datetime.time`
:returns: A datetime time object created from the string
:raises ValueError: if the value does not match a known format.
def _time_from_iso8601_time_naive(value):
"""Convert a zoneless ISO8601 time string to naive datetime time
:type value: str
:param value: The time string to convert
:rtype: :class:`datetime.time`
:returns: A datetime time object created from the string
:raises ValueError: if the value does not match a known format.
"""
if len(value) == 8: # HH:MM:SS
fmt = _TIMEONLY_NO_FRACTION
elif len(value) == 15: # HH:MM:SS.micros
fmt = _TIMEONLY_W_MICROS
else:
raise ValueError("Unknown time format: {}".format(value))
return datetime.datetime.strptime(value, fmt).time() |
Convert a microsecond-precision timestamp to a native datetime.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
def _rfc3339_to_datetime(dt_str):
"""Convert a microsecond-precision timestamp to a native datetime.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
"""
return datetime.datetime.strptime(dt_str, _RFC3339_MICROS).replace(tzinfo=UTC) |
Convert a nanosecond-precision timestamp to a native datetime.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
:raises ValueError: If the timestamp does not match the RFC 3339
regular expression.
def _rfc3339_nanos_to_datetime(dt_str):
"""Convert a nanosecond-precision timestamp to a native datetime.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
:type dt_str: str
:param dt_str: The string to convert.
:rtype: :class:`datetime.datetime`
:returns: The datetime object created from the string.
:raises ValueError: If the timestamp does not match the RFC 3339
regular expression.
"""
with_nanos = _RFC3339_NANOS.match(dt_str)
if with_nanos is None:
raise ValueError(
"Timestamp: %r, does not match pattern: %r"
% (dt_str, _RFC3339_NANOS.pattern)
)
bare_seconds = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
micros = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000
return bare_seconds.replace(microsecond=micros, tzinfo=UTC) |
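The truncation mentioned in the note above is floor division, not rounding:
_rfc3339_nanos_to_datetime("2020-01-01T12:30:00.123456789Z")
# -> datetime.datetime(2020, 1, 1, 12, 30, 0, 123456, tzinfo=UTC)
#    the trailing 789 nanoseconds are dropped, even though they exceed half
#    a microsecond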
Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
def _datetime_to_rfc3339(value, ignore_zone=True):
"""Convert a timestamp to a string.
:type value: :class:`datetime.datetime`
:param value: The datetime object to be converted to a string.
:type ignore_zone: bool
:param ignore_zone: If True, then the timezone (if any) of the datetime
object is ignored.
:rtype: str
:returns: The string representing the datetime stamp.
"""
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS) |
Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python2 it does not modify ``unicode`` objects.
:type value: str / bytes or unicode
:param value: The string/bytes value to be converted.
:type encoding: str
:param encoding: The encoding to use to convert unicode to bytes. Defaults
to "ascii", which will not allow any characters from
ordinals larger than 127. Other useful values are
"latin-1", which which will only allows byte ordinals
(up to 255) and "utf-8", which will encode any unicode
that needs to be.
:rtype: str / bytes
:returns: The original value converted to bytes (if unicode) or as passed
in if it started out as bytes.
:raises TypeError: if the value could not be converted to bytes.
def _to_bytes(value, encoding="ascii"):
"""Converts a string value to bytes, if necessary.
Unfortunately, ``six.b`` is insufficient for this task since in
Python2 it does not modify ``unicode`` objects.
:type value: str / bytes or unicode
:param value: The string/bytes value to be converted.
:type encoding: str
:param encoding: The encoding to use to convert unicode to bytes. Defaults
to "ascii", which will not allow any characters from
ordinals larger than 127. Other useful values are
"latin-1", which which will only allows byte ordinals
(up to 255) and "utf-8", which will encode any unicode
that needs to be.
:rtype: str / bytes
:returns: The original value converted to bytes (if unicode) or as passed
in if it started out as bytes.
:raises TypeError: if the value could not be converted to bytes.
"""
result = value.encode(encoding) if isinstance(value, six.text_type) else value
if isinstance(result, six.binary_type):
return result
else:
raise TypeError("%r could not be converted to bytes" % (value,)) |
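Behavior under the default and a wider encoding, as a short sketch:
_to_bytes("abc")                        # -> b'abc'
_to_bytes(b"abc")                       # -> b'abc' (returned unchanged)
_to_bytes(u"\u2603", encoding="utf-8")  # -> b'\xe2\x98\x83'
_to_bytes(42)                           # raises TypeError: not str/bytes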
Converts bytes to a unicode value, if necessary.
:type value: bytes
:param value: bytes value to attempt string conversion on.
:rtype: str
:returns: The original value converted to unicode (if bytes) or as passed
in if it started out as unicode.
:raises ValueError: if the value could not be converted to unicode.
def _bytes_to_unicode(value):
"""Converts bytes to a unicode value, if necessary.
:type value: bytes
:param value: bytes value to attempt string conversion on.
:rtype: str
:returns: The original value converted to unicode (if bytes) or as passed
in if it started out as unicode.
:raises ValueError: if the value could not be converted to unicode.
"""
result = value.decode("utf-8") if isinstance(value, six.binary_type) else value
if isinstance(result, six.text_type):
return result
else:
raise ValueError("%r could not be converted to unicode" % (value,)) |
Converts an Any protobuf to the specified message type
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
def _from_any_pb(pb_type, any_pb):
"""Converts an Any protobuf to the specified message type
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
"""
msg = pb_type()
if not any_pb.Unpack(msg):
raise TypeError(
"Could not convert {} to {}".format(
any_pb.__class__.__name__, pb_type.__name__
)
)
return msg |
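For example, round-tripping a well-known type through ``Any`` (imports shown for self-containment):
from google.protobuf import any_pb2, duration_pb2, timestamp_pb2

any_pb = any_pb2.Any()
any_pb.Pack(timestamp_pb2.Timestamp(seconds=10))
_from_any_pb(timestamp_pb2.Timestamp, any_pb)  # -> Timestamp(seconds=10)
_from_any_pb(duration_pb2.Duration, any_pb)    # raises TypeError (type URL mismatch)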
Convert a Timestamp protobuf to a datetime object.
:type timestamp_pb: :class:`google.protobuf.timestamp_pb2.Timestamp`
:param timestamp_pb: A Google returned timestamp protobuf.
:rtype: :class:`datetime.datetime`
:returns: A UTC datetime object converted from a protobuf timestamp.
def _pb_timestamp_to_datetime(timestamp_pb):
"""Convert a Timestamp protobuf to a datetime object.
:type timestamp_pb: :class:`google.protobuf.timestamp_pb2.Timestamp`
:param timestamp_pb: A Google returned timestamp protobuf.
:rtype: :class:`datetime.datetime`
:returns: A UTC datetime object converted from a protobuf timestamp.
"""
return _EPOCH + datetime.timedelta(
seconds=timestamp_pb.seconds, microseconds=(timestamp_pb.nanos / 1000.0)
) |
Convert a datetime object to a Timestamp protobuf.
:type when: :class:`datetime.datetime`
:param when: the datetime to convert
:rtype: :class:`google.protobuf.timestamp_pb2.Timestamp`
:returns: A timestamp protobuf corresponding to the object.
def _datetime_to_pb_timestamp(when):
"""Convert a datetime object to a Timestamp protobuf.
:type when: :class:`datetime.datetime`
:param when: the datetime to convert
:rtype: :class:`google.protobuf.timestamp_pb2.Timestamp`
:returns: A timestamp protobuf corresponding to the object.
"""
ms_value = _microseconds_from_datetime(when)
seconds, micros = divmod(ms_value, 10 ** 6)
nanos = micros * 10 ** 3
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos) |
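The two timestamp helpers round-trip losslessly at microsecond granularity, assuming ``_EPOCH`` is the timezone-aware Unix epoch:
import datetime
when = datetime.datetime(2020, 1, 1, tzinfo=UTC)
ts_pb = _datetime_to_pb_timestamp(when)   # Timestamp(seconds=1577836800, nanos=0)
_pb_timestamp_to_datetime(ts_pb) == when  # True; sub-microsecond nanos would be lost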
Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a granularity of nanoseconds.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
def _duration_pb_to_timedelta(duration_pb):
"""Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a granularity of nanoseconds.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
"""
return datetime.timedelta(
seconds=duration_pb.seconds, microseconds=(duration_pb.nanos / 1000.0)
) |
Validate a URI path and get the leaf object's name.
:type path: str
:param path: URI path containing the name.
:type project: str
:param project: (Optional) The project associated with the request. It is
included for validation purposes. If passed as None,
disables validation.
:type template: str
:param template: Template regex describing the expected form of the path.
The regex must have two named groups, 'project' and
'name'.
:rtype: str
:returns: Name parsed from ``path``.
:raises ValueError: if the ``path`` is ill-formed or if the project from
the ``path`` does not agree with the ``project``
passed in.
def _name_from_project_path(path, project, template):
"""Validate a URI path and get the leaf object's name.
:type path: str
:param path: URI path containing the name.
:type project: str
:param project: (Optional) The project associated with the request. It is
included for validation purposes. If passed as None,
disables validation.
:type template: str
:param template: Template regex describing the expected form of the path.
The regex must have two named groups, 'project' and
'name'.
:rtype: str
:returns: Name parsed from ``path``.
:raises ValueError: if the ``path`` is ill-formed or if the project from
the ``path`` does not agree with the ``project``
passed in.
"""
if isinstance(template, str):
template = re.compile(template)
match = template.match(path)
if not match:
raise ValueError(
'path "%s" did not match expected pattern "%s"' % (path, template.pattern)
)
if project is not None:
found_project = match.group("project")
if found_project != project:
raise ValueError(
"Project from client (%s) should agree with "
"project from resource(%s)." % (project, found_project)
)
return match.group("name") |
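A sketch with a hypothetical template; the named groups ``project`` and ``name`` are the only hard requirement:
TEMPLATE = r"projects/(?P<project>[^/]+)/topics/(?P<name>[^/]+)$"
_name_from_project_path("projects/my-proj/topics/t1", "my-proj", TEMPLATE)
# -> 't1'
_name_from_project_path("projects/other/topics/t1", "my-proj", TEMPLATE)
# raises ValueError: project mismatch
_name_from_project_path("projects/my-proj/topics/t1", None, TEMPLATE)
# -> 't1' (project validation disabled)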
Makes a secure channel for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options used when creating the
channel.
:rtype: :class:`grpc._channel.Channel`
:returns: gRPC secure channel with credentials attached.
def make_secure_channel(credentials, user_agent, host, extra_options=()):
"""Makes a secure channel for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options used when creating the
channel.
:rtype: :class:`grpc._channel.Channel`
:returns: gRPC secure channel with credentials attached.
"""
target = "%s:%d" % (host, http_client.HTTPS_PORT)
http_request = google.auth.transport.requests.Request()
user_agent_option = ("grpc.primary_user_agent", user_agent)
options = (user_agent_option,) + extra_options
return google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, target, options=options
) |
Makes a secure stub for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options passed when creating
the channel.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
def make_secure_stub(credentials, user_agent, stub_class, host, extra_options=()):
"""Makes a secure stub for an RPC service.
Uses / depends on gRPC.
:type credentials: :class:`google.auth.credentials.Credentials`
:param credentials: The OAuth2 Credentials to use for creating
access tokens.
:type user_agent: str
:param user_agent: The user agent to be used with API requests.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service.
:type extra_options: tuple
:param extra_options: (Optional) Extra gRPC options passed when creating
the channel.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
"""
channel = make_secure_channel(
credentials, user_agent, host, extra_options=extra_options
)
return stub_class(channel) |
Makes an insecure stub for an RPC service.
Uses / depends on gRPC.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service. May also include the port
if ``port`` is unspecified.
:type port: int
:param port: (Optional) The port for the service.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
def make_insecure_stub(stub_class, host, port=None):
"""Makes an insecure stub for an RPC service.
Uses / depends on gRPC.
:type stub_class: type
:param stub_class: A gRPC stub type for a given service.
:type host: str
:param host: The host for the service. May also include the port
if ``port`` is unspecified.
:type port: int
:param port: (Optional) The port for the service.
:rtype: object, instance of ``stub_class``
:returns: The stub object used to make gRPC requests to a given API.
"""
if port is None:
target = host
else:
# NOTE: This assumes port != http_client.HTTPS_PORT:
target = "%s:%d" % (host, port)
channel = grpc.insecure_channel(target)
return stub_class(channel) |
Convert a timestamp from (naive) UTC to this timezone.
def fromutc(self, dt):
"""Convert a timestamp from (naive) UTC to this timezone."""
if dt.tzinfo is None:
return dt.replace(tzinfo=self)
return super(_UTC, self).fromutc(dt) |
Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.
This decorator adds a `{feature}` method for every feature
defined on the Feature enum.
def add_single_feature_methods(cls):
"""Custom decorator intended for :class:`~vision.helpers.VisionHelpers`.
This decorator adds a `{feature}` method for every feature
defined on the Feature enum.
"""
# Sanity check: This only makes sense if we are building the GAPIC
# subclass and have enums already attached.
if not hasattr(cls, "enums"):
return cls
# Add each single-feature method to the class.
for feature in cls.enums.Feature.Type:
# Sanity check: Do not make a method for the falsy feature.
if feature.name == "TYPE_UNSPECIFIED":
continue
# Assign the appropriate metadata to the function.
detect = _create_single_feature_method(feature)
# Assign a qualified name to the function, and perform module
# replacement on the docstring.
detect.__qualname__ = "{cls}.{name}".format(
cls=cls.__name__, name=detect.__name__
)
detect.__doc__ = detect.__doc__.format(module=cls.__module__)
# Place the function on the class being created.
setattr(cls, detect.__name__, detect)
# Done; return the class.
return cls |
Return a function that will detect a single feature.
Args:
feature (enum): A specific feature defined as a member of
:class:`~enums.Feature.Type`.
Returns:
function: A helper function to detect just that feature.
def _create_single_feature_method(feature):
"""Return a function that will detect a single feature.
Args:
feature (enum): A specific feature defined as a member of
:class:`~enums.Feature.Type`.
Returns:
function: A helper function to detect just that feature.
"""
# Define the function properties.
fx_name = feature.name.lower()
if "detection" in fx_name:
fx_doc = "Perform {0}.".format(fx_name.replace("_", " "))
else:
fx_doc = "Return {desc} information.".format(desc=fx_name.replace("_", " "))
# Provide a complete docstring with argument and return value
# information.
fx_doc += """
Args:
image (:class:`~.{module}.types.Image`): The image to analyze.
max_results (int):
Number of results to return; does not apply to
TEXT_DETECTION, DOCUMENT_TEXT_DETECTION, or CROP_HINTS.
retry (int): Number of retries to do before giving up.
timeout (int): Number of seconds before timing out.
kwargs (dict): Additional properties to be set on the
:class:`~.{module}.types.AnnotateImageRequest`.
Returns:
:class:`~.{module}.types.AnnotateImageResponse`: The API response.
"""
# Get the actual feature value to send.
feature_value = {"type": feature}
# Define the function to be returned.
def inner(self, image, max_results=None, retry=None, timeout=None, **kwargs):
"""Return a single feature annotation for the given image.
Intended for use with functools.partial, to create the particular
single-feature methods.
"""
copied_features = feature_value.copy()
if max_results is not None:
copied_features["max_results"] = max_results
request = dict(image=image, features=[copied_features], **kwargs)
response = self.annotate_image(request, retry=retry, timeout=timeout)
return response
# Set the appropriate function metadata.
inner.__name__ = fx_name
inner.__doc__ = fx_doc
# Return the final function.
return inner |
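After decoration, each Feature.Type member becomes a lowercase method on the client. A hedged usage sketch (the bucket and URI are illustrative; the client class is assumed to be the decorated GAPIC ImageAnnotatorClient):
client = vision.ImageAnnotatorClient()
image = vision.types.Image()
image.source.image_uri = "gs://my-bucket/photo.jpg"
# FACE_DETECTION -> face_detection; forwards to annotate_image() internally.
response = client.face_detection(image, max_results=10)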
Schedule the callback to be called asynchronously in a thread pool.
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Keyword arguments passed to the function.
Returns:
None
def schedule(self, callback, *args, **kwargs):
"""Schedule the callback to be called asynchronously in a thread pool.
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Keyword arguments passed to the function.
Returns:
None
"""
self._executor.submit(callback, *args, **kwargs) |
Shuts down the scheduler and immediately ends all pending callbacks.
def shutdown(self):
"""Shuts down the scheduler and immediately end all pending callbacks.
"""
# Drop all pending items from the executor. Without this, the executor
# will block until all pending items are complete, which is
# undesirable.
try:
while True:
self._executor._work_queue.get(block=False)
except queue.Empty:
pass
self._executor.shutdown() |
Lists the specified events.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `group_id`:
>>> group_id = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_events(project_name, group_id):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_events(project_name, group_id).pages:
... for element in page:
... # process element
... pass
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
group_id (str): [Required] The group for which events shall be returned.
service_filter (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter]): [Optional] List only ErrorGroups which belong to a service context that
matches the filter. Data for all service contexts is returned if this
field is not specified.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter`
time_range (Union[dict, ~google.cloud.errorreporting_v1beta1.types.QueryTimeRange]): [Optional] List only data for the given time range. If not set a default
time range is used. The field ``time_range_begin`` in the response will
specify the beginning of this time range.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.QueryTimeRange`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.errorreporting_v1beta1.types.ErrorEvent` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def list_events(
self,
project_name,
group_id,
service_filter=None,
time_range=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the specified events.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `group_id`:
>>> group_id = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_events(project_name, group_id):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_events(project_name, group_id).pages:
... for element in page:
... # process element
... pass
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
group_id (str): [Required] The group for which events shall be returned.
service_filter (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter]): [Optional] List only ErrorGroups which belong to a service context that
matches the filter. Data for all service contexts is returned if this
field is not specified.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ServiceContextFilter`
time_range (Union[dict, ~google.cloud.errorreporting_v1beta1.types.QueryTimeRange]): [Optional] List only data for the given time range. If not set a default
time range is used. The field ``time_range_begin`` in the response will
specify the beginning of this time range.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.QueryTimeRange`
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.errorreporting_v1beta1.types.ErrorEvent` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_events" not in self._inner_api_calls:
self._inner_api_calls[
"list_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_events,
default_retry=self._method_configs["ListEvents"].retry,
default_timeout=self._method_configs["ListEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.ListEventsRequest(
project_name=project_name,
group_id=group_id,
service_filter=service_filter,
time_range=time_range,
page_size=page_size,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_events"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="error_events",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator |
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
Convert Document resource to document ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (dict): document resource
def _item_to_document_ref(iterator, item):
"""Convert Document resource to document ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (dict): document resource
"""
document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]
return iterator.collection.document(document_id) |
Document that owns the current collection.
Returns:
Optional[~.firestore_v1beta1.document.DocumentReference]: The
parent document, if the current collection is not a
top-level collection.
def parent(self):
"""Document that owns the current collection.
Returns:
Optional[~.firestore_v1beta1.document.DocumentReference]: The
parent document, if the current collection is not a
top-level collection.
"""
if len(self._path) == 1:
return None
else:
parent_path = self._path[:-1]
return self._client.document(*parent_path) |