Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- venv/lib/python3.10/site-packages/google/_async_resumable_media/__init__.py +61 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_download.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_upload.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/_download.py +550 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/_helpers.py +197 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/_upload.py +976 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__init__.py +682 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/_request_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/download.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/upload.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/_request_helpers.py +155 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/download.py +465 -0
- venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/upload.py +515 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/annotations_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/auth_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/backend_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/billing_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/client_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/config_change_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/consumer_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/context_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/control_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/distribution_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/documentation_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/endpoint_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/error_reason_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/field_behavior_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/field_info_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/http_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/httpbody_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/label_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/launch_stage_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/log_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/logging_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/metric_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/monitored_resource_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/monitoring_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/policy_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/quota_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/resource_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/routing_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/service_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/source_info_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/system_parameter_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/usage_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/__pycache__/visibility_pb2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/google/api/config_change_pb2.py +51 -0
venv/lib/python3.10/site-packages/google/_async_resumable_media/__init__.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for Google Media Downloads and Resumable Uploads.

This package has some general purposes modules, e.g.
:mod:`~google.resumable_media.common`, but the majority of the
public interface will be contained in subpackages.

===========
Subpackages
===========

Each subpackage is tailored to a specific transport library:

* the :mod:`~google.resumable_media.requests` subpackage uses the ``requests``
  transport library.

.. _requests: http://docs.python-requests.org/

==========
Installing
==========

To install with `pip`_:

.. code-block:: console

  $ pip install --upgrade google-resumable-media

.. _pip: https://pip.pypa.io/
"""

# These names are re-exported from the synchronous package so that the
# async package exposes the same public constants and exception types.
from google.resumable_media.common import DataCorruption
from google.resumable_media.common import InvalidResponse
from google.resumable_media.common import PERMANENT_REDIRECT
from google.resumable_media.common import RetryStrategy
from google.resumable_media.common import TOO_MANY_REQUESTS
from google.resumable_media.common import UPLOAD_CHUNK_SIZE


# Public API of this package: exactly the re-exported names above.
__all__ = [
    "DataCorruption",
    "InvalidResponse",
    "PERMANENT_REDIRECT",
    "RetryStrategy",
    "TOO_MANY_REQUESTS",
    "UPLOAD_CHUNK_SIZE",
]
venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.24 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_download.cpython-310.pyc
ADDED
|
Binary file (18.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_helpers.cpython-310.pyc
ADDED
|
Binary file (5.93 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/__pycache__/_upload.cpython-310.pyc
ADDED
|
Binary file (32.9 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/_download.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Virtual bases classes for downloading media from Google APIs."""

import http.client
import re

from google._async_resumable_media import _helpers
from google.resumable_media import common


# Parses a ``Content-Range`` response header of the form
# ``bytes {start}-{end}/{total}``; all three fields are captured as
# named groups of decimal digits.
_CONTENT_RANGE_RE = re.compile(
    r"bytes (?P<start_byte>\d+)-(?P<end_byte>\d+)/(?P<total_bytes>\d+)",
    flags=re.IGNORECASE,
)
# Status codes whose response bodies are treated as valid download content.
_ACCEPTABLE_STATUS_CODES = (http.client.OK, http.client.PARTIAL_CONTENT)
# HTTP verb used for every download request.
_GET = "GET"
# ``Content-Range`` value a server sends for a zero-byte object
# (together with a 416 status); see _check_for_zero_content_range.
_ZERO_CONTENT_RANGE_HEADER = "bytes */0"
class DownloadBase(object):
    """Shared plumbing for download helpers.

    Holds the request target, an optional byte range, an optional output
    stream and extra headers, and tracks whether the download has run.
    The accessors that read a transport-specific HTTP response are left
    virtual for subclasses to implement.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object)
            that the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded.
        end (int): The last byte in a range to be downloaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    def __init__(self, media_url, stream=None, start=None, end=None, headers=None):
        self.media_url = media_url
        self._stream = stream
        self.start = start
        self.end = end
        # Build a fresh dict per instance rather than sharing a default.
        self._headers = {} if headers is None else headers
        self._finished = False
        self._retry_strategy = common.RetryStrategy()

    @property
    def finished(self):
        """bool: Flag indicating if the download has completed."""
        return self._finished

    @staticmethod
    def _get_status_code(response):
        """Access the status code from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_headers(response):
        """Access the headers from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_body(response):
        """Access the response body from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
| 106 |
+
class Download(DownloadBase):
    """Helper to manage downloading a resource from a Google API.

    "Slices" of the resource can be retrieved by specifying a range
    with ``start`` and / or ``end``; in typical usage neither is given
    and the whole resource is fetched.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object)
            that the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If not
            provided, but ``end`` is provided, will download from the
            beginning to ``end`` of the media.
        end (int): The last byte in a range to be downloaded. If not
            provided, but ``start`` is provided, will download from the
            ``start`` to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The response headers must contain
            a checksum of the requested type; if they do not (e.g. for
            transcoded or ranged downloads) an INFO-level log is emitted.
            Supported values are "md5", "crc32c" and None.
    """

    def __init__(
        self, media_url, stream=None, start=None, end=None, headers=None, checksum="md5"
    ):
        super().__init__(media_url, stream=stream, start=start, end=end, headers=headers)
        self.checksum = checksum

    def _prepare_request(self):
        """Prepare the contents of an HTTP request (sans-I/O).

        Everything that must happen before the request that needs no
        network I/O, per the `sans-I/O`_ philosophy.

        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always GET)
              * the URL for the request
              * the body of the request (always :data:`None`)
              * headers for the request

        Raises:
            ValueError: If the current :class:`Download` has already
                finished.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("A download can only be used once.")

        # Mutates ``self._headers`` in place to carry the byte range, if any.
        add_bytes_range(self.start, self.end, self._headers)
        return _GET, self.media_url, None, self._headers

    def _process_response(self, response):
        """Process the response from an HTTP request (sans-I/O).

        Args:
            response (object): The HTTP response object.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Tombstone first so this Download cannot be reused, even if the
        # status check below raises.
        self._finished = True
        _helpers.require_status_code(
            response, _ACCEPTABLE_STATUS_CODES, self._get_status_code
        )

    def consume(self, transport, timeout=None):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, the downloaded
        resource will be written to it.

        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
| 208 |
+
class ChunkedDownload(DownloadBase):
    """Download a resource in chunks from a Google API.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.

    Raises:
        ValueError: If ``start`` is negative.
    """

    def __init__(self, media_url, chunk_size, stream, start=0, end=None, headers=None):
        if start < 0:
            raise ValueError(
                "On a chunked download the starting " "value cannot be negative."
            )
        super(ChunkedDownload, self).__init__(
            media_url, stream=stream, start=start, end=end, headers=headers
        )
        self.chunk_size = chunk_size
        # Running total of bytes received so far; drives _get_byte_range.
        self._bytes_downloaded = 0
        # Unknown until the first response's content-range header is seen.
        self._total_bytes = None
        # Set by _make_invalid when a chunk request fails validation.
        self._invalid = False

    @property
    def bytes_downloaded(self):
        """int: Number of bytes that have been downloaded."""
        return self._bytes_downloaded

    @property
    def total_bytes(self):
        """Optional[int]: The total number of bytes to be downloaded."""
        return self._total_bytes

    @property
    def invalid(self):
        """bool: Indicates if the download is in an invalid state.

        This will occur if a call to :meth:`consume_next_chunk` fails.
        """
        return self._invalid

    def _get_byte_range(self):
        """Determines the byte range for the next request.

        Returns:
            Tuple[int, int]: The pair of begin and end byte for the next
            chunked request.
        """
        curr_start = self.start + self.bytes_downloaded
        curr_end = curr_start + self.chunk_size - 1
        # Make sure ``curr_end`` does not exceed ``end``.
        if self.end is not None:
            curr_end = min(curr_end, self.end)
        # Make sure ``curr_end`` does not exceed ``total_bytes - 1``.
        if self.total_bytes is not None:
            curr_end = min(curr_end, self.total_bytes - 1)
        return curr_start, curr_end

    def _prepare_request(self):
        """Prepare the contents of an HTTP request.

        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        .. note:

            This method will be used multiple times, so ``headers`` will
            be mutated in between requests. However, we don't make a copy
            since the same keys are being updated.

        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always GET)
              * the URL for the request
              * the body of the request (always :data:`None`)
              * headers for the request

        Raises:
            ValueError: If the current download has finished.
            ValueError: If the current download is invalid.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("Download has finished.")
        if self.invalid:
            raise ValueError("Download is invalid and cannot be re-used.")

        curr_start, curr_end = self._get_byte_range()
        add_bytes_range(curr_start, curr_end, self._headers)
        return _GET, self.media_url, None, self._headers

    def _make_invalid(self):
        """Simple setter for ``invalid``.

        This is intended to be passed along as a callback to helpers that
        raise an exception so they can mark this instance as invalid before
        raising.
        """
        self._invalid = True

    async def _process_response(self, response):
        """Process the response from an HTTP request.

        This is everything that must be done after a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.

        For the time being, this **does require** some form of I/O to write
        a chunk to ``stream``. However, this will (almost) certainly not be
        network I/O.

        Updates the current state after consuming a chunk. First,
        increments ``bytes_downloaded`` by the number of bytes in the
        ``content-length`` header.

        If ``total_bytes`` is already set, this assumes (but does not check)
        that we already have the correct value and doesn't bother to check
        that it agrees with the headers.

        We expect the **total** length to be in the ``content-range`` header,
        but this header is only present on requests which sent the ``range``
        header. This response header should be of the form
        ``bytes {start}-{end}/{total}`` and ``{end} - {start} + 1``
        should be the same as the ``Content-Length``.

        Args:
            response (object): The HTTP response object (need headers).

        Raises:
            ~google.resumable_media.common.InvalidResponse: If the number
                of bytes in the body doesn't match the content length header.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Verify the response before updating the current instance.
        if _check_for_zero_content_range(
            response, self._get_status_code, self._get_headers
        ):
            # Zero-byte object: nothing to write, the download is complete.
            self._finished = True
            return

        _helpers.require_status_code(
            response,
            _ACCEPTABLE_STATUS_CODES,
            self._get_status_code,
            callback=self._make_invalid,
        )
        headers = self._get_headers(response)
        # NOTE: reading the body is async in this transport.
        response_body = await self._get_body(response)

        start_byte, end_byte, total_bytes = get_range_info(
            response, self._get_headers, callback=self._make_invalid
        )

        transfer_encoding = headers.get("transfer-encoding")

        if transfer_encoding is None:
            content_length = _helpers.header_required(
                response,
                "content-length",
                self._get_headers,
                callback=self._make_invalid,
            )
            num_bytes = int(content_length)

            if len(response_body) != num_bytes:
                self._make_invalid()
                raise common.InvalidResponse(
                    response,
                    "Response is different size than content-length",
                    "Expected",
                    num_bytes,
                    "Received",
                    len(response_body),
                )
        else:
            # 'content-length' header not allowed with chunked encoding.
            num_bytes = end_byte - start_byte + 1

        # First update ``bytes_downloaded``.
        self._bytes_downloaded += num_bytes
        # If the end byte is past ``end`` or ``total_bytes - 1`` we are done.
        if self.end is not None and end_byte >= self.end:
            self._finished = True
        elif end_byte >= total_bytes - 1:
            self._finished = True
        # NOTE: We only use ``total_bytes`` if not already known.
        if self.total_bytes is None:
            self._total_bytes = total_bytes
        # Write the response body to the stream.
        self._stream.write(response_body)

    def consume_next_chunk(self, transport, timeout=None):
        """Consume the next chunk of the resource to be downloaded.

        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.
        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
| 437 |
+
def add_bytes_range(start, end, headers):
    """Add a bytes range to a header dictionary.

    Some possible inputs and the corresponding bytes ranges::

       >>> headers = {}
       >>> add_bytes_range(None, None, headers)
       >>> headers
       {}
       >>> add_bytes_range(500, 999, headers)
       >>> headers['range']
       'bytes=500-999'
       >>> add_bytes_range(None, 499, headers)
       >>> headers['range']
       'bytes=0-499'
       >>> add_bytes_range(-500, None, headers)
       >>> headers['range']
       'bytes=-500'
       >>> add_bytes_range(9500, None, headers)
       >>> headers['range']
       'bytes=9500-'

    Args:
        start (Optional[int]): The first byte in a range. Can be zero,
            positive, negative or :data:`None`.
        end (Optional[int]): The last byte in a range. Assumed to be
            positive.
        headers (Mapping[str, str]): A headers mapping which can have the
            bytes range added if at least one of ``start`` or ``end``
            is not :data:`None`.
    """
    # No range at all: leave ``headers`` untouched.
    if start is None and end is None:
        return

    if start is None:
        # Only ``end`` given; assumed non-negative, so range from 0.
        bytes_range = f"0-{end:d}"
    elif end is None:
        # Negative start is a suffix range ("last N bytes").
        bytes_range = f"{start:d}" if start < 0 else f"{start:d}-"
    else:
        # Both given. NOTE: this is invalid if ``start < 0``.
        bytes_range = f"{start:d}-{end:d}"

    headers[_helpers.RANGE_HEADER] = "bytes=" + bytes_range
|
| 488 |
+
def get_range_info(response, get_headers, callback=_helpers.do_nothing):
    """Get the start, end and total bytes from a content range header.

    Args:
        response (object): An HTTP response object.
        get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
            from an HTTP response.
        callback (Optional[Callable]): A callback that takes no arguments,
            to be executed when an exception is being raised.

    Returns:
        Tuple[int, int, int]: The start byte, end byte and total bytes.

    Raises:
        ~google.resumable_media.common.InvalidResponse: If the
            ``Content-Range`` header is not of the form
            ``bytes {start}-{end}/{total}``.
    """
    content_range = _helpers.header_required(
        response, _helpers.CONTENT_RANGE_HEADER, get_headers, callback=callback
    )
    match = _CONTENT_RANGE_RE.match(content_range)
    if match is None:
        callback()
        raise common.InvalidResponse(
            response,
            "Unexpected content-range header",
            content_range,
            'Expected to be of the form "bytes {start}-{end}/{total}"',
        )

    # Pull the three named groups out in a fixed order and convert each.
    group_names = ("start_byte", "end_byte", "total_bytes")
    return tuple(int(match.group(name)) for name in group_names)
|
| 524 |
+
|
| 525 |
+
|
| 526 |
+
def _check_for_zero_content_range(response, get_status_code, get_headers):
    """Validate if response status code is 416 and content range is zero.

    This is the special case for handling zero bytes files.

    Args:
        response (object): An HTTP response object.
        get_status_code (Callable[Any, int]): Helper to get a status code
            from a response.
        get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
            from an HTTP response.

    Returns:
        bool: True if content range total bytes is zero, false otherwise.
    """
    if get_status_code(response) != http.client.REQUESTED_RANGE_NOT_SATISFIABLE:
        # Any other status code cannot be the zero-byte special case.
        return False

    content_range = _helpers.header_required(
        response,
        _helpers.CONTENT_RANGE_HEADER,
        get_headers,
        callback=_helpers.do_nothing,
    )
    return content_range == _ZERO_CONTENT_RANGE_HEADER
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/_helpers.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Shared utilities used by both downloads and uploads."""
|
| 16 |
+
|
| 17 |
+
import logging
|
| 18 |
+
import random
|
| 19 |
+
import time
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
from google.resumable_media import common
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
RANGE_HEADER = "range"
|
| 26 |
+
CONTENT_RANGE_HEADER = "content-range"
|
| 27 |
+
|
| 28 |
+
_SLOW_CRC32C_WARNING = (
|
| 29 |
+
"Currently using crcmod in pure python form. This is a slow "
|
| 30 |
+
"implementation. Python 3 has a faster implementation, `google-crc32c`, "
|
| 31 |
+
"which will be used if it is installed."
|
| 32 |
+
)
|
| 33 |
+
_HASH_HEADER = "x-goog-hash"
|
| 34 |
+
_MISSING_CHECKSUM = """\
|
| 35 |
+
No {checksum_type} checksum was returned from the service while downloading {}
|
| 36 |
+
(which happens for composite objects), so client-side content integrity
|
| 37 |
+
checking is not being performed."""
|
| 38 |
+
_LOGGER = logging.getLogger(__name__)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def do_nothing():
    """Simple default callback."""
    return None
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def header_required(response, name, get_headers, callback=do_nothing):
    """Checks that a specific header is in a headers dictionary.

    Args:
        response (object): An HTTP response object, expected to have a
            ``headers`` attribute that is a ``Mapping[str, str]``.
        name (str): The name of a required header.
        get_headers (Callable[Any, Mapping[str, str]]): Helper to get headers
            from an HTTP response.
        callback (Optional[Callable]): A callback that takes no arguments,
            to be executed when an exception is being raised.

    Returns:
        str: The desired header.

    Raises:
        ~google.resumable_media.common.InvalidResponse: If the header
            is missing.
    """
    headers = get_headers(response)
    try:
        # EAFP: a single lookup covers both the membership test and access.
        return headers[name]
    except KeyError:
        callback()
        raise common.InvalidResponse(
            response, "Response headers must contain header", name
        )
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def require_status_code(response, status_codes, get_status_code, callback=do_nothing):
    """Require a response has a status code among a list.

    Args:
        response (object): The HTTP response object.
        status_codes (tuple): The acceptable status codes.
        get_status_code (Callable[Any, int]): Helper to get a status code
            from a response.
        callback (Optional[Callable]): A callback that takes no arguments,
            to be executed when an exception is being raised.

    Returns:
        int: The status code.

    Raises:
        ~google.resumable_media.common.InvalidResponse: If the status code
            is not one of the values in ``status_codes``.
    """
    status_code = get_status_code(response)
    if status_code in status_codes:
        return status_code

    # Unacceptable status: notify the caller's hook, then raise.
    callback()
    raise common.InvalidResponse(
        response,
        "Request failed with status code",
        status_code,
        "Expected one of",
        *status_codes
    )
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def calculate_retry_wait(base_wait, max_sleep):
    """Calculate the amount of time to wait before a retry attempt.

    Wait time grows exponentially with the number of attempts, until
    ``max_sleep``.

    A random amount of jitter (between 0 and 1 seconds) is added to spread out
    retry attempts from different clients.

    Args:
        base_wait (float): The "base" wait time (i.e. without any jitter)
            that will be doubled until it reaches the maximum sleep.
        max_sleep (float): Maximum value that a sleep time is allowed to be.

    Returns:
        Tuple[float, float]: The new base wait time as well as the wait time
        to be applied (with a random amount of jitter between 0 and 1 seconds
        added).
    """
    # Double the base, but never beyond the configured ceiling.
    new_base_wait = min(2.0 * base_wait, max_sleep)

    # Jitter is drawn in whole milliseconds and converted to seconds.
    jitter = 0.001 * random.randint(0, 1000)
    return new_base_wait, new_base_wait + jitter
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
async def wait_and_retry(func, get_status_code, retry_strategy):
    """Attempts to retry a call to ``func`` until success.

    Expects ``func`` to return an HTTP response and uses ``get_status_code``
    to check if the response is retry-able.

    Will retry until :meth:`~.RetryStrategy.retry_allowed` (on the current
    ``retry_strategy``) returns :data:`False`. Uses
    :func:`calculate_retry_wait` to double the wait time (with jitter) after
    each attempt.

    Args:
        func (Callable): A callable that takes no arguments and produces
            an HTTP response which will be checked as retry-able.
        get_status_code (Callable[Any, int]): Helper to get a status code
            from a response.
        retry_strategy (~google.resumable_media.common.RetryStrategy): The
            strategy to use if the request fails and must be retried.

    Returns:
        object: The return value of ``func``.

    Raises:
        ConnectionError: If every attempt raised one and retries were
            exhausted.
    """
    # Imported locally so the module's top-level imports stay untouched.
    import asyncio

    total_sleep = 0.0
    num_retries = 0
    base_wait = 0.5  # When doubled will give 1.0

    while True:  # return on success or when retries exhausted.
        error = None
        try:
            response = await func()
        except ConnectionError as e:
            error = e
        else:
            if get_status_code(response) not in common.RETRYABLE:
                return response

        if not retry_strategy.retry_allowed(total_sleep, num_retries):
            # Retries are exhausted and no acceptable response was received.
            # Raise the retriable error or return the unacceptable response.
            if error:
                raise error

            return response

        base_wait, wait_time = calculate_retry_wait(base_wait, retry_strategy.max_sleep)

        num_retries += 1
        total_sleep += wait_time
        # BUG FIX: the original called the blocking ``time.sleep`` here,
        # which stalls the entire asyncio event loop for the whole backoff
        # interval inside a coroutine. ``asyncio.sleep`` yields control so
        # other tasks can run while this upload/download waits.
        await asyncio.sleep(wait_time)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class _DoNothingHash(object):
|
| 185 |
+
"""Do-nothing hash object.
|
| 186 |
+
|
| 187 |
+
Intended as a stand-in for ``hashlib.md5`` or a crc32c checksum
|
| 188 |
+
implementation in cases where it isn't necessary to compute the hash.
|
| 189 |
+
"""
|
| 190 |
+
|
| 191 |
+
def update(self, unused_chunk):
|
| 192 |
+
"""Do-nothing ``update`` method.
|
| 193 |
+
|
| 194 |
+
Intended to match the interface of ``hashlib.md5`` and other checksums.
|
| 195 |
+
Args:
|
| 196 |
+
unused_chunk (bytes): A chunk of data.
|
| 197 |
+
"""
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/_upload.py
ADDED
|
@@ -0,0 +1,976 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Virtual bases classes for uploading media via Google APIs.
|
| 16 |
+
|
| 17 |
+
Supported here are:
|
| 18 |
+
|
| 19 |
+
* simple (media) uploads
|
| 20 |
+
* multipart uploads that contain both metadata and a small file as payload
|
| 21 |
+
* resumable uploads (with metadata as well)
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import http.client
|
| 25 |
+
import json
|
| 26 |
+
import os
|
| 27 |
+
import random
|
| 28 |
+
import sys
|
| 29 |
+
|
| 30 |
+
from google import _async_resumable_media
|
| 31 |
+
from google._async_resumable_media import _helpers
|
| 32 |
+
from google.resumable_media import _helpers as sync_helpers
|
| 33 |
+
from google.resumable_media import _upload as sync_upload
|
| 34 |
+
from google.resumable_media import common
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
from google.resumable_media._upload import (
|
| 38 |
+
_CONTENT_TYPE_HEADER,
|
| 39 |
+
_CONTENT_RANGE_TEMPLATE,
|
| 40 |
+
_RANGE_UNKNOWN_TEMPLATE,
|
| 41 |
+
_EMPTY_RANGE_TEMPLATE,
|
| 42 |
+
_BOUNDARY_FORMAT,
|
| 43 |
+
_MULTIPART_SEP,
|
| 44 |
+
_CRLF,
|
| 45 |
+
_MULTIPART_BEGIN,
|
| 46 |
+
_RELATED_HEADER,
|
| 47 |
+
_BYTES_RANGE_RE,
|
| 48 |
+
_STREAM_ERROR_TEMPLATE,
|
| 49 |
+
_POST,
|
| 50 |
+
_PUT,
|
| 51 |
+
_UPLOAD_CHECKSUM_MISMATCH_MESSAGE,
|
| 52 |
+
_UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE,
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class UploadBase(object):
    """Base class for upload helpers.

    Defines core shared behavior across different upload types.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    def __init__(self, upload_url, headers=None):
        self.upload_url = upload_url
        # Each instance gets its own dict when no headers are supplied.
        self._headers = {} if headers is None else headers
        self._finished = False
        self._retry_strategy = common.RetryStrategy()

    @property
    def finished(self):
        """bool: Flag indicating if the upload has completed."""
        return self._finished

    def _process_response(self, response):
        """Process the response from an HTTP request.

        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        Args:
            response (object): The HTTP response object.

        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 200.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Tombstone the current upload so it cannot be used again (in either
        # failure or success).
        self._finished = True
        _helpers.require_status_code(response, (http.client.OK,), self._get_status_code)

    @staticmethod
    def _get_status_code(response):
        """Access the status code from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_headers(response):
        """Access the headers from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    @staticmethod
    def _get_body(response):
        """Access the response body from an HTTP response.

        Args:
            response (object): The HTTP response object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class SimpleUpload(UploadBase):
    """Upload a resource to a Google API.

    A **simple** media upload sends no metadata and completes the upload
    in a single request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    def _prepare_request(self, data, content_type):
        """Prepare the contents of an HTTP request.

        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        .. note:

            This method will be used only once, so ``headers`` will be
            mutated by having a new key added to it.

        Args:
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type for the request.

        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always POST)
              * the URL for the request
              * the body of the request
              * headers for the request

        Raises:
            ValueError: If the current upload has already finished.
            TypeError: If ``data`` isn't bytes.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        # Guard clauses: a tombstoned upload and non-bytes payloads are
        # rejected before any state is mutated.
        if self.finished:
            raise ValueError("An upload can only be used once.")
        if not isinstance(data, bytes):
            raise TypeError("`data` must be bytes, received", type(data))

        self._headers[_CONTENT_TYPE_HEADER] = content_type
        return _POST, self.upload_url, data, self._headers

    def transmit(self, transport, data, content_type, timeout=None):
        """Transmit the resource to be uploaded.

        Args:
            transport (object): An object which can make authenticated
                requests.
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
class MultipartUpload(UploadBase):
    """Upload a resource with metadata to a Google API.

    A **multipart** upload sends both metadata and the resource in a single
    (multipart) request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The request metadata will be amended
            to include the computed value. Using this option will override a
            manually-set checksum value. Supported values are "md5", "crc32c"
            and None. The default is None.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    def __init__(self, upload_url, headers=None, checksum=None):
        super(MultipartUpload, self).__init__(upload_url, headers=headers)
        self._checksum_type = checksum

    def _prepare_request(self, data, metadata, content_type):
        """Prepare the contents of an HTTP request.

        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        .. note:

            This method will be used only once, so ``headers`` will be
            mutated by having a new key added to it.

        Args:
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.

        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always POST)
              * the URL for the request
              * the body of the request
              * headers for the request

        Raises:
            ValueError: If the current upload has already finished.
            TypeError: If ``data`` isn't bytes.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("An upload can only be used once.")
        if not isinstance(data, bytes):
            raise TypeError("`data` must be bytes, received", type(data))

        # When a checksum type is configured, compute the digest over the
        # payload and record it in the request metadata (overriding any
        # manually-set value for that key).
        checksum_object = sync_helpers._get_checksum_object(self._checksum_type)
        if checksum_object is not None:
            checksum_object.update(data)
            actual_checksum = sync_helpers.prepare_checksum_digest(
                checksum_object.digest()
            )
            metadata_key = sync_helpers._get_metadata_key(self._checksum_type)
            metadata[metadata_key] = actual_checksum

        content, multipart_boundary = construct_multipart_request(
            data, metadata, content_type
        )
        # The multipart content type embeds the boundary, closed by a quote.
        self._headers[_CONTENT_TYPE_HEADER] = (
            _RELATED_HEADER + multipart_boundary + b'"'
        )
        return _POST, self.upload_url, content, self._headers

    def transmit(self, transport, data, metadata, content_type, timeout=None):
        """Transmit the resource to be uploaded.

        Args:
            transport (object): An object which can make authenticated
                requests.
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class ResumableUpload(UploadBase, sync_upload.ResumableUpload):
    """Initiate and fulfill a resumable upload to a Google API.

    A **resumable** upload sends an initial request with the resource metadata
    and then gets assigned an upload ID / upload URL to send bytes to.
    Using the upload URL, the upload is then done in chunks (determined by
    the user) until all bytes have been uploaded.

    Args:
        upload_url (str): The URL where the resumable upload will be initiated.
        chunk_size (int): The size of each chunk used to upload the resource.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the :meth:`initiate` request, e.g. headers for
            encrypted data. These **will not** be sent with
            :meth:`transmit_next_chunk` or :meth:`recover` requests.
        checksum (Optional[str]): The type of checksum to compute to verify
            the integrity of the object. After the upload is complete, the
            server-computed checksum of the resulting object will be read
            and google.resumable_media.common.DataCorruption will be raised on
            a mismatch. The corrupted file will not be deleted from the remote
            host automatically. Supported values are "md5", "crc32c" and None.
            The default is None.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.

    Raises:
        ValueError: If ``chunk_size`` is not a multiple of
            :data:`.UPLOAD_CHUNK_SIZE`.
    """

    def __init__(self, upload_url, chunk_size, checksum=None, headers=None):
        super(ResumableUpload, self).__init__(upload_url, headers=headers)
        if chunk_size % _async_resumable_media.UPLOAD_CHUNK_SIZE != 0:
            raise ValueError(
                "{} KB must divide chunk size".format(
                    _async_resumable_media.UPLOAD_CHUNK_SIZE / 1024
                )
            )
        self._chunk_size = chunk_size
        # Stream and content type are captured later, when the ``initiate()``
        # request is prepared.
        self._stream = None
        self._content_type = None
        self._bytes_uploaded = 0
        self._bytes_checksummed = 0
        self._checksum_type = checksum
        self._checksum_object = None
        self._total_bytes = None
        self._resumable_url = None
        self._invalid = False

    @property
    def invalid(self):
        """bool: Indicates if the upload is in an invalid state.

        This will occur if a call to :meth:`transmit_next_chunk` fails.
        To recover from such a failure, call :meth:`recover`.
        """
        return self._invalid

    @property
    def chunk_size(self):
        """int: The size of each chunk used to upload the resource."""
        return self._chunk_size

    @property
    def resumable_url(self):
        """Optional[str]: The URL of the in-progress resumable upload."""
        return self._resumable_url

    @property
    def bytes_uploaded(self):
        """int: Number of bytes that have been uploaded."""
        return self._bytes_uploaded

    @property
    def total_bytes(self):
        """Optional[int]: The total number of bytes to be uploaded.

        If this upload is initiated (via :meth:`initiate`) with
        ``stream_final=True``, this value will be populated based on the size
        of the ``stream`` being uploaded. (By default ``stream_final=True``.)

        If this upload is initiated with ``stream_final=False``,
        :attr:`total_bytes` will be :data:`None` since it cannot be
        determined from the stream.
        """
        return self._total_bytes

    def _prepare_initiate_request(
        self, stream, metadata, content_type, total_bytes=None, stream_final=True
    ):
        """Prepare the contents of HTTP request to initiate upload.

        This is everything that must be done before a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        Args:
            stream (IO[bytes]): The stream (i.e. file-like object) that will
                be uploaded. The stream **must** be at the beginning (i.e.
                ``stream.tell() == 0``).
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            total_bytes (Optional[int]): The total number of bytes to be
                uploaded. If specified, the upload size **will not** be
                determined from the stream (even if ``stream_final=True``).
            stream_final (Optional[bool]): Indicates if the ``stream`` is
                "final" (i.e. no more bytes will be added to it). In this case
                we determine the upload size from the size of the stream. If
                ``total_bytes`` is passed, this argument will be ignored.

        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always POST)
              * the URL for the request
              * the body of the request
              * headers for the request

        Raises:
            ValueError: If the current upload has already been initiated.
            ValueError: If ``stream`` is not at the beginning.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.resumable_url is not None:
            raise ValueError("This upload has already been initiated.")
        if stream.tell() != 0:
            raise ValueError("Stream must be at beginning.")

        self._stream = stream
        self._content_type = content_type
        headers = {
            _CONTENT_TYPE_HEADER: "application/json; charset=UTF-8",
            "x-upload-content-type": content_type,
        }
        # Set the total bytes if possible.
        if total_bytes is not None:
            self._total_bytes = total_bytes
        elif stream_final:
            self._total_bytes = get_total_bytes(stream)
        # Add the total bytes to the headers if set.
        if self._total_bytes is not None:
            content_length = "{:d}".format(self._total_bytes)
            headers["x-upload-content-length"] = content_length

        # Caller-supplied headers win over the computed defaults above.
        headers.update(self._headers)
        payload = json.dumps(metadata).encode("utf-8")
        return _POST, self.upload_url, payload, headers

    def _process_initiate_response(self, response):
        """Process the response from an HTTP request that initiated upload.

        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        This method takes the URL from the ``Location`` header and stores it
        for future use. Within that URL, we assume the ``upload_id`` query
        parameter has been included, but we do not check.

        Args:
            response (object): The HTTP response object (need headers).

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        _helpers.require_status_code(
            response,
            (http.client.OK,),
            self._get_status_code,
            callback=self._make_invalid,
        )
        self._resumable_url = _helpers.header_required(
            response, "location", self._get_headers
        )

    def initiate(
        self,
        transport,
        stream,
        metadata,
        content_type,
        total_bytes=None,
        stream_final=True,
        timeout=None,
    ):
        """Initiate a resumable upload.

        By default, this method assumes your ``stream`` is in a "final"
        state ready to transmit. However, ``stream_final=False`` can be used
        to indicate that the size of the resource is not known. This can happen
        if bytes are being dynamically fed into ``stream``, e.g. if the stream
        is attached to application logs.

        If ``stream_final=False`` is used, :attr:`chunk_size` bytes will be
        read from the stream every time :meth:`transmit_next_chunk` is called.
        If one of those reads produces strictly fewer bytes than the chunk
        size, the upload will be concluded.

        Args:
            transport (object): An object which can make authenticated
                requests.
            stream (IO[bytes]): The stream (i.e. file-like object) that will
                be uploaded. The stream **must** be at the beginning (i.e.
                ``stream.tell() == 0``).
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            total_bytes (Optional[int]): The total number of bytes to be
                uploaded. If specified, the upload size **will not** be
                determined from the stream (even if ``stream_final=True``).
            stream_final (Optional[bool]): Indicates if the ``stream`` is
                "final" (i.e. no more bytes will be added to it). In this case
                we determine the upload size from the size of the stream. If
                ``total_bytes`` is passed, this argument will be ignored.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    def _prepare_request(self):
        """Prepare the contents of HTTP request to upload a chunk.

        This is everything that must be done before a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.

        For the time being, this **does require** some form of I/O to read
        a chunk from ``stream`` (via :func:`get_next_chunk`). However, this
        will (almost) certainly not be network I/O.

        Returns:
            Tuple[str, str, bytes, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always PUT)
              * the URL for the request
              * the body of the request
              * headers for the request

            The headers **do not** incorporate the ``_headers`` on the
            current instance.

        Raises:
            ValueError: If the current upload has finished.
            ValueError: If the current upload is in an invalid state.
            ValueError: If the current upload has not been initiated.
            ValueError: If the location in the stream (i.e. ``stream.tell()``)
                does not agree with ``bytes_uploaded``.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if self.finished:
            raise ValueError("Upload has finished.")
        if self.invalid:
            raise ValueError(
                "Upload is in an invalid state. To recover call `recover()`."
            )
        if self.resumable_url is None:
            raise ValueError(
                "This upload has not been initiated. Please call "
                "initiate() before beginning to transmit chunks."
            )

        start_byte, payload, content_range = get_next_chunk(
            self._stream, self._chunk_size, self._total_bytes
        )
        # The stream position must agree with our own byte accounting;
        # otherwise the local state has drifted from the server's.
        if start_byte != self.bytes_uploaded:
            msg = _STREAM_ERROR_TEMPLATE.format(start_byte, self.bytes_uploaded)
            raise ValueError(msg)

        self._update_checksum(start_byte, payload)

        headers = {
            _CONTENT_TYPE_HEADER: self._content_type,
            _helpers.CONTENT_RANGE_HEADER: content_range,
        }
        return _PUT, self.resumable_url, payload, headers

    def _make_invalid(self):
        """Simple setter for ``invalid``.

        This is intended to be passed along as a callback to helpers that
        raise an exception so they can mark this instance as invalid before
        raising.
        """
        self._invalid = True

    async def _process_resumable_response(self, response, bytes_sent):
        """Process the response from an HTTP request.

        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        Args:
            response (object): The HTTP response object.
            bytes_sent (int): The number of bytes sent in the request that
                ``response`` was returned for.

        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is 308 and the ``range`` header is not of the form
                ``bytes 0-{end}``.
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 200 or 308.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        status_code = _helpers.require_status_code(
            response,
            (http.client.OK, http.client.PERMANENT_REDIRECT),
            self._get_status_code,
            callback=self._make_invalid,
        )
        if status_code == http.client.OK:
            # NOTE: We use the "local" information of ``bytes_sent`` to update
            #       ``bytes_uploaded``, but do not verify this against other
            #       state. However, there may be some other information:
            #
            #       * a ``size`` key in JSON response body
            #       * the ``total_bytes`` attribute (if set)
            #       * ``stream.tell()`` (relying on fact that ``initiate()``
            #         requires stream to be at the beginning)
            self._bytes_uploaded = self._bytes_uploaded + bytes_sent
            # Tombstone the current upload so it cannot be used again.
            self._finished = True
            # Validate the checksum. This can raise an exception on failure.
            await self._validate_checksum(response)
        else:
            # 308: server reports how much it has; trust its ``range`` header.
            bytes_range = _helpers.header_required(
                response,
                _helpers.RANGE_HEADER,
                self._get_headers,
                callback=self._make_invalid,
            )
            match = _BYTES_RANGE_RE.match(bytes_range)
            if match is None:
                self._make_invalid()
                raise common.InvalidResponse(
                    response,
                    'Unexpected "range" header',
                    bytes_range,
                    'Expected to be of the form "bytes=0-{end}"',
                )
            # ``end_byte`` is inclusive, hence the ``+ 1``.
            self._bytes_uploaded = int(match.group("end_byte")) + 1

    async def _validate_checksum(self, response):
        """Check the computed checksum, if any, against the response headers.

        Args:
            response (object): The HTTP response object.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the checksum
                computed locally and the checksum reported by the remote host do
                not match.
        """
        if self._checksum_type is None:
            return
        metadata_key = sync_helpers._get_metadata_key(self._checksum_type)
        metadata = await response.json()
        remote_checksum = metadata.get(metadata_key)
        if remote_checksum is None:
            raise common.InvalidResponse(
                response,
                _UPLOAD_METADATA_NO_APPROPRIATE_CHECKSUM_MESSAGE.format(metadata_key),
                self._get_headers(response),
            )
        local_checksum = sync_helpers.prepare_checksum_digest(
            self._checksum_object.digest()
        )
        if local_checksum != remote_checksum:
            raise common.DataCorruption(
                response,
                _UPLOAD_CHECKSUM_MISMATCH_MESSAGE.format(
                    self._checksum_type.upper(), local_checksum, remote_checksum
                ),
            )

    def transmit_next_chunk(self, transport, timeout=None):
        """Transmit the next chunk of the resource to be uploaded.

        If the current upload was initiated with ``stream_final=False``,
        this method will dynamically determine if the upload has completed.
        The upload will be considered complete if the stream produces
        fewer than :attr:`chunk_size` bytes when a chunk is read from it.

        Args:
            transport (object): An object which can make authenticated
                requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")

    def _prepare_recover_request(self):
        """Prepare the contents of HTTP request to recover from failure.

        This is everything that must be done before a request that doesn't
        require network I/O. This is based on the `sans-I/O`_ philosophy.

        We assume that the :attr:`resumable_url` is set (i.e. the only way
        the upload can end up :attr:`invalid` is if it has been initiated.

        Returns:
            Tuple[str, str, NoneType, Mapping[str, str]]: The quadruple

              * HTTP verb for the request (always PUT)
              * the URL for the request
              * the body of the request (always :data:`None`)
              * headers for the request

            The headers **do not** incorporate the ``_headers`` on the
            current instance.

        Raises:
            ValueError: If the current upload is not in an invalid state.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        if not self.invalid:
            raise ValueError("Upload is not in invalid state, no need to recover.")

        # "bytes */*" asks the server how many bytes it has received so far.
        headers = {_helpers.CONTENT_RANGE_HEADER: "bytes */*"}
        return _PUT, self.resumable_url, None, headers

    def _process_recover_response(self, response):
        """Process the response from an HTTP request to recover from failure.

        This is everything that must be done after a request that doesn't
        require network I/O (or other I/O). This is based on the `sans-I/O`_
        philosophy.

        Args:
            response (object): The HTTP response object.

        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 308.
            ~google.resumable_media.common.InvalidResponse: If the status
                code is 308 and the ``range`` header is not of the form
                ``bytes 0-{end}``.

        .. _sans-I/O: https://sans-io.readthedocs.io/
        """
        _helpers.require_status_code(
            response,
            (http.client.PERMANENT_REDIRECT,),
            self._get_status_code,
        )
        headers = self._get_headers(response)
        if _helpers.RANGE_HEADER in headers:
            bytes_range = headers[_helpers.RANGE_HEADER]
            match = _BYTES_RANGE_RE.match(bytes_range)
            if match is None:
                raise common.InvalidResponse(
                    response,
                    'Unexpected "range" header',
                    bytes_range,
                    'Expected to be of the form "bytes=0-{end}"',
                )
            self._bytes_uploaded = int(match.group("end_byte")) + 1
        else:
            # In this case, the upload has not "begun".
            self._bytes_uploaded = 0

        # Rewind the stream to the server-confirmed position and clear the
        # invalid flag so transmission can resume.
        self._stream.seek(self._bytes_uploaded)
        self._invalid = False

    def recover(self, transport):
        """Recover from a failure.

        This method should be used when a :class:`ResumableUpload` is in an
        :attr:`~ResumableUpload.invalid` state due to a request failure.

        This will verify the progress with the server and make sure the
        current upload is in a valid state before :meth:`transmit_next_chunk`
        can be used again.

        Args:
            transport (object): An object which can make authenticated
                requests.

        Raises:
            NotImplementedError: Always, since virtual.
        """
        raise NotImplementedError("This implementation is virtual.")
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def get_boundary():
    """Generate a random boundary for a multipart request.

    Returns:
        bytes: The boundary used to separate parts of a multipart request.
    """
    # The template is a text (unicode) string, so format first and encode
    # to bytes only at the end.
    token = random.randrange(sys.maxsize)
    return _BOUNDARY_FORMAT.format(token).encode("utf-8")
|
| 832 |
+
|
| 833 |
+
|
| 834 |
+
def construct_multipart_request(data, metadata, content_type):
    """Construct a multipart request body.

    Args:
        data (bytes): The resource content (UTF-8 encoded as bytes)
            to be uploaded.
        metadata (Mapping[str, str]): The resource metadata, such as an
            ACL list.
        content_type (str): The content type of the resource, e.g. a JPEG
            image has content type ``image/jpeg``.

    Returns:
        Tuple[bytes, bytes]: The multipart request body and the boundary used
        between each part.
    """
    multipart_boundary = get_boundary()
    boundary_sep = _MULTIPART_SEP + multipart_boundary
    # Assemble the metadata part and the content part, separated (and
    # terminated) by the boundary, into one payload.
    pieces = (
        boundary_sep,
        _MULTIPART_BEGIN,
        json.dumps(metadata).encode("utf-8"),
        _CRLF,
        boundary_sep,
        _CRLF,
        b"content-type: ",
        content_type.encode("utf-8"),
        _CRLF,
        _CRLF,  # Empty line between headers and body.
        data,
        _CRLF,
        boundary_sep,
        _MULTIPART_SEP,
    )
    return b"".join(pieces), multipart_boundary
|
| 873 |
+
|
| 874 |
+
|
| 875 |
+
def get_total_bytes(stream):
    """Determine the total number of bytes in a stream.

    The stream position is preserved: the stream is seeked to the end to
    measure its size and then restored to wherever the caller left it.

    Args:
        stream (IO[bytes]): The stream (i.e. file-like object).

    Returns:
        int: The number of bytes.
    """
    saved_position = stream.tell()
    # NOTE: ``.seek()`` **should** return the new offset (same as
    #       ``.tell()``), but legacy file objects did not, so we rely on
    #       ``.tell()`` instead.
    stream.seek(0, os.SEEK_END)
    size = stream.tell()
    stream.seek(saved_position)
    return size
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
def get_next_chunk(stream, chunk_size, total_bytes):
    """Get a chunk from an I/O stream.

    The ``stream`` may have fewer bytes remaining than ``chunk_size``
    so it may not always be the case that
    ``end_byte == start_byte + chunk_size - 1``.

    Args:
        stream (IO[bytes]): The stream (i.e. file-like object).
        chunk_size (int): The size of the chunk to be read from the ``stream``.
        total_bytes (Optional[int]): The (expected) total number of bytes
            in the ``stream``.

    Returns:
        Tuple[int, bytes, str]: Triple of:

          * the start byte index
          * the content in between the start and end bytes (inclusive)
          * content range header for the chunk (slice) that has been read

    Raises:
        ValueError: If ``total_bytes == 0`` but ``stream.read()`` yields
            non-empty content.
        ValueError: If there is no data left to consume. This corresponds
            exactly to the case ``end_byte < start_byte``, which can only
            occur if ``end_byte == start_byte - 1``.
    """
    start_byte = stream.tell()
    # When the overall size is known (and positive), never read past the
    # declared end of the stream.
    if total_bytes is not None and start_byte + chunk_size >= total_bytes > 0:
        read_size = total_bytes - start_byte
    else:
        read_size = chunk_size
    payload = stream.read(read_size)
    end_byte = stream.tell() - 1

    bytes_read = len(payload)
    if total_bytes is None:
        if bytes_read < chunk_size:
            # A short read means the stream is exhausted, so we now
            # **KNOW** the total number of bytes.
            total_bytes = end_byte + 1
    elif total_bytes == 0:
        # NOTE: We also expect ``start_byte == 0`` here but don't check
        #       because ``_prepare_initiate_request()`` requires the
        #       stream to be at the beginning.
        if bytes_read != 0:
            raise ValueError(
                "Stream specified as empty, but produced non-empty content."
            )
    elif bytes_read == 0:
        raise ValueError(
            "Stream is already exhausted. There is no content remaining."
        )

    content_range = get_content_range(start_byte, end_byte, total_bytes)
    return start_byte, payload, content_range
|
| 950 |
+
|
| 951 |
+
|
| 952 |
+
def get_content_range(start_byte, end_byte, total_bytes):
    """Convert start, end and total into content range header.

    If ``total_bytes`` is not known, uses "bytes {start}-{end}/*".
    If we are dealing with an empty range (i.e. ``end_byte < start_byte``)
    then "bytes */{total}" is used.

    This function **ASSUMES** that if the size is not known, the caller will
    not also pass an empty range.

    Args:
        start_byte (int): The start (inclusive) of the byte range.
        end_byte (int): The end (inclusive) of the byte range.
        total_bytes (Optional[int]): The number of bytes in the byte
            range (if known).

    Returns:
        str: The content range header.
    """
    if total_bytes is None:
        # Size unknown: "bytes {start}-{end}/*".
        return _RANGE_UNKNOWN_TEMPLATE.format(start_byte, end_byte)
    if end_byte < start_byte:
        # Empty range: "bytes */{total}".
        return _EMPTY_RANGE_TEMPLATE.format(total_bytes)
    return _CONTENT_RANGE_TEMPLATE.format(start_byte, end_byte, total_bytes)
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__init__.py
ADDED
|
@@ -0,0 +1,682 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""``requests`` utilities for Google Media Downloads and Resumable Uploads.
|
| 16 |
+
|
| 17 |
+
This sub-package assumes callers will use the `requests`_ library
|
| 18 |
+
as transport and `google-auth`_ for sending authenticated HTTP traffic
|
| 19 |
+
with ``requests``.
|
| 20 |
+
|
| 21 |
+
.. _requests: http://docs.python-requests.org/
|
| 22 |
+
.. _google-auth: https://google-auth.readthedocs.io/
|
| 23 |
+
|
| 24 |
+
====================
|
| 25 |
+
Authorized Transport
|
| 26 |
+
====================
|
| 27 |
+
|
| 28 |
+
To use ``google-auth`` and ``requests`` to create an authorized transport
|
| 29 |
+
that has read-only access to Google Cloud Storage (GCS):
|
| 30 |
+
|
| 31 |
+
.. testsetup:: get-credentials
|
| 32 |
+
|
| 33 |
+
import google.auth
|
| 34 |
+
import google.auth.credentials as creds_mod
|
| 35 |
+
import mock
|
| 36 |
+
|
| 37 |
+
def mock_default(scopes=None):
|
| 38 |
+
credentials = mock.Mock(spec=creds_mod.Credentials)
|
| 39 |
+
return credentials, 'mock-project'
|
| 40 |
+
|
| 41 |
+
# Patch the ``default`` function on the module.
|
| 42 |
+
original_default = google.auth.default
|
| 43 |
+
google.auth.default = mock_default
|
| 44 |
+
|
| 45 |
+
.. doctest:: get-credentials
|
| 46 |
+
|
| 47 |
+
>>> import google.auth
|
| 48 |
+
>>> import google.auth.transport.requests as tr_requests
|
| 49 |
+
>>>
|
| 50 |
+
>>> ro_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
|
| 51 |
+
>>> credentials, _ = google.auth.default(scopes=(ro_scope,))
|
| 52 |
+
>>> transport = tr_requests.AuthorizedSession(credentials)
|
| 53 |
+
>>> transport
|
| 54 |
+
<google.auth.transport.requests.AuthorizedSession object at 0x...>
|
| 55 |
+
|
| 56 |
+
.. testcleanup:: get-credentials
|
| 57 |
+
|
| 58 |
+
# Put back the correct ``default`` function on the module.
|
| 59 |
+
google.auth.default = original_default
|
| 60 |
+
|
| 61 |
+
================
|
| 62 |
+
Simple Downloads
|
| 63 |
+
================
|
| 64 |
+
|
| 65 |
+
To download an object from Google Cloud Storage, construct the media URL
|
| 66 |
+
for the GCS object and download it with an authorized transport that has
|
| 67 |
+
access to the resource:
|
| 68 |
+
|
| 69 |
+
.. testsetup:: basic-download
|
| 70 |
+
|
| 71 |
+
import mock
|
| 72 |
+
import requests
|
| 73 |
+
import http.client
|
| 74 |
+
|
| 75 |
+
bucket = 'bucket-foo'
|
| 76 |
+
blob_name = 'file.txt'
|
| 77 |
+
|
| 78 |
+
fake_response = requests.Response()
|
| 79 |
+
fake_response.status_code = int(http.client.OK)
|
| 80 |
+
fake_response.headers['Content-Length'] = '1364156'
|
| 81 |
+
fake_content = mock.MagicMock(spec=['__len__'])
|
| 82 |
+
fake_content.__len__.return_value = 1364156
|
| 83 |
+
fake_response._content = fake_content
|
| 84 |
+
|
| 85 |
+
get_method = mock.Mock(return_value=fake_response, spec=[])
|
| 86 |
+
transport = mock.Mock(request=get_method, spec=['request'])
|
| 87 |
+
|
| 88 |
+
.. doctest:: basic-download
|
| 89 |
+
|
| 90 |
+
>>> from google.resumable_media.requests import Download
|
| 91 |
+
>>>
|
| 92 |
+
>>> url_template = (
|
| 93 |
+
... 'https://www.googleapis.com/download/storage/v1/b/'
|
| 94 |
+
... '{bucket}/o/{blob_name}?alt=media')
|
| 95 |
+
>>> media_url = url_template.format(
|
| 96 |
+
... bucket=bucket, blob_name=blob_name)
|
| 97 |
+
>>>
|
| 98 |
+
>>> download = Download(media_url)
|
| 99 |
+
>>> response = download.consume(transport)
|
| 100 |
+
>>> download.finished
|
| 101 |
+
True
|
| 102 |
+
>>> response
|
| 103 |
+
<Response [200]>
|
| 104 |
+
>>> response.headers['Content-Length']
|
| 105 |
+
'1364156'
|
| 106 |
+
>>> len(response.content)
|
| 107 |
+
1364156
|
| 108 |
+
|
| 109 |
+
To download only a portion of the bytes in the object,
|
| 110 |
+
specify ``start`` and ``end`` byte positions (both optional):
|
| 111 |
+
|
| 112 |
+
.. testsetup:: basic-download-with-slice
|
| 113 |
+
|
| 114 |
+
import mock
|
| 115 |
+
import requests
|
| 116 |
+
import http.client
|
| 117 |
+
|
| 118 |
+
from google.resumable_media.requests import Download
|
| 119 |
+
|
| 120 |
+
media_url = 'http://test.invalid'
|
| 121 |
+
start = 4096
|
| 122 |
+
end = 8191
|
| 123 |
+
slice_size = end - start + 1
|
| 124 |
+
|
| 125 |
+
fake_response = requests.Response()
|
| 126 |
+
fake_response.status_code = int(http.client.PARTIAL_CONTENT)
|
| 127 |
+
fake_response.headers['Content-Length'] = '{:d}'.format(slice_size)
|
| 128 |
+
content_range = 'bytes {:d}-{:d}/1364156'.format(start, end)
|
| 129 |
+
fake_response.headers['Content-Range'] = content_range
|
| 130 |
+
fake_content = mock.MagicMock(spec=['__len__'])
|
| 131 |
+
fake_content.__len__.return_value = slice_size
|
| 132 |
+
fake_response._content = fake_content
|
| 133 |
+
|
| 134 |
+
get_method = mock.Mock(return_value=fake_response, spec=[])
|
| 135 |
+
transport = mock.Mock(request=get_method, spec=['request'])
|
| 136 |
+
|
| 137 |
+
.. doctest:: basic-download-with-slice
|
| 138 |
+
|
| 139 |
+
>>> download = Download(media_url, start=4096, end=8191)
|
| 140 |
+
>>> response = download.consume(transport)
|
| 141 |
+
>>> download.finished
|
| 142 |
+
True
|
| 143 |
+
>>> response
|
| 144 |
+
<Response [206]>
|
| 145 |
+
>>> response.headers['Content-Length']
|
| 146 |
+
'4096'
|
| 147 |
+
>>> response.headers['Content-Range']
|
| 148 |
+
'bytes 4096-8191/1364156'
|
| 149 |
+
>>> len(response.content)
|
| 150 |
+
4096
|
| 151 |
+
|
| 152 |
+
=================
|
| 153 |
+
Chunked Downloads
|
| 154 |
+
=================
|
| 155 |
+
|
| 156 |
+
For very large objects or objects of unknown size, it may make more sense
|
| 157 |
+
to download the object in chunks rather than all at once. This can be done
|
| 158 |
+
to avoid dropped connections with a poor internet connection or can allow
|
| 159 |
+
multiple chunks to be downloaded in parallel to speed up the total
|
| 160 |
+
download.
|
| 161 |
+
|
| 162 |
+
A :class:`.ChunkedDownload` uses the same media URL and authorized
|
| 163 |
+
transport that a basic :class:`.Download` would use, but also
|
| 164 |
+
requires a chunk size and a write-able byte ``stream``. The chunk size is used
|
| 165 |
+
to determine how much of the resource to consume with each request and the
|
| 166 |
+
stream is to allow the resource to be written out (e.g. to disk) without
|
| 167 |
+
having to fit in memory all at once.
|
| 168 |
+
|
| 169 |
+
.. testsetup:: chunked-download
|
| 170 |
+
|
| 171 |
+
import io
|
| 172 |
+
|
| 173 |
+
import mock
|
| 174 |
+
import requests
|
| 175 |
+
import http.client
|
| 176 |
+
|
| 177 |
+
media_url = 'http://test.invalid'
|
| 178 |
+
|
| 179 |
+
fifty_mb = 50 * 1024 * 1024
|
| 180 |
+
one_gb = 1024 * 1024 * 1024
|
| 181 |
+
fake_response = requests.Response()
|
| 182 |
+
fake_response.status_code = int(http.client.PARTIAL_CONTENT)
|
| 183 |
+
fake_response.headers['Content-Length'] = '{:d}'.format(fifty_mb)
|
| 184 |
+
content_range = 'bytes 0-{:d}/{:d}'.format(fifty_mb - 1, one_gb)
|
| 185 |
+
fake_response.headers['Content-Range'] = content_range
|
| 186 |
+
fake_content_begin = b'The beginning of the chunk...'
|
| 187 |
+
fake_content = fake_content_begin + b'1' * (fifty_mb - 29)
|
| 188 |
+
fake_response._content = fake_content
|
| 189 |
+
|
| 190 |
+
get_method = mock.Mock(return_value=fake_response, spec=[])
|
| 191 |
+
transport = mock.Mock(request=get_method, spec=['request'])
|
| 192 |
+
|
| 193 |
+
.. doctest:: chunked-download
|
| 194 |
+
|
| 195 |
+
>>> from google.resumable_media.requests import ChunkedDownload
|
| 196 |
+
>>>
|
| 197 |
+
>>> chunk_size = 50 * 1024 * 1024 # 50MB
|
| 198 |
+
>>> stream = io.BytesIO()
|
| 199 |
+
>>> download = ChunkedDownload(
|
| 200 |
+
... media_url, chunk_size, stream)
|
| 201 |
+
>>> # Check the state of the download before starting.
|
| 202 |
+
>>> download.bytes_downloaded
|
| 203 |
+
0
|
| 204 |
+
>>> download.total_bytes is None
|
| 205 |
+
True
|
| 206 |
+
>>> response = download.consume_next_chunk(transport)
|
| 207 |
+
>>> # Check the state of the download after consuming one chunk.
|
| 208 |
+
>>> download.finished
|
| 209 |
+
False
|
| 210 |
+
>>> download.bytes_downloaded # chunk_size
|
| 211 |
+
52428800
|
| 212 |
+
>>> download.total_bytes # 1GB
|
| 213 |
+
1073741824
|
| 214 |
+
>>> response
|
| 215 |
+
<Response [206]>
|
| 216 |
+
>>> response.headers['Content-Length']
|
| 217 |
+
'52428800'
|
| 218 |
+
>>> response.headers['Content-Range']
|
| 219 |
+
'bytes 0-52428799/1073741824'
|
| 220 |
+
>>> len(response.content) == chunk_size
|
| 221 |
+
True
|
| 222 |
+
>>> stream.seek(0)
|
| 223 |
+
0
|
| 224 |
+
>>> stream.read(29)
|
| 225 |
+
b'The beginning of the chunk...'
|
| 226 |
+
|
| 227 |
+
The download will change its ``finished`` status to :data:`True`
|
| 228 |
+
once the final chunk is consumed. In some cases, the final chunk may
|
| 229 |
+
not be the same size as the other chunks:
|
| 230 |
+
|
| 231 |
+
.. testsetup:: chunked-download-end
|
| 232 |
+
|
| 233 |
+
import mock
|
| 234 |
+
import requests
|
| 235 |
+
import http.client
|
| 236 |
+
|
| 237 |
+
from google.resumable_media.requests import ChunkedDownload
|
| 238 |
+
|
| 239 |
+
media_url = 'http://test.invalid'
|
| 240 |
+
|
| 241 |
+
fifty_mb = 50 * 1024 * 1024
|
| 242 |
+
one_gb = 1024 * 1024 * 1024
|
| 243 |
+
stream = mock.Mock(spec=['write'])
|
| 244 |
+
download = ChunkedDownload(media_url, fifty_mb, stream)
|
| 245 |
+
download._bytes_downloaded = 20 * fifty_mb
|
| 246 |
+
download._total_bytes = one_gb
|
| 247 |
+
|
| 248 |
+
fake_response = requests.Response()
|
| 249 |
+
fake_response.status_code = int(http.client.PARTIAL_CONTENT)
|
| 250 |
+
slice_size = one_gb - 20 * fifty_mb
|
| 251 |
+
fake_response.headers['Content-Length'] = '{:d}'.format(slice_size)
|
| 252 |
+
content_range = 'bytes {:d}-{:d}/{:d}'.format(
|
| 253 |
+
20 * fifty_mb, one_gb - 1, one_gb)
|
| 254 |
+
fake_response.headers['Content-Range'] = content_range
|
| 255 |
+
fake_content = mock.MagicMock(spec=['__len__'])
|
| 256 |
+
fake_content.__len__.return_value = slice_size
|
| 257 |
+
fake_response._content = fake_content
|
| 258 |
+
|
| 259 |
+
get_method = mock.Mock(return_value=fake_response, spec=[])
|
| 260 |
+
transport = mock.Mock(request=get_method, spec=['request'])
|
| 261 |
+
|
| 262 |
+
.. doctest:: chunked-download-end
|
| 263 |
+
|
| 264 |
+
>>> # The state of the download in progress.
|
| 265 |
+
>>> download.finished
|
| 266 |
+
False
|
| 267 |
+
>>> download.bytes_downloaded # 20 chunks at 50MB
|
| 268 |
+
1048576000
|
| 269 |
+
>>> download.total_bytes # 1GB
|
| 270 |
+
1073741824
|
| 271 |
+
>>> response = download.consume_next_chunk(transport)
|
| 272 |
+
>>> # The state of the download after consuming the final chunk.
|
| 273 |
+
>>> download.finished
|
| 274 |
+
True
|
| 275 |
+
>>> download.bytes_downloaded == download.total_bytes
|
| 276 |
+
True
|
| 277 |
+
>>> response
|
| 278 |
+
<Response [206]>
|
| 279 |
+
>>> response.headers['Content-Length']
|
| 280 |
+
'25165824'
|
| 281 |
+
>>> response.headers['Content-Range']
|
| 282 |
+
'bytes 1048576000-1073741823/1073741824'
|
| 283 |
+
>>> len(response.content) < download.chunk_size
|
| 284 |
+
True
|
| 285 |
+
|
| 286 |
+
In addition, a :class:`.ChunkedDownload` can also take optional
|
| 287 |
+
``start`` and ``end`` byte positions.
|
| 288 |
+
|
| 289 |
+
Usually, no checksum is returned with a chunked download. Even if one is returned,
|
| 290 |
+
it is not validated. If you need to validate the checksum, you can do so
|
| 291 |
+
by buffering the chunks and validating the checksum against the completed download.
|
| 292 |
+
|
| 293 |
+
==============
|
| 294 |
+
Simple Uploads
|
| 295 |
+
==============
|
| 296 |
+
|
| 297 |
+
Among the three supported upload classes, the simplest is
|
| 298 |
+
:class:`.SimpleUpload`. A simple upload should be used when the resource
|
| 299 |
+
being uploaded is small and when there is no metadata (other than the name)
|
| 300 |
+
associated with the resource.
|
| 301 |
+
|
| 302 |
+
.. testsetup:: simple-upload
|
| 303 |
+
|
| 304 |
+
import json
|
| 305 |
+
|
| 306 |
+
import mock
|
| 307 |
+
import requests
|
| 308 |
+
import http.client
|
| 309 |
+
|
| 310 |
+
bucket = 'some-bucket'
|
| 311 |
+
blob_name = 'file.txt'
|
| 312 |
+
|
| 313 |
+
fake_response = requests.Response()
|
| 314 |
+
fake_response.status_code = int(http.client.OK)
|
| 315 |
+
payload = {
|
| 316 |
+
'bucket': bucket,
|
| 317 |
+
'contentType': 'text/plain',
|
| 318 |
+
'md5Hash': 'M0XLEsX9/sMdiI+4pB4CAQ==',
|
| 319 |
+
'name': blob_name,
|
| 320 |
+
'size': '27',
|
| 321 |
+
}
|
| 322 |
+
fake_response._content = json.dumps(payload).encode('utf-8')
|
| 323 |
+
|
| 324 |
+
post_method = mock.Mock(return_value=fake_response, spec=[])
|
| 325 |
+
transport = mock.Mock(request=post_method, spec=['request'])
|
| 326 |
+
|
| 327 |
+
.. doctest:: simple-upload
|
| 328 |
+
:options: +NORMALIZE_WHITESPACE
|
| 329 |
+
|
| 330 |
+
>>> from google.resumable_media.requests import SimpleUpload
|
| 331 |
+
>>>
|
| 332 |
+
>>> url_template = (
|
| 333 |
+
... 'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
|
| 334 |
+
... 'uploadType=media&'
|
| 335 |
+
... 'name={blob_name}')
|
| 336 |
+
>>> upload_url = url_template.format(
|
| 337 |
+
... bucket=bucket, blob_name=blob_name)
|
| 338 |
+
>>>
|
| 339 |
+
>>> upload = SimpleUpload(upload_url)
|
| 340 |
+
>>> data = b'Some not too large content.'
|
| 341 |
+
>>> content_type = 'text/plain'
|
| 342 |
+
>>> response = upload.transmit(transport, data, content_type)
|
| 343 |
+
>>> upload.finished
|
| 344 |
+
True
|
| 345 |
+
>>> response
|
| 346 |
+
<Response [200]>
|
| 347 |
+
>>> json_response = response.json()
|
| 348 |
+
>>> json_response['bucket'] == bucket
|
| 349 |
+
True
|
| 350 |
+
>>> json_response['name'] == blob_name
|
| 351 |
+
True
|
| 352 |
+
>>> json_response['contentType'] == content_type
|
| 353 |
+
True
|
| 354 |
+
>>> json_response['md5Hash']
|
| 355 |
+
'M0XLEsX9/sMdiI+4pB4CAQ=='
|
| 356 |
+
>>> int(json_response['size']) == len(data)
|
| 357 |
+
True
|
| 358 |
+
|
| 359 |
+
In the rare case that an upload fails, an :exc:`.InvalidResponse`
|
| 360 |
+
will be raised:
|
| 361 |
+
|
| 362 |
+
.. testsetup:: simple-upload-fail
|
| 363 |
+
|
| 364 |
+
import time
|
| 365 |
+
|
| 366 |
+
import mock
|
| 367 |
+
import requests
|
| 368 |
+
import http.client
|
| 369 |
+
|
| 370 |
+
from google import resumable_media
|
| 371 |
+
from google.resumable_media import _helpers
|
| 372 |
+
from google.resumable_media.requests import SimpleUpload as constructor
|
| 373 |
+
|
| 374 |
+
upload_url = 'http://test.invalid'
|
| 375 |
+
data = b'Some not too large content.'
|
| 376 |
+
content_type = 'text/plain'
|
| 377 |
+
|
| 378 |
+
fake_response = requests.Response()
|
| 379 |
+
fake_response.status_code = int(http.client.SERVICE_UNAVAILABLE)
|
| 380 |
+
|
| 381 |
+
post_method = mock.Mock(return_value=fake_response, spec=[])
|
| 382 |
+
transport = mock.Mock(request=post_method, spec=['request'])
|
| 383 |
+
|
| 384 |
+
time_sleep = time.sleep
|
| 385 |
+
def dont_sleep(seconds):
|
| 386 |
+
raise RuntimeError('No sleep', seconds)
|
| 387 |
+
|
| 388 |
+
def SimpleUpload(*args, **kwargs):
|
| 389 |
+
upload = constructor(*args, **kwargs)
|
| 390 |
+
# Mock the cumulative sleep to avoid retries (and `time.sleep()`).
|
| 391 |
+
upload._retry_strategy = resumable_media.RetryStrategy(
|
| 392 |
+
max_cumulative_retry=-1.0)
|
| 393 |
+
return upload
|
| 394 |
+
|
| 395 |
+
time.sleep = dont_sleep
|
| 396 |
+
|
| 397 |
+
.. doctest:: simple-upload-fail
|
| 398 |
+
:options: +NORMALIZE_WHITESPACE
|
| 399 |
+
|
| 400 |
+
>>> upload = SimpleUpload(upload_url)
|
| 401 |
+
>>> error = None
|
| 402 |
+
>>> try:
|
| 403 |
+
... upload.transmit(transport, data, content_type)
|
| 404 |
+
... except resumable_media.InvalidResponse as caught_exc:
|
| 405 |
+
... error = caught_exc
|
| 406 |
+
...
|
| 407 |
+
>>> error
|
| 408 |
+
InvalidResponse('Request failed with status code', 503,
|
| 409 |
+
'Expected one of', <HTTPStatus.OK: 200>)
|
| 410 |
+
>>> error.response
|
| 411 |
+
<Response [503]>
|
| 412 |
+
>>>
|
| 413 |
+
>>> upload.finished
|
| 414 |
+
True
|
| 415 |
+
|
| 416 |
+
.. testcleanup:: simple-upload-fail
|
| 417 |
+
|
| 418 |
+
# Put back the correct ``sleep`` function on the ``time`` module.
|
| 419 |
+
time.sleep = time_sleep
|
| 420 |
+
|
| 421 |
+
Even in the case of failure, we see that the upload is
|
| 422 |
+
:attr:`~.SimpleUpload.finished`, i.e. it cannot be re-used.
|
| 423 |
+
|
| 424 |
+
=================
|
| 425 |
+
Multipart Uploads
|
| 426 |
+
=================
|
| 427 |
+
|
| 428 |
+
After the simple upload, the :class:`.MultipartUpload` can be used to
|
| 429 |
+
achieve essentially the same task. However, a multipart upload allows some
|
| 430 |
+
metadata about the resource to be sent along as well. (This is the "multi":
|
| 431 |
+
we send a first part with the metadata and a second part with the actual
|
| 432 |
+
bytes in the resource.)
|
| 433 |
+
|
| 434 |
+
Usage is similar to the simple upload, but :meth:`~.MultipartUpload.transmit`
|
| 435 |
+
accepts an extra required argument: ``metadata``.
|
| 436 |
+
|
| 437 |
+
.. testsetup:: multipart-upload
|
| 438 |
+
|
| 439 |
+
import json
|
| 440 |
+
|
| 441 |
+
import mock
|
| 442 |
+
import requests
|
| 443 |
+
import http.client
|
| 444 |
+
|
| 445 |
+
bucket = 'some-bucket'
|
| 446 |
+
blob_name = 'file.txt'
|
| 447 |
+
data = b'Some not too large content.'
|
| 448 |
+
content_type = 'text/plain'
|
| 449 |
+
|
| 450 |
+
fake_response = requests.Response()
|
| 451 |
+
fake_response.status_code = int(http.client.OK)
|
| 452 |
+
payload = {
|
| 453 |
+
'bucket': bucket,
|
| 454 |
+
'name': blob_name,
|
| 455 |
+
'metadata': {'color': 'grurple'},
|
| 456 |
+
}
|
| 457 |
+
fake_response._content = json.dumps(payload).encode('utf-8')
|
| 458 |
+
|
| 459 |
+
post_method = mock.Mock(return_value=fake_response, spec=[])
|
| 460 |
+
transport = mock.Mock(request=post_method, spec=['request'])
|
| 461 |
+
|
| 462 |
+
.. doctest:: multipart-upload
|
| 463 |
+
|
| 464 |
+
>>> from google.resumable_media.requests import MultipartUpload
|
| 465 |
+
>>>
|
| 466 |
+
>>> url_template = (
|
| 467 |
+
... 'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
|
| 468 |
+
... 'uploadType=multipart')
|
| 469 |
+
>>> upload_url = url_template.format(bucket=bucket)
|
| 470 |
+
>>>
|
| 471 |
+
>>> upload = MultipartUpload(upload_url)
|
| 472 |
+
>>> metadata = {
|
| 473 |
+
... 'name': blob_name,
|
| 474 |
+
... 'metadata': {
|
| 475 |
+
... 'color': 'grurple',
|
| 476 |
+
... },
|
| 477 |
+
... }
|
| 478 |
+
>>> response = upload.transmit(transport, data, metadata, content_type)
|
| 479 |
+
>>> upload.finished
|
| 480 |
+
True
|
| 481 |
+
>>> response
|
| 482 |
+
<Response [200]>
|
| 483 |
+
>>> json_response = response.json()
|
| 484 |
+
>>> json_response['bucket'] == bucket
|
| 485 |
+
True
|
| 486 |
+
>>> json_response['name'] == blob_name
|
| 487 |
+
True
|
| 488 |
+
>>> json_response['metadata'] == metadata['metadata']
|
| 489 |
+
True
|
| 490 |
+
|
| 491 |
+
As with the simple upload, in the case of failure an :exc:`.InvalidResponse`
|
| 492 |
+
is raised, enclosing the :attr:`~.InvalidResponse.response` that caused
|
| 493 |
+
the failure and the ``upload`` object cannot be re-used after a failure.
|
| 494 |
+
|
| 495 |
+
=================
|
| 496 |
+
Resumable Uploads
|
| 497 |
+
=================
|
| 498 |
+
|
| 499 |
+
A :class:`.ResumableUpload` deviates from the other two upload classes:
|
| 500 |
+
it transmits a resource over the course of multiple requests. This
|
| 501 |
+
is intended to be used in cases where:
|
| 502 |
+
|
| 503 |
+
* the size of the resource is not known (i.e. it is generated on the fly)
|
| 504 |
+
* requests must be short-lived
|
| 505 |
+
* the client has request **size** limitations
|
| 506 |
+
* the resource is too large to fit into memory
|
| 507 |
+
|
| 508 |
+
In general, a resource should be sent in a **single** request to avoid
|
| 509 |
+
latency and reduce QPS. See `GCS best practices`_ for more things to
|
| 510 |
+
consider when using a resumable upload.
|
| 511 |
+
|
| 512 |
+
.. _GCS best practices: https://cloud.google.com/storage/docs/\
|
| 513 |
+
best-practices#uploading
|
| 514 |
+
|
| 515 |
+
After creating a :class:`.ResumableUpload` instance, a
|
| 516 |
+
**resumable upload session** must be initiated to let the server know that
|
| 517 |
+
a series of chunked upload requests will be coming and to obtain an
|
| 518 |
+
``upload_id`` for the session. In contrast to the other two upload classes,
|
| 519 |
+
:meth:`~.ResumableUpload.initiate` takes a byte ``stream`` as input rather
|
| 520 |
+
than raw bytes as ``data``. This can be a file object, a :class:`~io.BytesIO`
|
| 521 |
+
object or any other stream implementing the same interface.
|
| 522 |
+
|
| 523 |
+
.. testsetup:: resumable-initiate
|
| 524 |
+
|
| 525 |
+
import io
|
| 526 |
+
|
| 527 |
+
import mock
|
| 528 |
+
import requests
|
| 529 |
+
import http.client
|
| 530 |
+
|
| 531 |
+
bucket = 'some-bucket'
|
| 532 |
+
blob_name = 'file.txt'
|
| 533 |
+
data = b'Some resumable bytes.'
|
| 534 |
+
content_type = 'text/plain'
|
| 535 |
+
|
| 536 |
+
fake_response = requests.Response()
|
| 537 |
+
fake_response.status_code = int(http.client.OK)
|
| 538 |
+
fake_response._content = b''
|
| 539 |
+
upload_id = 'ABCdef189XY_super_serious'
|
| 540 |
+
resumable_url_template = (
|
| 541 |
+
'https://www.googleapis.com/upload/storage/v1/b/{bucket}'
|
| 542 |
+
'/o?uploadType=resumable&upload_id={upload_id}')
|
| 543 |
+
resumable_url = resumable_url_template.format(
|
| 544 |
+
bucket=bucket, upload_id=upload_id)
|
| 545 |
+
fake_response.headers['location'] = resumable_url
|
| 546 |
+
fake_response.headers['x-guploader-uploadid'] = upload_id
|
| 547 |
+
|
| 548 |
+
post_method = mock.Mock(return_value=fake_response, spec=[])
|
| 549 |
+
transport = mock.Mock(request=post_method, spec=['request'])
|
| 550 |
+
|
| 551 |
+
.. doctest:: resumable-initiate
|
| 552 |
+
|
| 553 |
+
>>> from google.resumable_media.requests import ResumableUpload
|
| 554 |
+
>>>
|
| 555 |
+
>>> url_template = (
|
| 556 |
+
... 'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
|
| 557 |
+
... 'uploadType=resumable')
|
| 558 |
+
>>> upload_url = url_template.format(bucket=bucket)
|
| 559 |
+
>>>
|
| 560 |
+
>>> chunk_size = 1024 * 1024 # 1MB
|
| 561 |
+
>>> upload = ResumableUpload(upload_url, chunk_size)
|
| 562 |
+
>>> stream = io.BytesIO(data)
|
| 563 |
+
>>> # The upload doesn't know how "big" it is until seeing a stream.
|
| 564 |
+
>>> upload.total_bytes is None
|
| 565 |
+
True
|
| 566 |
+
>>> metadata = {'name': blob_name}
|
| 567 |
+
>>> response = upload.initiate(transport, stream, metadata, content_type)
|
| 568 |
+
>>> response
|
| 569 |
+
<Response [200]>
|
| 570 |
+
>>> upload.resumable_url == response.headers['Location']
|
| 571 |
+
True
|
| 572 |
+
>>> upload.total_bytes == len(data)
|
| 573 |
+
True
|
| 574 |
+
>>> upload_id = response.headers['X-GUploader-UploadID']
|
| 575 |
+
>>> upload_id
|
| 576 |
+
'ABCdef189XY_super_serious'
|
| 577 |
+
>>> upload.resumable_url == upload_url + '&upload_id=' + upload_id
|
| 578 |
+
True
|
| 579 |
+
|
| 580 |
+
Once a :class:`.ResumableUpload` has been initiated, the resource is
|
| 581 |
+
transmitted in chunks until completion:
|
| 582 |
+
|
| 583 |
+
.. testsetup:: resumable-transmit
|
| 584 |
+
|
| 585 |
+
import io
|
| 586 |
+
import json
|
| 587 |
+
|
| 588 |
+
import mock
|
| 589 |
+
import requests
|
| 590 |
+
import http.client
|
| 591 |
+
|
| 592 |
+
from google import resumable_media
|
| 593 |
+
import google.resumable_media.requests.upload as upload_mod
|
| 594 |
+
|
| 595 |
+
data = b'01234567891'
|
| 596 |
+
stream = io.BytesIO(data)
|
| 597 |
+
# Create an "already initiated" upload.
|
| 598 |
+
upload_url = 'http://test.invalid'
|
| 599 |
+
chunk_size = 256 * 1024 # 256KB
|
| 600 |
+
upload = upload_mod.ResumableUpload(upload_url, chunk_size)
|
| 601 |
+
upload._resumable_url = 'http://test.invalid?upload_id=mocked'
|
| 602 |
+
upload._stream = stream
|
| 603 |
+
upload._content_type = 'text/plain'
|
| 604 |
+
upload._total_bytes = len(data)
|
| 605 |
+
|
| 606 |
+
# After-the-fact update the chunk size so that len(data)
|
| 607 |
+
# is split into three.
|
| 608 |
+
upload._chunk_size = 4
|
| 609 |
+
# Make three fake responses.
|
| 610 |
+
fake_response0 = requests.Response()
|
| 611 |
+
fake_response0.status_code = http.client.PERMANENT_REDIRECT
|
| 612 |
+
fake_response0.headers['range'] = 'bytes=0-3'
|
| 613 |
+
|
| 614 |
+
fake_response1 = requests.Response()
|
| 615 |
+
fake_response1.status_code = http.client.PERMANENT_REDIRECT
|
| 616 |
+
fake_response1.headers['range'] = 'bytes=0-7'
|
| 617 |
+
|
| 618 |
+
fake_response2 = requests.Response()
|
| 619 |
+
fake_response2.status_code = int(http.client.OK)
|
| 620 |
+
bucket = 'some-bucket'
|
| 621 |
+
blob_name = 'file.txt'
|
| 622 |
+
payload = {
|
| 623 |
+
'bucket': bucket,
|
| 624 |
+
'name': blob_name,
|
| 625 |
+
'size': '{:d}'.format(len(data)),
|
| 626 |
+
}
|
| 627 |
+
fake_response2._content = json.dumps(payload).encode('utf-8')
|
| 628 |
+
|
| 629 |
+
# Use the fake responses to mock a transport.
|
| 630 |
+
responses = [fake_response0, fake_response1, fake_response2]
|
| 631 |
+
put_method = mock.Mock(side_effect=responses, spec=[])
|
| 632 |
+
transport = mock.Mock(request=put_method, spec=['request'])
|
| 633 |
+
|
| 634 |
+
.. doctest:: resumable-transmit
|
| 635 |
+
|
| 636 |
+
>>> response0 = upload.transmit_next_chunk(transport)
|
| 637 |
+
>>> response0
|
| 638 |
+
<Response [308]>
|
| 639 |
+
>>> upload.finished
|
| 640 |
+
False
|
| 641 |
+
>>> upload.bytes_uploaded == upload.chunk_size
|
| 642 |
+
True
|
| 643 |
+
>>>
|
| 644 |
+
>>> response1 = upload.transmit_next_chunk(transport)
|
| 645 |
+
>>> response1
|
| 646 |
+
<Response [308]>
|
| 647 |
+
>>> upload.finished
|
| 648 |
+
False
|
| 649 |
+
>>> upload.bytes_uploaded == 2 * upload.chunk_size
|
| 650 |
+
True
|
| 651 |
+
>>>
|
| 652 |
+
>>> response2 = upload.transmit_next_chunk(transport)
|
| 653 |
+
>>> response2
|
| 654 |
+
<Response [200]>
|
| 655 |
+
>>> upload.finished
|
| 656 |
+
True
|
| 657 |
+
>>> upload.bytes_uploaded == upload.total_bytes
|
| 658 |
+
True
|
| 659 |
+
>>> json_response = response2.json()
|
| 660 |
+
>>> json_response['bucket'] == bucket
|
| 661 |
+
True
|
| 662 |
+
>>> json_response['name'] == blob_name
|
| 663 |
+
True
|
| 664 |
+
"""
|
| 665 |
+
from google._async_resumable_media.requests.download import ChunkedDownload
|
| 666 |
+
from google._async_resumable_media.requests.download import Download
|
| 667 |
+
from google._async_resumable_media.requests.upload import MultipartUpload
|
| 668 |
+
from google._async_resumable_media.requests.download import RawChunkedDownload
|
| 669 |
+
from google._async_resumable_media.requests.download import RawDownload
|
| 670 |
+
from google._async_resumable_media.requests.upload import ResumableUpload
|
| 671 |
+
from google._async_resumable_media.requests.upload import SimpleUpload
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
__all__ = [
|
| 675 |
+
"ChunkedDownload",
|
| 676 |
+
"Download",
|
| 677 |
+
"MultipartUpload",
|
| 678 |
+
"RawChunkedDownload",
|
| 679 |
+
"RawDownload",
|
| 680 |
+
"ResumableUpload",
|
| 681 |
+
"SimpleUpload",
|
| 682 |
+
]
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (21.1 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/_request_helpers.cpython-310.pyc
ADDED
|
Binary file (4.15 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/download.cpython-310.pyc
ADDED
|
Binary file (15.7 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/__pycache__/upload.cpython-310.pyc
ADDED
|
Binary file (18 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/_request_helpers.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Shared utilities used by both downloads and uploads.
|
| 16 |
+
|
| 17 |
+
These utilities are explicitly catered to ``requests``-like transports.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
import functools
|
| 22 |
+
|
| 23 |
+
from google._async_resumable_media import _helpers
|
| 24 |
+
from google.resumable_media import common
|
| 25 |
+
|
| 26 |
+
from google.auth.transport import _aiohttp_requests as aiohttp_requests # type: ignore
|
| 27 |
+
import aiohttp # type: ignore
|
| 28 |
+
|
| 29 |
+
# Retry policy applied when a request fails and must be re-issued.
_DEFAULT_RETRY_STRATEGY = common.RetryStrategy()
# Number of bytes pulled per read when streaming a response body.
_SINGLE_GET_CHUNK_SIZE = 8192


# Seconds allowed for establishing a connection (the socket ``connect()``
# call). Deliberately not a multiple of 3 so it does not align with
# typical TCP retransmission timing (~2.5-3s).
_DEFAULT_CONNECT_TIMEOUT = 61
# Seconds allowed to elapse between bytes received from the server.
_DEFAULT_READ_TIMEOUT = 60
_DEFAULT_TIMEOUT = aiohttp.ClientTimeout(
    connect=_DEFAULT_CONNECT_TIMEOUT,
    sock_read=_DEFAULT_READ_TIMEOUT,
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class RequestsMixin(object):
    """Mix-in implementing ``requests``-style response accessors.

    General-purpose helpers whose implementations target the async
    transport's response objects (integer ``status`` attribute,
    ``_headers`` mapping, wrappable by
    ``aiohttp_requests._CombinedResponse``).
    """

    @staticmethod
    def _get_status_code(response):
        """Return the integer HTTP status code of ``response``.

        Args:
            response: The HTTP response object.

        Returns:
            int: The status code.
        """
        return response.status

    @staticmethod
    def _get_headers(response):
        """Return the header mapping of ``response`` (case-insensitive keys).

        Args:
            response: The HTTP response object.

        Returns:
            The header mapping.
        """
        # The private ``_headers`` attribute is read directly because the
        # async test doubles patch ``_headers`` rather than ``headers``.
        return response._headers

    @staticmethod
    async def _get_body(response):
        """Read and return the full response body.

        Args:
            response: The HTTP response object.

        Returns:
            bytes: The body of ``response``.
        """
        combined = aiohttp_requests._CombinedResponse(response)
        return await combined.data.read()
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
class RawRequestsMixin(RequestsMixin):
    """Variant of :class:`RequestsMixin` that reads the raw response body."""

    @staticmethod
    async def _get_body(response):
        """Read and return the raw (undecoded) response body.

        Args:
            response: The HTTP response object.

        Returns:
            bytes: The body of ``response``.
        """
        combined = aiohttp_requests._CombinedResponse(response)
        return await combined.raw_content()
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
async def http_request(
    transport,
    method,
    url,
    data=None,
    headers=None,
    retry_strategy=_DEFAULT_RETRY_STRATEGY,
    **transport_kwargs
):
    """Make an HTTP request.

    Args:
        transport (~requests.Session): A ``requests`` object which can make
            authenticated requests via a ``request()`` method. This method
            must accept an HTTP method, an upload URL, a ``data`` keyword
            argument and a ``headers`` keyword argument.
        method (str): The HTTP method for the request.
        url (str): The URL for the request.
        data (Optional[bytes]): The body of the request.
        headers (Mapping[str, str]): The headers for the request (``transport``
            may also add additional headers).
        retry_strategy (~google.resumable_media.common.RetryStrategy): The
            strategy to use if the request fails and must be retried.
        transport_kwargs (Dict[str, str]): Extra keyword arguments to be
            passed along to ``transport.request``.

    Returns:
        ~requests.Response: The return value of ``transport.request()``.
    """
    # NOTE(asyncio/aiohttp): Sync versions use a tuple for two timeouts,
    # default connect timeout and read timeout. Since async requests only
    # accepts a single value, this is using the connect timeout. This logic
    # diverges from the sync implementation.
    transport_kwargs.setdefault("timeout", _DEFAULT_TIMEOUT)

    request_call = functools.partial(
        transport.request, method, url, data=data, headers=headers, **transport_kwargs
    )

    # Retry loop: ``wait_and_retry`` re-invokes the partial until the status
    # code is acceptable or the retry strategy is exhausted.
    return await _helpers.wait_and_retry(
        request_call, RequestsMixin._get_status_code, retry_strategy
    )
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/download.py
ADDED
|
@@ -0,0 +1,465 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Support for downloading media from Google APIs."""
|
| 16 |
+
|
| 17 |
+
import urllib3.response # type: ignore
|
| 18 |
+
import http
|
| 19 |
+
|
| 20 |
+
from google._async_resumable_media import _download
|
| 21 |
+
from google._async_resumable_media import _helpers
|
| 22 |
+
from google._async_resumable_media.requests import _request_helpers
|
| 23 |
+
from google.resumable_media import common
|
| 24 |
+
from google.resumable_media import _helpers as sync_helpers
|
| 25 |
+
from google.resumable_media.requests import download
|
| 26 |
+
|
| 27 |
+
_CHECKSUM_MISMATCH = download._CHECKSUM_MISMATCH
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class Download(_request_helpers.RequestsMixin, _download.Download):
    """Helper to manage downloading a resource from a Google API.

    "Slices" of the resource can be retrieved by specifying a range
    with ``start`` and / or ``end``. However, in typical usage, neither
    ``start`` nor ``end`` is expected to be provided.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If not
            provided, but ``end`` is provided, will download from the
            beginning to ``end`` of the media.
        end (int): The last byte in a range to be downloaded. If not
            provided, but ``start`` is provided, will download from the
            ``start`` to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The response headers must contain
            a checksum of the requested type. If the headers lack an
            appropriate checksum (for instance in the case of transcoded or
            ranged downloads where the remote service does not know the
            correct checksum) an INFO-level log will be emitted. Supported
            values are "md5", "crc32c" and None. The default is "md5".

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    async def _write_to_stream(self, response):
        """Write response body to a write-able stream.

        .. note:

            This method assumes that the ``_stream`` attribute is set on the
            current download.

        Args:
            response (~requests.Response): The HTTP response object.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
        """

        # `_get_expected_checksum()` may return None even if a checksum was
        # requested, in which case it will emit an info log _MISSING_CHECKSUM.
        # If an invalid checksum type is specified, this will raise ValueError.
        expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
            response, self._get_headers, self.media_url, checksum_type=self.checksum
        )

        # For gzip-encoded responses, `_add_decoder` patches the response so
        # that `checksum_object` is fed the *compressed* bytes inside the
        # decoder and returns a no-op hash for the loop below; otherwise it
        # returns `checksum_object` itself, so exactly one object ever
        # accumulates the real digest.
        local_checksum_object = _add_decoder(response, checksum_object)

        async for chunk in response.content.iter_chunked(
            _request_helpers._SINGLE_GET_CHUNK_SIZE
        ):
            self._stream.write(chunk)
            local_checksum_object.update(chunk)

        # Don't validate the checksum for partial responses.
        if (
            expected_checksum is not None
            and response.status != http.client.PARTIAL_CONTENT
        ):
            # Validation always reads `checksum_object` (not the local alias)
            # because in the gzip case only the patched decoder updated it.
            actual_checksum = sync_helpers.prepare_checksum_digest(
                checksum_object.digest()
            )
            if actual_checksum != expected_checksum:
                msg = _CHECKSUM_MISMATCH.format(
                    self.media_url,
                    expected_checksum,
                    actual_checksum,
                    checksum_type=self.checksum.upper(),
                )
                raise common.DataCorruption(response, msg)

    async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, then the downloaded
        resource will be written to the stream.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
            ValueError: If the current :class:`Download` has already
                finished.
        """
        method, url, payload, headers = self._prepare_request()
        # NOTE: We assume "payload is None" but pass it along anyway.
        request_kwargs = {
            "data": payload,
            "headers": headers,
            "retry_strategy": self._retry_strategy,
            "timeout": timeout,
        }

        # Only request streaming when there is a stream to write into.
        if self._stream is not None:
            request_kwargs["stream"] = True

        result = await _request_helpers.http_request(
            transport, method, url, **request_kwargs
        )

        # Sync (non-awaited) here; contrast with ChunkedDownload, whose
        # `_process_response` is a coroutine.
        self._process_response(result)

        if self._stream is not None:
            await self._write_to_stream(result)

        return result
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class RawDownload(_request_helpers.RawRequestsMixin, _download.Download):
    """Helper to manage downloading a raw resource from a Google API.

    "Slices" of the resource can be retrieved by specifying a range
    with ``start`` and / or ``end``. However, in typical usage, neither
    ``start`` nor ``end`` is expected to be provided.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            the downloaded resource can be written to.
        start (int): The first byte in a range to be downloaded. If not
            provided, but ``end`` is provided, will download from the
            beginning to ``end`` of the media.
        end (int): The last byte in a range to be downloaded. If not
            provided, but ``start`` is provided, will download from the
            ``start`` to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum Optional([str]): The type of checksum to compute to verify
            the integrity of the object. The response headers must contain
            a checksum of the requested type. If the headers lack an
            appropriate checksum (for instance in the case of transcoded or
            ranged downloads where the remote service does not know the
            correct checksum) an INFO-level log will be emitted. Supported
            values are "md5", "crc32c" and None. The default is "md5".

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
    """

    async def _write_to_stream(self, response):
        """Write response body to a write-able stream.

        .. note:

            This method assumes that the ``_stream`` attribute is set on the
            current download.

        Args:
            response (~requests.Response): The HTTP response object.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
        """

        # `_get_expected_checksum()` may return None even if a checksum was
        # requested, in which case it will emit an info log _MISSING_CHECKSUM.
        # If an invalid checksum type is specified, this will raise ValueError.
        expected_checksum, checksum_object = sync_helpers._get_expected_checksum(
            response, self._get_headers, self.media_url, checksum_type=self.checksum
        )

        # Unlike Download._write_to_stream, no decoder patching is needed:
        # raw downloads hash the wire bytes directly.
        async for chunk in response.content.iter_chunked(
            _request_helpers._SINGLE_GET_CHUNK_SIZE
        ):
            self._stream.write(chunk)
            checksum_object.update(chunk)

        # Don't validate the checksum for partial responses.
        if (
            expected_checksum is not None
            and response.status != http.client.PARTIAL_CONTENT
        ):
            actual_checksum = sync_helpers.prepare_checksum_digest(
                checksum_object.digest()
            )

            if actual_checksum != expected_checksum:
                msg = _CHECKSUM_MISMATCH.format(
                    self.media_url,
                    expected_checksum,
                    actual_checksum,
                    checksum_type=self.checksum.upper(),
                )
                raise common.DataCorruption(response, msg)

    async def consume(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
        """Consume the resource to be downloaded.

        If a ``stream`` is attached to this download, then the downloaded
        resource will be written to the stream.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ~google.resumable_media.common.DataCorruption: If the download's
                checksum doesn't agree with server-computed checksum.
            ValueError: If the current :class:`Download` has already
                finished.
        """
        method, url, payload, headers = self._prepare_request()
        # NOTE: We assume "payload is None" but pass it along anyway.
        # FIX: forward the caller-supplied ``timeout`` — previously it was
        # accepted but never passed to ``http_request``, so the module-level
        # default always applied regardless of what the caller requested.
        result = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )

        self._process_response(result)

        if self._stream is not None:
            await self._write_to_stream(result)

        return result
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class ChunkedDownload(_request_helpers.RequestsMixin, _download.ChunkedDownload):
    """Download a resource in chunks from a Google API.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.

    Raises:
        ValueError: If ``start`` is negative.
    """

    async def consume_next_chunk(
        self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
    ):
        """Consume the next chunk of the resource to be downloaded.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ValueError: If the current download has finished.
        """
        method, url, payload, headers = self._prepare_request()
        # The prepared payload is expected to be None; forward it regardless.
        request_args = dict(
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        response = await _request_helpers.http_request(
            transport, method, url, **request_args
        )

        # ``_process_response`` is a coroutine in the chunked case.
        await self._process_response(response)
        return response
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class RawChunkedDownload(_request_helpers.RawRequestsMixin, _download.ChunkedDownload):
    """Download a raw resource in chunks from a Google API.

    Args:
        media_url (str): The URL containing the media to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each
            request.
        stream (IO[bytes]): A write-able stream (i.e. file-like object) that
            will be used to concatenate chunks of the resource as they are
            downloaded.
        start (int): The first byte in a range to be downloaded. If not
            provided, defaults to ``0``.
        end (int): The last byte in a range to be downloaded. If not
            provided, will download to the end of the media.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with each request, e.g. headers for data encryption
            key headers.

    Attributes:
        media_url (str): The URL containing the media to be downloaded.
        start (Optional[int]): The first byte in a range to be downloaded.
        end (Optional[int]): The last byte in a range to be downloaded.
        chunk_size (int): The number of bytes to be retrieved in each request.

    Raises:
        ValueError: If ``start`` is negative.
    """

    async def consume_next_chunk(
        self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
    ):
        """Consume the next chunk of the resource to be downloaded.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ValueError: If the current download has finished.
        """
        method, url, payload, headers = self._prepare_request()
        # NOTE: payload is assumed to be None for a download request, but it
        # is forwarded unconditionally for parity with the sync code path.
        chunk_response = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        await self._process_response(chunk_response)
        return chunk_response
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def _add_decoder(response_raw, checksum):
|
| 413 |
+
"""Patch the ``_decoder`` on a ``urllib3`` response.
|
| 414 |
+
|
| 415 |
+
This is so that we can intercept the compressed bytes before they are
|
| 416 |
+
decoded.
|
| 417 |
+
|
| 418 |
+
Only patches if the content encoding is ``gzip``.
|
| 419 |
+
|
| 420 |
+
Args:
|
| 421 |
+
response_raw (urllib3.response.HTTPResponse): The raw response for
|
| 422 |
+
an HTTP request.
|
| 423 |
+
checksum (object):
|
| 424 |
+
A checksum which will be updated with compressed bytes.
|
| 425 |
+
|
| 426 |
+
Returns:
|
| 427 |
+
object: Either the original ``checksum`` if ``_decoder`` is not
|
| 428 |
+
patched, or a ``_DoNothingHash`` if the decoder is patched, since the
|
| 429 |
+
caller will no longer need to hash to decoded bytes.
|
| 430 |
+
"""
|
| 431 |
+
|
| 432 |
+
encoding = response_raw.headers.get("content-encoding", "").lower()
|
| 433 |
+
if encoding != "gzip":
|
| 434 |
+
return checksum
|
| 435 |
+
|
| 436 |
+
response_raw._decoder = _GzipDecoder(checksum)
|
| 437 |
+
return _helpers._DoNothingHash()
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
class _GzipDecoder(urllib3.response.GzipDecoder):
    """Custom subclass of ``urllib3`` decoder for ``gzip``-ed bytes.

    Allows a checksum function to see the compressed bytes before they are
    decoded. This way the checksum of the compressed value can be computed.

    Args:
        checksum (object):
            A checksum which will be updated with compressed bytes.
    """

    def __init__(self, checksum):
        super().__init__()
        self._checksum = checksum

    def decompress(self, data):
        """Decompress the bytes.

        Args:
            data (bytes): The compressed bytes to be decompressed.

        Returns:
            bytes: The decompressed bytes from ``data``.
        """
        # Feed the checksum the *compressed* bytes before delegating to the
        # real gzip decoder.
        self._checksum.update(data)
        return super().decompress(data)
|
venv/lib/python3.10/site-packages/google/_async_resumable_media/requests/upload.py
ADDED
|
@@ -0,0 +1,515 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2017 Google Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""Support for resumable uploads.
|
| 16 |
+
|
| 17 |
+
Also supported here are simple (media) uploads and multipart
|
| 18 |
+
uploads that contain both metadata and a small file as payload.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
from google._async_resumable_media import _upload
|
| 23 |
+
from google._async_resumable_media.requests import _request_helpers
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class SimpleUpload(_request_helpers.RequestsMixin, _upload.SimpleUpload):
    """Upload a resource to a Google API.

    A **simple** media upload sends no metadata and completes the upload
    in a single request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    async def transmit(
        self,
        transport,
        data,
        content_type,
        timeout=_request_helpers._DEFAULT_TIMEOUT,
    ):
        """Transmit the resource to be uploaded.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            data (bytes): The resource content to be uploaded.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        method, url, payload, headers = self._prepare_request(data, content_type)

        result = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        # Single-shot upload: validate the final status before returning.
        self._process_response(result)
        return result
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class MultipartUpload(_request_helpers.RequestsMixin, _upload.MultipartUpload):
    """Upload a resource with metadata to a Google API.

    A **multipart** upload sends both metadata and the resource in a single
    (multipart) request.

    Args:
        upload_url (str): The URL where the content will be uploaded.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the request, e.g. headers for encrypted data.
        checksum (Optional[str]): The type of checksum to compute to verify
            the integrity of the object. The request metadata will be amended
            to include the computed value. Using this option will override a
            manually-set checksum value. Supported values are "md5",
            "crc32c" and None. The default is None.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.
    """

    async def transmit(
        self,
        transport,
        data,
        metadata,
        content_type,
        timeout=_request_helpers._DEFAULT_TIMEOUT,
    ):
        """Send the resource content and its metadata in one request.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            data (bytes): The resource content to be uploaded.
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        # Assemble the multipart body (content + metadata) plus the verb,
        # URL and headers for the request.
        verb, request_url, body, request_headers = self._prepare_request(
            data, metadata, content_type
        )

        result = await _request_helpers.http_request(
            transport,
            verb,
            request_url,
            data=body,
            headers=request_headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        # Raises on an unexpected status code before returning.
        self._process_response(result)
        return result
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class ResumableUpload(_request_helpers.RequestsMixin, _upload.ResumableUpload):
    """Initiate and fulfill a resumable upload to a Google API.

    A **resumable** upload sends an initial request with the resource metadata
    and then gets assigned an upload ID / upload URL to send bytes to.
    Using the upload URL, the upload is then done in chunks (determined by
    the user) until all bytes have been uploaded.

    When constructing a resumable upload, only the resumable upload URL and
    the chunk size are required:

    .. testsetup:: resumable-constructor

        bucket = 'bucket-foo'

    .. doctest:: resumable-constructor

        >>> from google.resumable_media.requests import ResumableUpload
        >>>
        >>> url_template = (
        ...     'https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?'
        ...     'uploadType=resumable')
        >>> upload_url = url_template.format(bucket=bucket)
        >>>
        >>> chunk_size = 3 * 1024 * 1024  # 3MB
        >>> upload = ResumableUpload(upload_url, chunk_size)

    When initiating an upload (via :meth:`initiate`), the caller is expected
    to pass the resource being uploaded as a file-like ``stream``. If the size
    of the resource is explicitly known, it can be passed in directly via the
    ``total_bytes`` argument. If the stream is in a "final" state (i.e. it
    won't have any more bytes written to it), the total number of bytes can
    instead be determined implicitly from the ``stream`` itself.

    If the size of the resource is **unknown** when the upload is initiated --
    e.g. if the resource is being dynamically created on the client, such as
    application logs -- pass ``stream_final=False`` to :meth:`initiate`. In
    that case :attr:`total_bytes` stays :data:`None` until the upload is
    concluded by a chunk shorter than :attr:`chunk_size`.

    Args:
        upload_url (str): The URL where the resumable upload will be initiated.
        chunk_size (int): The size of each chunk used to upload the resource.
        headers (Optional[Mapping[str, str]]): Extra headers that should
            be sent with the :meth:`initiate` request, e.g. headers for
            encrypted data. These **will not** be sent with
            :meth:`transmit_next_chunk` or :meth:`recover` requests.
        checksum (Optional[str]): The type of checksum to compute to verify
            the integrity of the object. After the upload is complete, the
            server-computed checksum of the resulting object will be checked
            and google.resumable_media.common.DataCorruption will be raised on
            a mismatch. The corrupted file will not be deleted from the remote
            host automatically. Supported values are "md5", "crc32c" and None.
            The default is None.

    Attributes:
        upload_url (str): The URL where the content will be uploaded.

    Raises:
        ValueError: If ``chunk_size`` is not a multiple of
            :data:`.UPLOAD_CHUNK_SIZE`.
    """

    async def initiate(
        self,
        transport,
        stream,
        metadata,
        content_type,
        total_bytes=None,
        stream_final=True,
        timeout=_request_helpers._DEFAULT_TIMEOUT,
    ):
        """Initiate a resumable upload.

        By default, this method assumes your ``stream`` is in a "final"
        state ready to transmit. However, ``stream_final=False`` can be used
        to indicate that the size of the resource is not known. This can happen
        if bytes are being dynamically fed into ``stream``, e.g. if the stream
        is attached to application logs.

        If ``stream_final=False`` is used, :attr:`chunk_size` bytes will be
        read from the stream every time :meth:`transmit_next_chunk` is called.
        If one of those reads produces strictly fewer bytes than the chunk
        size, the upload will be concluded.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            stream (IO[bytes]): The stream (i.e. file-like object) that will
                be uploaded. The stream **must** be at the beginning (i.e.
                ``stream.tell() == 0``).
            metadata (Mapping[str, str]): The resource metadata, such as an
                ACL list.
            content_type (str): The content type of the resource, e.g. a JPEG
                image has content type ``image/jpeg``.
            total_bytes (Optional[int]): The total number of bytes to be
                uploaded. If specified, the upload size **will not** be
                determined from the stream (even if ``stream_final=True``).
            stream_final (Optional[bool]): Indicates if the ``stream`` is
                "final" (i.e. no more bytes will be added to it). In this case
                we determine the upload size from the size of the stream. If
                ``total_bytes`` is passed, this argument will be ignored.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        method, url, payload, headers = self._prepare_initiate_request(
            stream,
            metadata,
            content_type,
            total_bytes=total_bytes,
            stream_final=stream_final,
        )
        response = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        # Records the upload URL assigned by the server (raises on an
        # unexpected status code).
        self._process_initiate_response(response)
        return response

    async def transmit_next_chunk(
        self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT
    ):
        """Transmit the next chunk of the resource to be uploaded.

        If the current upload was initiated with ``stream_final=False``,
        this method will dynamically determine if the upload has completed.
        The upload will be considered complete if the stream produces
        fewer than :attr:`chunk_size` bytes when a chunk is read from it.

        In the case of failure, an :exc:`InvalidResponse` exception is thrown
        that preserves the failed response on its ``response`` attribute, and
        the upload is put into an :attr:`~ResumableUpload.invalid` state; see
        :meth:`recover` for getting back to a usable state.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.

        Raises:
            ~google.resumable_media.common.InvalidResponse: If the status
                code is not 200 or 308.
            ~google.resumable_media.common.DataCorruption: If this is the final
                chunk, a checksum validation was requested, and the checksum
                does not match or is not available.
        """
        method, url, payload, headers = self._prepare_request()
        response = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            timeout=timeout,
        )
        # NOTE: This is a coroutine (unlike the sync implementation) because
        #       final-chunk checksum validation may need to read the response.
        await self._process_resumable_response(response, len(payload))
        return response

    async def recover(self, transport, timeout=_request_helpers._DEFAULT_TIMEOUT):
        """Recover from a failure.

        This method should be used when a :class:`ResumableUpload` is in an
        :attr:`~ResumableUpload.invalid` state due to a request failure.

        This will verify the progress with the server and make sure the
        current upload is in a valid state before :meth:`transmit_next_chunk`
        can be used again.

        Args:
            transport (~requests.Session): A ``requests`` object which can
                make authenticated requests.
            timeout (Optional[Union[float, aiohttp.ClientTimeout]]):
                The number of seconds to wait for the server response.
                Depending on the retry strategy, a request may be repeated
                several times using the same timeout each time.
                Can also be passed as an `aiohttp.ClientTimeout` object.

        Returns:
            ~requests.Response: The HTTP response returned by ``transport``.
        """
        method, url, payload, headers = self._prepare_recover_request()
        # NOTE: We assume "payload is None" but pass it along anyway.
        response = await _request_helpers.http_request(
            transport,
            method,
            url,
            data=payload,
            headers=headers,
            retry_strategy=self._retry_strategy,
            # FIX: previously recover() was the only request method that did
            # not accept/forward a timeout, so the helper's default always
            # applied; now it is consistent with initiate()/transmit_next_chunk().
            timeout=timeout,
        )
        self._process_recover_response(response)
        return response
|
venv/lib/python3.10/site-packages/google/api/__pycache__/annotations_pb2.cpython-310.pyc
ADDED
|
Binary file (1.44 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/auth_pb2.cpython-310.pyc
ADDED
|
Binary file (2.12 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/backend_pb2.cpython-310.pyc
ADDED
|
Binary file (2.16 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/billing_pb2.cpython-310.pyc
ADDED
|
Binary file (1.44 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/client_pb2.cpython-310.pyc
ADDED
|
Binary file (6.35 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/config_change_pb2.cpython-310.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/consumer_pb2.cpython-310.pyc
ADDED
|
Binary file (1.56 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/context_pb2.cpython-310.pyc
ADDED
|
Binary file (1.47 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/control_pb2.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/distribution_pb2.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/documentation_pb2.cpython-310.pyc
ADDED
|
Binary file (1.72 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/endpoint_pb2.cpython-310.pyc
ADDED
|
Binary file (1.31 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/error_reason_pb2.cpython-310.pyc
ADDED
|
Binary file (2.21 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/field_behavior_pb2.cpython-310.pyc
ADDED
|
Binary file (1.69 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/field_info_pb2.cpython-310.pyc
ADDED
|
Binary file (1.76 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/http_pb2.cpython-310.pyc
ADDED
|
Binary file (1.7 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/httpbody_pb2.cpython-310.pyc
ADDED
|
Binary file (1.39 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/label_pb2.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/launch_stage_pb2.cpython-310.pyc
ADDED
|
Binary file (1.35 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/log_pb2.cpython-310.pyc
ADDED
|
Binary file (1.43 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/logging_pb2.cpython-310.pyc
ADDED
|
Binary file (1.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/metric_pb2.cpython-310.pyc
ADDED
|
Binary file (3.24 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/monitored_resource_pb2.cpython-310.pyc
ADDED
|
Binary file (2.46 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/monitoring_pb2.cpython-310.pyc
ADDED
|
Binary file (1.55 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/policy_pb2.cpython-310.pyc
ADDED
|
Binary file (1.72 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/quota_pb2.cpython-310.pyc
ADDED
|
Binary file (2 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/resource_pb2.cpython-310.pyc
ADDED
|
Binary file (2.21 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/routing_pb2.cpython-310.pyc
ADDED
|
Binary file (1.6 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/service_pb2.cpython-310.pyc
ADDED
|
Binary file (4.29 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/source_info_pb2.cpython-310.pyc
ADDED
|
Binary file (1.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/system_parameter_pb2.cpython-310.pyc
ADDED
|
Binary file (1.63 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/usage_pb2.cpython-310.pyc
ADDED
|
Binary file (1.46 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/__pycache__/visibility_pb2.cpython-310.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|
venv/lib/python3.10/site-packages/google/api/config_change_pb2.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-

# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generated by the protocol buffer compiler.  DO NOT EDIT!
# Hand edits will be lost on the next regeneration from the .proto source.
# source: google/api/config_change.proto
# Protobuf Python Version: 4.25.3
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder

# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# Registers the compiled FileDescriptorProto (the serialized .proto schema)
# with the process-wide descriptor pool.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x1egoogle/api/config_change.proto\x12\ngoogle.api"\x97\x01\n\x0c\x43onfigChange\x12\x0f\n\x07\x65lement\x18\x01 \x01(\t\x12\x11\n\told_value\x18\x02 \x01(\t\x12\x11\n\tnew_value\x18\x03 \x01(\t\x12+\n\x0b\x63hange_type\x18\x04 \x01(\x0e\x32\x16.google.api.ChangeType\x12#\n\x07\x61\x64vices\x18\x05 \x03(\x0b\x32\x12.google.api.Advice"\x1d\n\x06\x41\x64vice\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t*O\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42q\n\x0e\x63om.google.apiB\x11\x43onfigChangeProtoP\x01ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\xa2\x02\x04GAPIb\x06proto3'
)

# Builds the Python message/enum classes (ConfigChange, Advice, ChangeType)
# into this module's namespace from the descriptor above.
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(
    DESCRIPTOR, "google.api.config_change_pb2", _globals
)
# The serialized offsets below are only needed by the pure-Python descriptor
# implementation; the C descriptors carry them internally.
if _descriptor._USE_C_DESCRIPTORS == False:
    _globals["DESCRIPTOR"]._options = None
    _globals[
        "DESCRIPTOR"
    ]._serialized_options = b"\n\016com.google.apiB\021ConfigChangeProtoP\001ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\242\002\004GAPI"
    _globals["_CHANGETYPE"]._serialized_start = 231
    _globals["_CHANGETYPE"]._serialized_end = 310
    _globals["_CONFIGCHANGE"]._serialized_start = 47
    _globals["_CONFIGCHANGE"]._serialized_end = 198
    _globals["_ADVICE"]._serialized_start = 200
    _globals["_ADVICE"]._serialized_end = 229
# @@protoc_insertion_point(module_scope)
|