Columns (name: dtype) — code: string; signature: string; docstring: string; loss_without_docstring: float64; loss_with_docstring: float64; factor: float64. Each record below lists these six fields in this order; long code and docstring fields are truncated with '...'.
return _get_bucket_attribute(bucket, 'storageClass', 'StorageClass', retry_params=retry_params, _account_id=_account_id)
def get_storage_class(bucket, retry_params=None, _account_id=None)
Returns the storage class for the given bucket. https://cloud.google.com/storage/docs/storage-classes Args: bucket: A Google Cloud Storage bucket of form '/bucket'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. R...
3.450047
4.451004
0.775117
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) common.validate_bucket_path(bucket) status, headers, content = api.get_bucket('%s?%s' % (bucket, query_param)) errors.check_status(status, [200], bucket, resp_headers=headers, body=cont...
def _get_bucket_attribute(bucket, query_param, xml_response_tag, retry_params=None, _account_id=None)
Helper method to request a bucket parameter and parse the response. Args: bucket: A Google Cloud Storage bucket of form '/bucket'. query_param: The query parameter to include in the get bucket request. xml_response_tag: The expected tag in the xml response. retry_params: An api_utils.RetryParams for ...
3.854723
3.786155
1.01811
common.validate_file_path(filename) api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) status, headers, content = api.head_object( api_utils._quote_filename(filename)) errors.check_status(status, [200], filename, resp_headers=heade...
def stat(filename, retry_params=None, _account_id=None)
Get GCSFileStat of a Google Cloud Storage file. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Returns: a GCSFileStat object containing ...
3.703864
3.600425
1.02873
common.validate_file_path(src) common.validate_file_path(dst) if metadata is None: metadata = {} copy_meta = 'COPY' else: copy_meta = 'REPLACE' metadata.update({'x-goog-copy-source': src, 'x-goog-metadata-directive': copy_meta}) api = storage_api._get_storage_api(retry_pa...
def copy2(src, dst, metadata=None, retry_params=None)
Copy the file content from src to dst. Args: src: /bucket/filename dst: /bucket/filename metadata: a dict of metadata for this copy. If None, old metadata is copied. For example, {'x-goog-meta-foo': 'bar'}. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default ...
3.693357
3.591774
1.028282
if prefix: common.validate_bucket_path(path_prefix) bucket = path_prefix else: bucket, prefix = common._process_path_prefix(path_prefix) if marker and marker.startswith(bucket): marker = marker[len(bucket) + 1:] api = storage_api._get_storage_api(retry_params=retry_params, ...
def listbucket(path_prefix, marker=None, prefix=None, max_keys=None, delimiter=None, retry_params=None, _account_id=None)
Returns a GCSFileStat iterator over a bucket. Optional arguments can limit the result to a subset of files under bucket. This function has two modes: 1. List bucket mode: Lists all files in the bucket without any concept of hierarchy. GCS doesn't have real directory hierarchies. 2. Directory emulation mo...
2.426115
3.077791
0.788265
api = storage_api._get_storage_api(retry_params=retry_params, account_id=_account_id) if os.getenv('SERVER_SOFTWARE').startswith('Dev'): def _temp_func(file_list, destination_file, content_type): bucket = '/' + destination_file.split('/')[1] + '/' with open(...
def compose(list_of_files, destination_file, files_metadata=None, content_type=None, retry_params=None, _account_id=None)
Runs the GCS Compose on the given files. Merges between 2 and 32 files into one file. Composite files may even be built from other existing composites, provided that the total component count does not exceed 1024. See here for details: https://cloud.google.com/storage/docs/composite-objects Args: list_o...
3.441729
3.571709
0.963608
common.validate_file_path(destination_file) bucket = destination_file[0:(destination_file.index('/', 1) + 1)] try: if isinstance(file_list, types.StringTypes): raise TypeError list_len = len(file_list) except TypeError: raise TypeError('file_list must be a list') if list_len > number_of_...
def _validate_compose_list(destination_file, file_list, files_metadata=None, number_of_files=32)
Validates the file_list and merges the file_list, files_metadata. Args: destination_file: Path to the file (i.e. /destination_bucket/destination_file). file_list: List of files to compose, see compose for details. files_metadata: Meta details for each file in the file_list. number_of_files: Maximum number ...
3.422943
3.371963
1.015119
for e in root.getiterator(common._T_CONTENTS): st_ctime, size, etag, key = None, None, None, None for child in e.getiterator('*'): if child.tag == common._T_LAST_MODIFIED: st_ctime = common.dt_str_to_posix(child.text) elif child.tag == common._T_ETAG: etag = chil...
def _next_file_gen(self, root)
Generator for next file element in the document. Args: root: root element of the XML tree. Yields: GCSFileStat for the next file.
3.397257
3.037824
1.118319
for e in root.getiterator(common._T_COMMON_PREFIXES): yield common.GCSFileStat( self._path + '/' + e.find(common._T_PREFIX).text, st_size=None, etag=None, st_ctime=None, is_dir=True) e.clear() yield None
def _next_dir_gen(self, root)
Generator for next directory element in the document. Args: root: root element in the XML tree. Yields: GCSFileStat for the next directory.
10.036906
7.393932
1.357452
if ('max-keys' in self._options and self._options['max-keys'] <= common._MAX_GET_BUCKET_RESULT): return False elements = self._find_elements( content, set([common._T_IS_TRUNCATED, common._T_NEXT_MARKER])) if elements.get(common._T_IS_TRUNCATED, 'false').lowe...
def _should_get_another_batch(self, content)
Whether to issue another GET bucket call. Args: content: response XML. Returns: True if should, also update self._options for the next request. False otherwise.
4.070587
3.385119
1.202495
element_mapping = {} result = StringIO.StringIO(result) for _, e in ET.iterparse(result, events=('end',)): if not elements: break if e.tag in elements: element_mapping[e.tag] = e.text elements.remove(e.tag) return element_mapping
def _find_elements(self, result, elements)
Find interesting elements from XML. This function tries to only look for specified elements without parsing the entire XML. The specified elements is better located near the beginning. Args: result: response XML. elements: a set of interesting element tags. Returns: A dict from ...
3.617611
3.193244
1.132895
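The early-exit iterparse trick above generalizes beyond GCS responses: parse only until every wanted tag has been seen, then stop. A minimal standalone sketch (the Python 3 io.BytesIO here is my own choice; the original uses StringIO under Python 2):

```python
import io
import xml.etree.ElementTree as ET

def find_elements(xml_bytes, elements):
    """Collect text of the wanted tags, stopping as soon as all are seen."""
    elements = set(elements)
    element_mapping = {}
    for _, e in ET.iterparse(io.BytesIO(xml_bytes), events=('end',)):
        if not elements:
            break  # early exit: the rest of the document is never parsed
        if e.tag in elements:
            element_mapping[e.tag] = e.text
            elements.remove(e.tag)
    return element_mapping

xml = (b'<r><IsTruncated>true</IsTruncated>'
       b'<NextMarker>m1</NextMarker><huge>...</huge></r>')
print(find_elements(xml, {'IsTruncated', 'NextMarker'}))
# {'IsTruncated': 'true', 'NextMarker': 'm1'}
```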
self.response.write('Creating file %s\n' % filename) write_retry_params = gcs.RetryParams(backoff_factor=1.1) gcs_file = gcs.open(filename, 'w', content_type='text/plain', options={'x-goog-meta-foo': 'foo', ...
def create_file(self, filename)
Create a file. The retry_params specified in the open call will override the default retry params for this particular file handle. Args: filename: filename.
2.32423
2.505136
0.927786
self.response.write('Listbucket result:\n') page_size = 1 stats = gcs.listbucket(bucket + '/foo', max_keys=page_size) while True: count = 0 for stat in stats: count += 1 self.response.write(repr(stat)) self.response.write('\n') if count != page_size or co...
def list_bucket(self, bucket)
Create several files and paginate through them. Production apps should set page_size to a practical value. Args: bucket: bucket.
3.443981
3.45461
0.996923
with gcs.open(filename, 'w') as f: f.write('abcde\n') blobstore_filename = '/gs' + filename return blobstore.create_gs_key(blobstore_filename)
def CreateFile(filename)
Create a GCS file with GCS client lib. Args: filename: GCS filename. Returns: The corresponding string blobkey for this GCS file.
5.038405
5.055385
0.996641
rpc = app_identity.create_rpc() app_identity.make_get_access_token_call(rpc, scopes, service_account_id) token, expires_at = yield rpc raise ndb.Return((token, expires_at))
def _make_token_async(scopes, service_account_id)
Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch.
5.061475
4.384845
1.154311
def sync_wrapper(self, *args, **kwds): method = getattr(self, name) future = method(*args, **kwds) return future.get_result() return sync_wrapper
def _make_sync_method(name)
Helper to synthesize a synchronous method from an async method name. Used by the @add_sync_methods class decorator below. Args: name: The name of the asynchronous method. Returns: A method (with first argument 'self') that retrieves and calls self.<name>, passing its own arguments, expects it to ret...
3.605031
2.816468
1.279983
for name in cls.__dict__.keys(): if name.endswith('_async'): sync_name = name[:-6] if not hasattr(cls, sync_name): setattr(cls, sync_name, _make_sync_method(name)) return cls
def add_sync_methods(cls)
Class decorator to add synchronous methods corresponding to async methods. This modifies the class in place, adding additional methods to it. If a synchronous method of a given name already exists it is not replaced. Args: cls: A class. Returns: The same class, modified in place.
2.732586
2.74543
0.995321
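The decorator pair above is easy to reproduce outside ndb. A minimal sketch, assuming only that async methods return something with a get_result() method; the _Future stub below stands in for ndb's future type:

```python
class _Future(object):
    """Stand-in for an ndb future: holds a ready value."""
    def __init__(self, value):
        self._value = value
    def get_result(self):
        return self._value

def _make_sync_method(name):
    def sync_wrapper(self, *args, **kwds):
        # Call the async method and block on its future.
        return getattr(self, name)(*args, **kwds).get_result()
    return sync_wrapper

def add_sync_methods(cls):
    for name in list(cls.__dict__.keys()):
        if name.endswith('_async'):
            sync_name = name[:-len('_async')]
            if not hasattr(cls, sync_name):
                setattr(cls, sync_name, _make_sync_method(name))
    return cls

@add_sync_methods
class Client(object):
    def fetch_async(self, url):
        return _Future('payload for %s' % url)

print(Client().fetch('http://example.com'))  # payload for http://example.com
```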
retry_wrapper = api_utils._RetryWrapper( self.retry_params, retriable_exceptions=api_utils._RETRIABLE_EXCEPTIONS, should_retry=api_utils._should_retry) resp = yield retry_wrapper.run( self.urlfetch_async, url=url, method=method, headers=headers, ...
def do_request_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None)
Issue one HTTP request. It performs async retries using tasklets. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to ...
2.846779
3.277542
0.868571
key = '%s,%s' % (self.service_account_id, ','.join(self.scopes)) ts = yield _AE_TokenStorage_.get_by_id_async( key, use_cache=True, use_memcache=self.retry_params.memcache_access_token, use_datastore=self.retry_params.save_access_token) if refresh or ts is None or ts.exp...
def get_token_async(self, refresh=False)
Get an authentication token. The token is cached in memcache, keyed by the scopes argument. Uses a random token expiration headroom value generated in the constructor to eliminate a burst of GET_ACCESS_TOKEN API requests. Args: refresh: If True, ignore a cached token; default False. Yields:...
3.604033
3.473031
1.03772
headers = {} if headers is None else dict(headers) headers.update(self.user_agent) try: self.token = yield self.get_token_async() except app_identity.InternalError, e: if os.environ.get('DATACENTER', '').endswith('sandman'): self.token = None logging.warning('Could not f...
def urlfetch_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None, follow_redirects=False)
Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which...
4.416503
4.719441
0.935811
length = headers.get('x-goog-stored-content-length') if length is None: length = headers.get('content-length') return length
def get_stored_content_length(headers)
Return the content length (in bytes) of the object as stored in GCS. x-goog-stored-content-length should always be present except when called via the local dev_appserver. Therefore, if it is not present, we default to the standard content-length header. Args: headers: a dict of headers from the http respons...
3.657306
2.891211
1.264974
return dict((k, v) for k, v in headers.iteritems() if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def get_metadata(headers)
Get user-defined options from HTTP response headers.
6.434036
5.866315
1.096776
_validate_path(path_prefix) if not _GCS_PATH_PREFIX_REGEX.match(path_prefix): raise ValueError('Path prefix should have format /bucket, /bucket/, ' 'or /bucket/prefix but got %s.' % path_prefix) bucket_name_end = path_prefix.find('/', 1) bucket = path_prefix prefix = None if buck...
def _process_path_prefix(path_prefix)
Validate and process a Google Cloud Storage path prefix. Args: path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix' or '/bucket/' or '/bucket'. Raises: ValueError: if path is invalid. Returns: a tuple of /bucket and prefix. prefix can be None.
2.894457
2.686888
1.077253
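A runnable approximation of the split described above; the regex and error text below are simplified stand-ins, not the library's own:

```python
import re

# Hypothetical, simplified bucket-name pattern; not the library's regex.
_PREFIX_RE = re.compile(r'^/[a-z0-9._-]{3,}(/.*)?$')

def process_path_prefix(path_prefix):
    if not _PREFIX_RE.match(path_prefix):
        raise ValueError('Path prefix should have format /bucket, /bucket/, '
                         'or /bucket/prefix but got %s.' % path_prefix)
    bucket_name_end = path_prefix.find('/', 1)
    if bucket_name_end == -1:
        return path_prefix, None                      # '/bucket'
    bucket = path_prefix[:bucket_name_end]
    prefix = path_prefix[bucket_name_end + 1:] or None
    return bucket, prefix

print(process_path_prefix('/bucket'))           # ('/bucket', None)
print(process_path_prefix('/bucket/'))          # ('/bucket', None)
print(process_path_prefix('/bucket/dir/file'))  # ('/bucket', 'dir/file')
```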
if not path: raise ValueError('Path is empty') if not isinstance(path, basestring): raise TypeError('Path should be a string but is %s (%s).' % (path.__class__, path))
def _validate_path(path)
Basic validation of Google Storage paths. Args: path: a Google Storage path. It should have form '/bucket/filename' or '/bucket'. Raises: ValueError: if path is invalid. TypeError: if path is not of type basestring.
3.826698
3.547414
1.078729
if not options: return for k, v in options.iteritems(): if not isinstance(k, str): raise TypeError('option %r should be a str.' % k) if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS): raise ValueError('option %s is not supported.' % k) if not isinstance(v, basestring)...
def validate_options(options)
Validate Google Cloud Storage options. Args: options: a str->basestring dict of options to pass to Google Cloud Storage. Raises: ValueError: if option is not supported. TypeError: if option is not of type str or value of an option is not of type basestring.
3.370697
2.768247
1.217629
parsable, _ = dt_str.split('.') dt = datetime.datetime.strptime(parsable, _DT_FORMAT) return calendar.timegm(dt.utctimetuple())
def dt_str_to_posix(dt_str)
Convert a datetime str to a POSIX timestamp. datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ, e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator between date and time when they are on the same line. Z indicates UTC (zero meridian). A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html This is used to parse...
4.912409
5.991446
0.819904
dt = datetime.datetime.utcfromtimestamp(posix) dt_str = dt.strftime(_DT_FORMAT) return dt_str + '.000Z'
def posix_to_dt_str(posix)
Reverse of dt_str_to_posix. This is used by GCS stub to generate GET bucket XML response. Args: posix: A float of secs from unix epoch. Returns: A datetime str.
3.906369
4.81231
0.811745
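The two helpers round-trip, modulo the sub-second part that posix_to_dt_str zeroes out. A self-contained sketch, assuming _DT_FORMAT is '%Y-%m-%dT%H:%M:%S' as the split on '.' suggests:

```python
import calendar
import datetime

_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'  # assumed from the split on '.'

def dt_str_to_posix(dt_str):
    parsable, _ = dt_str.split('.')       # drop '.978Z'
    dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
    return calendar.timegm(dt.utctimetuple())

def posix_to_dt_str(posix):
    dt = datetime.datetime.utcfromtimestamp(posix)
    return dt.strftime(_DT_FORMAT) + '.000Z'

posix = dt_str_to_posix('2013-04-12T00:22:27.978Z')
print(posix)                   # 1365726147
print(posix_to_dt_str(posix))  # 2013-04-12T00:22:27.000Z
```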
server_software = os.environ.get('SERVER_SOFTWARE') if server_software is None: return True if 'remote_api' in server_software: return False if server_software.startswith(('Development', 'testutil')): return True return False
def local_run()
Whether we should hit the GCS dev appserver stub.
5.130584
3.593247
1.427841
def wrapper(*args, **kwargs): logging.info('Memory before method %s is %s.', method.__name__, runtime.memory_usage().current()) result = method(*args, **kwargs) logging.info('Memory after method %s is %s', method.__name__, runtime.memory_usage().current()) return r...
def memory_usage(method)
Log memory usage before and after a method.
2.701719
2.520803
1.071769
api = _StorageApi(_StorageApi.full_control_scope, service_account_id=account_id, retry_params=retry_params) # when running local unit tests, the service account is test@localhost # from google.appengine.api.app_identity.app_identity_stub.APP_SERVICE_ACCOUNT_NAME ser...
def _get_storage_api(retry_params, account_id=None)
Returns storage_api instance for API methods. Args: retry_params: An instance of api_utils.RetryParams. If None, the thread's default will be used. account_id: Internal-use only. Returns: A storage_api instance to handle urlfetch work to GCS. On dev appserver, this instance will talk to a local s...
5.154644
4.571634
1.127528
if headers is None: headers = {} if 'x-goog-api-version' not in headers: headers['x-goog-api-version'] = '2' headers['accept-encoding'] = 'gzip, *' try: resp_tuple = yield super(_StorageApi, self).do_request_async( url, method=method, headers=headers, payload=payload, ...
def do_request_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None)
Inherit docs. This method translates urlfetch exceptions to more service specific ones.
2.800537
2.821513
0.992565
return self.do_request_async(self.api_url + path, 'POST', **kwds)
def post_object_async(self, path, **kwds)
POST to an object.
5.456117
5.637441
0.967836
return self.do_request_async(self.api_url + path, 'PUT', **kwds)
def put_object_async(self, path, **kwds)
PUT an object.
5.949553
6.113973
0.973107
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def get_object_async(self, path, **kwds)
GET an object. Note: No payload argument is supported.
5.987063
6.904568
0.867116
return self.do_request_async(self.api_url + path, 'DELETE', **kwds)
def delete_object_async(self, path, **kwds)
DELETE an object. Note: No payload argument is supported.
5.536346
7.445181
0.743615
return self.do_request_async(self.api_url + path, 'HEAD', **kwds)
def head_object_async(self, path, **kwds)
HEAD an object. Depending on request headers, HEAD returns various object properties, e.g. Content-Length, Last-Modified, and ETag. Note: No payload argument is supported.
5.77278
8.221437
0.702162
return self.do_request_async(self.api_url + path, 'GET', **kwds)
def get_bucket_async(self, path, **kwds)
GET a bucket.
6.618447
6.612217
1.000942
xml_setting_list = ['<ComposeRequest>'] for meta_data in file_list: xml_setting_list.append('<Component>') for key, val in meta_data.iteritems(): xml_setting_list.append('<%s>%s</%s>' % (key, val, key)) xml_setting_list.append('</Component>') xml_setting_list.append('</Compo...
def compose_object(self, file_list, destination_file, content_type)
COMPOSE multiple objects together. Using the given list of files, calls the put object with the compose flag. This call merges all the files into the destination file. Args: file_list: list of dicts with the file name. destination_file: Path to the destination file. content_type: Content...
3.321434
3.067461
1.082796
self._check_open() if size == 0 or not self._remaining(): return '' data_list = [] newline_offset = self._buffer.find_newline(size) while newline_offset < 0: data = self._buffer.read(size) size -= len(data) self._offset += len(data) data_list.append(data) if...
def readline(self, size=-1)
Read one line delimited by '\n' from the file. A trailing newline character is kept in the string. It may be absent when a file ends with an incomplete line. If the size argument is non-negative, it specifies the maximum string size (counting the newline) to return. A negative size is the same as unspe...
2.777022
2.867131
0.968572
self._check_open() if not self._remaining(): return '' data_list = [] while True: remaining = self._buffer.remaining() if size >= 0 and size < remaining: data_list.append(self._buffer.read(size)) self._offset += size break else: size -= remai...
def read(self, size=-1)
Read data from RAW file. Args: size: Number of bytes to read as integer. Actual number of bytes read is always equal to size unless EOF is reached. If size is negative or unspecified, read the entire file. Returns: data read as str. Raises: IOError: When this buffer is c...
3.068032
3.113316
0.985455
self._buffer_future = None next_offset = self._offset + self._buffer.remaining() if next_offset != self._file_size: self._buffer_future = self._get_segment(next_offset, self._buffer_size)
def _request_next_buffer(self)
Request next buffer. Requires self._offset and self._buffer to be in a consistent state.
4.973589
4.33041
1.148526
if not request_size: return [] end = start + request_size futures = [] while request_size > self._max_request_size: futures.append(self._get_segment(start, self._max_request_size)) request_size -= self._max_request_size start += self._max_request_size if start < end: ...
def _get_segments(self, start, request_size)
Get segments of the file from Google Storage as a list. A large request is broken into segments to avoid hitting urlfetch response size limit. Each segment is returned from a separate urlfetch. Args: start: start offset to request. Inclusive. Have to be within the range of the file. re...
2.292042
2.426096
0.944745
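The splitting arithmetic in _get_segments is independent of urlfetch. A standalone sketch that returns (start, size) pairs instead of issuing requests (the tiny max size is for the demo only):

```python
MAX_REQUEST_SIZE = 4  # deliberately tiny for the demo

def split_segments(start, request_size, max_size=MAX_REQUEST_SIZE):
    """Break [start, start + request_size) into (start, size) pieces."""
    segments = []
    end = start + request_size
    while request_size > max_size:
        segments.append((start, max_size))
        request_size -= max_size
        start += max_size
    if start < end:
        segments.append((start, end - start))
    return segments

print(split_segments(0, 10))  # [(0, 4), (4, 4), (8, 2)]
print(split_segments(5, 3))   # [(5, 3)]
print(split_segments(5, 0))   # []
```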
end = start + request_size - 1 content_range = '%d-%d' % (start, end) headers = {'Range': 'bytes=' + content_range} status, resp_headers, content = yield self._api.get_object_async( self._path, headers=headers) def _checker(): errors.check_status(status, [200, 206], self._path, he...
def _get_segment(self, start, request_size, check_response=True)
Get a segment of the file from Google Storage. Args: start: start offset of the segment. Inclusive. Have to be within the range of the file. request_size: number of bytes to request. Have to be small enough for a single urlfetch request. May go over the logical range of the file...
3.716991
3.835196
0.969179
if etag is None: return elif self._etag is None: self._etag = etag elif self._etag != etag: raise ValueError('File on GCS has changed while reading.')
def _check_etag(self, etag)
Check if etag is the same across requests to GCS. If self._etag is None, set it. If etag is set, check that the new etag equals the old one. In the __init__ method, we fire one HEAD and one GET request using ndb tasklet. One of them would return first and set the first value. Args: etag: et...
4.645007
4.335534
1.07138
self._check_open() self._buffer.reset() self._buffer_future = None if whence == os.SEEK_SET: self._offset = offset elif whence == os.SEEK_CUR: self._offset += offset elif whence == os.SEEK_END: self._offset = self._file_size + offset else: raise ValueError('Whe...
def seek(self, offset, whence=os.SEEK_SET)
Set the file's current offset. Note if the new offset is out of bound, it is adjusted to either 0 or EOF. Args: offset: seek offset as number. whence: seek mode. Supported modes are os.SEEK_SET (absolute seek), os.SEEK_CUR (seek relative to the current position), and os.SEEK_END (s...
2.522865
2.507135
1.006274
if size < 0: offset = len(self._buffer) else: offset = self._offset + size return self.read_to_offset(offset)
def read(self, size=-1)
Returns bytes from self._buffer and updates related offsets. Args: size: number of bytes to read starting from current offset. Read the entire buffer if negative. Returns: Requested bytes from buffer.
4.657034
3.889977
1.197188
assert offset >= self._offset result = self._buffer[self._offset: offset] self._offset += len(result) return result
def read_to_offset(self, offset)
Returns bytes from self._buffer and updates related offsets. Args: offset: read from current offset to this offset, exclusive. Returns: Requested bytes from buffer.
4.391972
3.69275
1.18935
if size < 0: return self._buffer.find('\n', self._offset) return self._buffer.find('\n', self._offset, self._offset + size)
def find_newline(self, size=-1)
Search for newline char in buffer starting from current offset. Args: size: number of bytes to search. -1 means all. Returns: offset of newline char in buffer. -1 if it doesn't exist.
2.827333
2.647459
1.067942
self._check_open() if not isinstance(data, str): raise TypeError('Expected str but got %s.' % type(data)) if not data: return self._buffer.append(data) self._buffered += len(data) self._offset += len(data) if self._buffered >= self._flushsize: self._flush()
def write(self, data)
Write some bytes. Args: data: data to write. str. Raises: TypeError: if data is not of type str.
3.157082
3.195366
0.988019
if not self.closed: self.closed = True self._flush(finish=True) self._buffer = None
def close(self)
Flush the buffer and finalize the file. When this returns, the new file is available for reading.
7.605606
7.033398
1.081356
while ((finish and self._buffered >= 0) or (not finish and self._buffered >= self._blocksize)): tmp_buffer = [] tmp_buffer_len = 0 excess = 0 while self._buffer: buf = self._buffer.popleft() size = len(buf) self._buffered -= size tmp_buffer.ap...
def _flush(self, finish=False)
Internal API to flush. Buffer is flushed to GCS only when the total amount of buffered data is at least self._blocksize, or to flush the final (incomplete) block of the file with finish=True.
2.915126
2.848913
1.023242
headers = {} end_offset = start_offset + len(data) - 1 if data: headers['content-range'] = ('bytes %d-%d/%s' % (start_offset, end_offset, file_len)) else: headers['content-range'] = ('bytes */%s' % file_len) status, response_headers, content = sel...
def _send_data(self, data, start_offset, file_len)
Send the block to the storage service. This is a utility method that does not modify self. Args: data: data to send in str. start_offset: start offset of the data in relation to the file. file_len: an int if this is the last data to append to the file. Otherwise '*'.
3.614523
3.467521
1.042394
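The Content-Range values _send_data builds follow the resumable-upload convention: 'bytes start-end/total' for a block, 'bytes */total' for an empty finalizing request. A minimal sketch of just the header formatting:

```python
def content_range(data, start_offset, file_len):
    """file_len is an int for the final block, '*' otherwise."""
    if data:
        end_offset = start_offset + len(data) - 1
        return 'bytes %d-%d/%s' % (start_offset, end_offset, file_len)
    return 'bytes */%s' % file_len

print(content_range(b'x' * 256, 0, '*'))    # bytes 0-255/*
print(content_range(b'x' * 256, 256, 512))  # bytes 256-511/512
print(content_range(b'', 0, 512))           # bytes */512
```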
headers = {'content-range': 'bytes */*'} status, response_headers, content = self._api.put_object( self._path_with_token, headers=headers) errors.check_status(status, [308], self._path, headers, response_headers, content, {'upload_path': self._pat...
def _get_offset_from_gcs(self)
Get the last offset that has been written to GCS. This is a utility method that does not modify self. Returns: an int of the last offset written to GCS by this upload, inclusive. -1 means nothing has been written.
5.102049
4.577492
1.114595
if file_length is None: file_length = self._get_offset_from_gcs() + 1 self._send_data('', 0, file_length)
def _force_close(self, file_length=None)
Close this buffer on file_length. Finalize this upload immediately on file_length. Contents that are still in memory will not be uploaded. This is a utility method that does not modify self. Args: file_length: file length. Must match what has been uploaded. If None, it will be queried f...
6.477224
6.40016
1.012041
import copy import operator from tarfile import ExtractError directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 448 ...
def _extractall(self, path=".", members=None)
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
2.487439
2.386001
1.042514
old_states = dict(self._cstate.states) self._cstate.start_next_cycle() self._request_method = None # self.their_http_version gets left alone, since it presumably lasts # beyond a single request/response cycle assert not self.client_is_waiting_for_100_continue ...
def start_next_cycle(self)
Attempt to reset our connection state for a new request/response cycle. If both client and server are in :data:`DONE` state, then resets them both to :data:`IDLE` state in preparation for a new request/response cycle on this same connection. Otherwise, raises a :exc:`LocalProtoc...
11.579619
10.009425
1.156872
if data: if self._receive_buffer_closed: raise RuntimeError( "received close, then received more data?") self._receive_buffer += data else: self._receive_buffer_closed = True
def receive_data(self, data)
Add data to our internal receive buffer. This does not actually do any processing on the data, just stores it. To trigger processing, you have to call :meth:`next_event`. Args: data (:term:`bytes-like object`): The new data that was just received. S...
6.531369
6.36
1.026945
if self.their_state is ERROR: raise RemoteProtocolError( "Can't receive data when peer state is ERROR") try: event = self._extract_next_receive_event() if event not in [NEED_DATA, PAUSED]: self._process_event(self.their_role, ...
def next_event(self)
Parse the next event out of our receive buffer, update our internal state, and return it. This is a mutating operation -- think of it like calling :func:`next` on an iterator. Returns: : One of three things: 1) An event object -- see :ref:`events`. ...
6.114956
4.866131
1.256636
data_list = self.send_with_data_passthrough(event) if data_list is None: return None else: return b"".join(data_list)
def send(self, event)
Convert a high-level event into bytes that can be sent to the peer, while updating our internal state machine. Args: event: The :ref:`event <events>` to send. Returns: If ``type(event) is ConnectionClosed``, then returns ``None``. Otherwise, returns a :term:...
5.664944
5.609054
1.009964
if self.our_state is ERROR: raise LocalProtocolError( "Can't send data when our state is ERROR") try: if type(event) is Response: self._clean_up_response_headers_for_sending(event) # We want to call _process_event before callin...
def send_with_data_passthrough(self, event)
Identical to :meth:`send`, except that in situations where :meth:`send` returns a single :term:`bytes-like object`, this instead returns a list of them -- and when sending a :class:`Data` event, this list is guaranteed to contain the exact object you passed in as :attr:`Data.data`. See :...
7.061586
6.828357
1.034156
for xstart, ystart, xstep, ystep in adam7: if xstart >= width: continue yield ((xstart, y, xstep) for y in range(ystart, height, ystep))
def adam7_generate(width, height)
Generate the coordinates for the reduced scanlines of an Adam7 interlaced image of size `width` by `height` pixels. Yields a generator for each pass, and each pass generator yields a series of (x, y, xstep) triples, each one identifying a reduced scanline consisting of pixels starting at (x, y)...
5.106574
3.745416
1.36342
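Running the generator above over a small image makes the pass structure concrete. The adam7 constant below is the standard pass table (xstart, ystart, xstep, ystep) that the code assumes:

```python
adam7 = ((0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8),
         (2, 0, 4, 4), (0, 2, 2, 4), (1, 0, 2, 2), (0, 1, 1, 2))

def adam7_generate(width, height):
    for xstart, ystart, xstep, ystep in adam7:
        if xstart >= width:
            continue
        yield ((xstart, y, xstep) for y in range(ystart, height, ystep))

for n, p in enumerate(adam7_generate(8, 8), 1):
    print('pass %d:' % n, list(p))
# pass 1: [(0, 0, 8)]
# pass 2: [(4, 0, 8)]
# pass 3: [(0, 4, 4)]
# pass 4: [(2, 0, 4), (2, 4, 4)]
# ... down to pass 7, which covers every odd row at full width
```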
# None is the default and is allowed. if palette is None: return None p = list(palette) if not (0 < len(p) <= 256): raise ProtocolError( "a palette must have between 1 and 256 entries," " see https://www.w3.org/TR/PNG/#11PLTE") seen_triple = False f...
def check_palette(palette)
Check a palette argument (to the :class:`Writer` class) for validity. Returns the palette as a list if okay; raises an exception otherwise.
2.881241
2.867483
1.004798
if not size: return width, height if len(size) != 2: raise ProtocolError( "size argument should be a pair (width, height)") if width is not None and width != size[0]: raise ProtocolError( "size[0] (%r) and width (%r) should match when both are used." ...
def check_sizes(size, width, height)
Check that these arguments, if supplied, are consistent. Return a (width, height) pair.
2.306082
2.121224
1.087147
if c is None: return c if greyscale: try: len(c) except TypeError: c = (c,) if len(c) != 1: raise ProtocolError("%s for greyscale must be 1-tuple" % which) if not is_natural(c[0]): raise ProtocolError( ...
def check_color(c, greyscale, which)
Checks that a colour argument for transparent or background options is the right form. Returns the colour (which, if it's a bare integer, is "corrected" to a 1-tuple).
2.536611
2.36282
1.073552
data = bytes(data) # http://www.w3.org/TR/PNG/#5Chunk-layout outfile.write(struct.pack("!I", len(data))) outfile.write(tag) outfile.write(data) checksum = zlib.crc32(tag) checksum = zlib.crc32(data, checksum) checksum &= 2 ** 32 - 1 outfile.write(struct.pack("!I", checksum))
def write_chunk(outfile, tag, data=b'')
Write a PNG chunk to the output file, including length and checksum.
2.464453
2.053153
1.200326
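The chunk layout is easy to verify end to end: 4-byte big-endian length, 4-byte tag, data, then a CRC-32 over tag plus data (not over the length). The same function, run against an in-memory file:

```python
import io
import struct
import zlib

def write_chunk(outfile, tag, data=b''):
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    outfile.write(struct.pack('!I', len(data)))
    outfile.write(tag)
    outfile.write(data)
    checksum = zlib.crc32(tag)
    checksum = zlib.crc32(data, checksum)
    checksum &= 2 ** 32 - 1
    outfile.write(struct.pack('!I', checksum))

buf = io.BytesIO()
write_chunk(buf, b'IEND')
print(buf.getvalue().hex())  # 0000000049454e44ae426082, the standard IEND chunk
```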
out.write(signature) for chunk in chunks: write_chunk(out, *chunk)
def write_chunks(out, chunks)
Create a PNG file by writing out the chunks.
5.476696
5.100472
1.073763
# One factor for each channel fs = [float(2 ** s[1] - 1)/float(2 ** s[0] - 1) for s in rescale] # Assume all target_bitdepths are the same target_bitdepths = set(s[1] for s in rescale) assert len(target_bitdepths) == 1 (target_bitdepth, ) = target_bitdepths typecode = 'BH'[t...
def rescale_rows(rows, rescale)
Take each row in rows (an iterator) and yield a fresh row with the pixels scaled according to the rescale parameters in the list `rescale`. Each element of `rescale` is a tuple of (source_bitdepth, target_bitdepth), with one element per channel.
3.494182
2.853627
1.224471
assert bitdepth < 8 assert 8 % bitdepth == 0 # samples per byte spb = int(8 / bitdepth) def make_byte(block): res = 0 for v in block: res = (res << bitdepth) + v return res for row in rows: a = bytearray(row) # Adding padding...
def pack_rows(rows, bitdepth)
Yield packed rows that are a byte array. Each byte is packed with the values from several pixels.
5.232723
4.983342
1.050043
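A worked example of the packing described above, with 1-bit samples; the padding arithmetic here uses a modulo instead of the original's ceiling division, which should be equivalent:

```python
def pack_rows(rows, bitdepth):
    """Pack each row of small values into whole bytes (zero-padded)."""
    assert bitdepth < 8 and 8 % bitdepth == 0
    spb = 8 // bitdepth                     # samples per byte
    for row in rows:
        a = bytearray(row)
        a.extend([0] * ((-len(a)) % spb))   # pad final byte with zero bits
        packed = bytearray()
        for i in range(0, len(a), spb):
            byte = 0
            for v in a[i:i + spb]:
                byte = (byte << bitdepth) + v
            packed.append(byte)
        yield packed

row = [1, 0, 1, 1, 0, 0, 1, 0, 1, 1]  # ten 1-bit samples
print(next(pack_rows([row], 1)))      # bytearray(b'\xb2\xc0')
```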
for row in rows: fmt = '!%dH' % len(row) yield bytearray(struct.pack(fmt, *row))
def unpack_rows(rows)
Unpack each row from 16 bits per value to a sequence of bytes.
4.60766
4.100169
1.123773
p = bytearray() t = bytearray() for x in palette: p.extend(x[0:3]) if len(x) > 3: t.append(x[3]) if t: return p, t return p, None
def make_palette_chunks(palette)
Create the byte sequences for a ``PLTE`` and if necessary a ``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be ``None`` if no ``tRNS`` chunk is necessary.
3.530527
2.637214
1.338734
if palette: if len(bitdepth) != 1: raise ProtocolError( "with palette, only a single bitdepth may be used") (bitdepth, ) = bitdepth if bitdepth not in (1, 2, 4, 8): raise ProtocolError( "with palette, bitdepth must be 1, 2, 4, or ...
def check_bitdepth_rescale( palette, bitdepth, transparent, alpha, greyscale)
Returns a (bitdepth, rescale) pair.
2.898846
2.905487
0.997714
# Currently, with no max_length parameter to decompress, # this routine will do one yield per IDAT chunk: Not very # incremental. d = zlib.decompressobj() # Each IDAT chunk is passed to the decompressor, then any # remaining state is decompressed out. for data in data_blocks: #...
def decompress(data_blocks)
`data_blocks` should be an iterable that yields the compressed data (from the ``IDAT`` chunks). This yields decompressed byte strings.
11.327638
10.320704
1.097564
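The one-decompressobj-per-stream pattern is short enough to run end to end. The loop body below is a plausible completion of the truncated code (each block decompressed through the shared object, then a final flush), not necessarily the library's exact lines:

```python
import zlib

def decompress(data_blocks):
    d = zlib.decompressobj()
    for data in data_blocks:
        # Each block is passed to the shared decompressor, so state
        # carries across block boundaries.
        yield d.decompress(data)
    yield d.flush()

payload = b'hello, ' * 100
compressed = zlib.compress(payload)
# Split the stream into arbitrary blocks, as IDAT chunks would be.
blocks = [compressed[i:i + 10] for i in range(0, len(compressed), 10)]
assert b''.join(decompress(blocks)) == payload
print('ok')
```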
if bitdepth not in (1, 2, 4, 8, 16): raise FormatError("invalid bit depth %d" % bitdepth) if colortype not in (0, 2, 3, 4, 6): raise FormatError("invalid colour type %d" % colortype) # Check indexed (palettized) images have 8 or fewer bits # per pixel; check only indexed or greysca...
def check_bitdepth_colortype(bitdepth, colortype)
Check that `bitdepth` and `colortype` are both valid, and specified in a valid combination. Returns (None) if valid, raise an Exception if not valid.
2.708316
2.789125
0.971027
try: is_integer = int(x) == x except (TypeError, ValueError): return False return is_integer and x >= 0
def is_natural(x)
True if x is a non-negative integer.
3.162153
3.178486
0.994861
ai = 0 # Loop starts at index filter_unit. Observe that the initial part # of the result is already filled in correctly with # scanline. for i in range(filter_unit, len(result)): x = scanline[i] a = result[ai] result[i] = (x + a) & 0xff ai += 1
def undo_filter_sub(filter_unit, scanline, previous, result)
Undo sub filter.
7.730202
7.585195
1.019117
for i in range(len(result)): x = scanline[i] b = previous[i] result[i] = (x + b) & 0xff
def undo_filter_up(filter_unit, scanline, previous, result)
Undo up filter.
3.334425
3.262263
1.02212
ai = -filter_unit for i in range(len(result)): x = scanline[i] if ai < 0: a = 0 else: a = result[ai] b = previous[i] result[i] = (x + ((a + b) >> 1)) & 0xff ai += 1
def undo_filter_average(filter_unit, scanline, previous, result)
Undo average filter.
3.350786
3.274351
1.023344
# Also used for ci. ai = -filter_unit for i in range(len(result)): x = scanline[i] if ai < 0: a = c = 0 else: a = result[ai] c = previous[ai] b = previous[i] p = a + b - c pa = abs(p - a) pb = abs(p - b) ...
def undo_filter_paeth(filter_unit, scanline, previous, result)
Undo Paeth filter.
2.70942
2.638993
1.026687
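A concrete round trip helps check the sign conventions: apply the Sub filter (the simplest of the four above), then undo it with the function from the Sub record:

```python
def undo_filter_sub(filter_unit, scanline, previous, result):
    ai = 0
    for i in range(filter_unit, len(result)):
        result[i] = (scanline[i] + result[ai]) & 0xff
        ai += 1

raw = bytearray([10, 20, 35, 55, 80])   # one byte per pixel
filtered = bytearray(raw)
for i in range(len(raw) - 1, 0, -1):    # apply the Sub filter in place
    filtered[i] = (raw[i] - raw[i - 1]) & 0xff

result = bytearray(filtered)            # first byte is copied unchanged
undo_filter_sub(1, filtered, None, result)
assert result == raw
print(list(filtered), '->', list(result))
# [10, 10, 15, 20, 25] -> [10, 20, 35, 55, 80]
```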
# First there is a Python3 issue. try: stdout = sys.stdout.buffer except AttributeError: # Probably Python 2, where bytes are strings. stdout = sys.stdout # On Windows the C runtime file orientation needs changing. if sys.platform == "win32": import msvcrt ...
def binary_stdout()
A sys.stdout that accepts bytes.
5.605894
5.073099
1.105024
# Values per row vpr = self.width * self.planes def check_rows(rows): for i, row in enumerate(rows): try: wrong_length = len(row) != vpr except TypeError: # When using an itertools.ichain ...
def write(self, outfile, rows)
Write a PNG image to the output file. `rows` should be an iterable that yields each row (each row is a sequence of values). The rows should be the rows of the original image, so there should be ``self.height`` rows of ``self.width * self.planes`` values. If `interlace` is...
5.553526
4.838789
1.14771
# Ensure rows are scaled (to 4-/8-/16-bit), # and packed into bytes. if self.rescale: rows = rescale_rows(rows, self.rescale) if self.bitdepth < 8: rows = pack_rows(rows, self.bitdepth) elif self.bitdepth == 16: rows = unpack_rows(r...
def write_passes(self, outfile, rows)
Write a PNG image to the output file. Most users are expected to find the :meth:`write` or :meth:`write_array` method more convenient. The rows should be given to this method in the order that they appear in the output file. For straightlaced images, this is the usual top to bo...
4.729245
4.713798
1.003277
self.write_preamble(outfile) # http://www.w3.org/TR/PNG/#11IDAT if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() # data accumulates bytes to be compressed for the IDAT chunk;...
def write_packed(self, outfile, rows)
Write PNG file to `outfile`. `rows` should be an iterator that yields each packed row; a packed row being a sequence of packed bytes. The rows have a filter byte prefixed and are then compressed into one or more IDAT chunks. They are not processed any further, so if bitd...
4.73825
4.507591
1.051171
if self.interlace: if type(pixels) != array: # Coerce to array type fmt = 'BH'[self.bitdepth > 8] pixels = array(fmt, pixels) self.write_passes(outfile, self.array_scanlines_interlace(pixels)) else: self.write_...
def write_array(self, outfile, pixels)
Write an array that holds all the image values as a PNG file on the output file. See also :meth:`write` method.
4.990457
4.803427
1.038937
# Values per row vpr = self.width * self.planes stop = 0 for y in range(self.height): start = stop stop = start + vpr yield pixels[start:stop]
def array_scanlines(self, pixels)
Generates rows (each a sequence of values) from a single array of values.
5.373328
4.974947
1.080077
# http://www.w3.org/TR/PNG/#8InterlaceMethods # Array type. fmt = 'BH'[self.bitdepth > 8] # Value per row vpr = self.width * self.planes # Each iteration generates a scanline starting at (x, y) # and consisting of every xstep pixels. for lines i...
def array_scanlines_interlace(self, pixels)
Generator for interlaced scanlines from an array. `pixels` is the full source image as a single array of values. The generator yields each scanline of the reduced passes in turn, each scanline being a sequence of values.
5.691334
5.576667
1.020562
w = Writer(**self.info) with open(file, 'wb') as fd: w.write(fd, self.rows)
def save(self, file)
Save the image to the named *file*. See `.write()` if you already have an open file object. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
8.073407
8.311494
0.971355
w = Writer(**self.info) w.write(file, self.rows)
def write(self, file)
Write the image to the open file object. See `.save()` if you have a filename. In general, you can only call this method once; after it has been called the first time the PNG image is written, the source data will have been streamed, and cannot be streamed again.
16.045704
17.060797
0.940501
self.validate_signature() # http://www.w3.org/TR/PNG/#5Chunk-layout if not self.atchunk: self.atchunk = self._chunk_len_type() if not self.atchunk: raise ChunkError("No more chunks.") length, type = self.atchunk self.atchunk = None ...
def chunk(self, lenient=False)
Read the next PNG chunk from the input file; returns a (*type*, *data*) tuple. *type* is the chunk's type as a byte string (all PNG chunk types are 4 bytes long). *data* is the chunk's data content, as a byte string. If the optional `lenient` argument evaluates to `True`, ...
3.475635
3.180253
1.09288
while True: t, v = self.chunk() yield t, v if t == b'IEND': break
def chunks(self)
Return an iterator that will yield each chunk as a (*chunktype*, *content*) pair.
10.64927
7.14708
1.490017
# :todo: Would it be better to update scanline in place? result = scanline if filter_type == 0: return result if filter_type not in (1, 2, 3, 4): raise FormatError( 'Invalid PNG Filter Type. ' 'See http://www.w3.org/TR/...
def undo_filter(self, filter_type, scanline, previous)
Undo the filter for a scanline. `scanline` is a sequence of bytes that does not include the initial filter type byte. `previous` is decoded previous scanline (for straightlaced images this is the previous pixel row, but for interlaced images, it is the previous scanline i...
6.213413
6.324096
0.982498
# Values per row (of the target image) vpr = self.width * self.planes # Values per image vpi = vpr * self.height # Interleaving writes to the output array randomly # (well, not quite), so the entire output array must be in memory. # Make a result array,...
def _deinterlace(self, raw)
Read raw pixel data, undo filters, deinterlace, and flatten. Return a single array of values.
5.163463
4.919772
1.049533
if self.bitdepth == 8: return bytearray(bs) if self.bitdepth == 16: return array('H', struct.unpack('!%dH' % (len(bs) // 2), bs)) assert self.bitdepth < 8 if width is None: width = self.width # Samples per by...
def _bytes_to_values(self, bs, width=None)
Convert a packed row of bytes into a row of values. Result will be a freshly allocated object, not shared with the argument.
3.236434
3.228019
1.002607
# length of row, in bytes rb = self.row_bytes a = bytearray() # The previous (reconstructed) scanline. # None indicates first line of image. recon = None for some_bytes in byte_blocks: a.extend(some_bytes) while len(a) >= rb + 1: ...
def _iter_straight_packed(self, byte_blocks)
Iterator that undoes the effect of filtering; yields each row as a sequence of packed bytes. Assumes input is straightlaced. `byte_blocks` should be an iterable that yields the raw bytes in blocks of arbitrary size.
7.794652
7.373907
1.057059
self.validate_signature() while True: if not self.atchunk: self.atchunk = self._chunk_len_type() if self.atchunk is None: raise FormatError('This PNG file has no IDAT chunks.') if self.atchunk[1] == b'IDAT': ...
def preamble(self, lenient=False)
Extract the image metadata by reading the initial part of the PNG file up to the start of the ``IDAT`` chunk. All the chunks that precede the ``IDAT`` chunk are read and either processed for metadata or discarded. If the optional `lenient` argument evaluates to `True`, c...
7.142844
5.767161
1.238537
x = self.file.read(8) if not x: return None if len(x) != 8: raise FormatError( 'End of file whilst reading chunk length and type.') length, type = struct.unpack('!I4s', x) if length > 2 ** 31 - 1: raise FormatError('Ch...
def _chunk_len_type(self)
Reads just enough of the input to determine the next chunk's length and type; return a (*length*, *type*) pair where *type* is a byte sequence. If there are no more chunks, ``None`` is returned.
3.831939
3.373599
1.135861
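The same '!I4s' struct format, exercised against an in-memory stream; the IHDR chunk's fixed length of 13 makes a handy check:

```python
import io
import struct

def chunk_len_type(f):
    x = f.read(8)
    if not x:
        return None  # no more chunks
    if len(x) != 8:
        raise ValueError('End of file whilst reading chunk length and type.')
    length, type = struct.unpack('!I4s', x)  # big-endian length, 4-byte tag
    return length, type

f = io.BytesIO(b'\x00\x00\x00\x0dIHDR' + b'\x00' * 17)
print(chunk_len_type(f))  # (13, b'IHDR')
```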
type, data = self.chunk(lenient=lenient) method = '_process_' + type.decode('ascii') m = getattr(self, method, None) if m: m(data)
def process_chunk(self, lenient=False)
Process the next chunk and its data. This only processes the following chunk types: ``IHDR``, ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``. All other chunk types are ignored. If the optional `lenient` argument evaluates to `True`, checksum failures will raise warni...
5.063302
4.682735
1.08127
def iteridat(): while True: type, data = self.chunk(lenient=lenient) if type == b'IEND': # http://www.w3.org/TR/PNG/#11IEND break if type != b'IDAT': continue ...
def read(self, lenient=False)
Read the PNG file and decode it. Returns (`width`, `height`, `rows`, `info`). May use excessive memory. `rows` is a sequence of rows; each row is a sequence of values. If the optional `lenient` argument evaluates to True, checksum failures will raise warnings rather th...
4.608234
4.354334
1.05831
width, height, pixels, info = self.asDirect() if info['alpha']: raise Error("will not convert image with alpha channel to RGB") if not info['greyscale']: return width, height, pixels, info info['greyscale'] = False info['planes'] = 3 if ...
def asRGB(self)
Return image as RGB pixels. RGB colour images are passed through unchanged; greyscales are expanded into RGB triplets (there is a small speed overhead for doing this). An alpha channel in the source image will raise an exception. The return values are as for the :meth:`read` me...
4.69467
4.256019
1.103066
width, height, pixels, info = self.asDirect() if info['alpha'] and not info['greyscale']: return width, height, pixels, info typecode = 'BH'[info['bitdepth'] > 8] maxval = 2**info['bitdepth'] - 1 maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width ...
def asRGBA(self)
Return image as RGBA pixels. Greyscales are expanded into RGB triplets; an alpha channel is synthesized if necessary. The return values are as for the :meth:`read` method except that the *info* reflect the returned pixels, not the source image. In particular, for this method ...
3.490437
3.420701
1.020387
import re import binascii # Remove all non-hexadecimal digits s = re.sub(br'[^a-fA-F\d]', b'', s) # binascii.unhexlify works in Python 2 and Python 3 (unlike # thing.decode('hex')). return binascii.unhexlify(s)
def _dehex(s)
Liberally convert from hex string to binary string.
7.33954
7.009093
1.047145