Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance.
def describe_field(field_definition):
    field_descriptor = FieldDescriptor()
    field_descriptor.name = field_definition.name
    field_descriptor.number = field_definition.number
    field_descriptor.variant = field_definition.variant

    if isinstance(field_definition, messages.EnumField):
        field_descriptor.type_name = field_definition.type.definition_name()

    if isinstance(field_definition, messages.MessageField):
        field_descriptor.type_name = (
            field_definition.message_type.definition_name())

    if field_definition.default is not None:
        field_descriptor.default_value = _DEFAULT_TO_STRING_MAP[
            type(field_definition)](field_definition.default)

    # Set label.
    if field_definition.repeated:
        field_descriptor.label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        field_descriptor.label = FieldDescriptor.Label.REQUIRED
    else:
        field_descriptor.label = FieldDescriptor.Label.OPTIONAL

    return field_descriptor
388,309
Build descriptor for Message class. Args: message_definition: Message class to provide descriptor for. Returns: Initialized MessageDescriptor instance describing the Message class.
def describe_message(message_definition):
    message_descriptor = MessageDescriptor()
    message_descriptor.name = message_definition.definition_name().split(
        '.')[-1]

    fields = sorted(message_definition.all_fields(), key=lambda v: v.number)
    if fields:
        message_descriptor.fields = [describe_field(field) for field in fields]

    try:
        nested_messages = message_definition.__messages__
    except AttributeError:
        pass
    else:
        message_descriptors = []
        for name in nested_messages:
            value = getattr(message_definition, name)
            message_descriptors.append(describe_message(value))
        message_descriptor.message_types = message_descriptors

    try:
        nested_enums = message_definition.__enums__
    except AttributeError:
        pass
    else:
        enum_descriptors = []
        for name in nested_enums:
            value = getattr(message_definition, name)
            enum_descriptors.append(describe_enum(value))
        message_descriptor.enum_types = enum_descriptors

    return message_descriptor
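As an illustrative sketch of how the describe_* helpers above are typically used, assuming the protorpc package (which this code matches) is installed:

from protorpc import descriptor, messages

class Greeting(messages.Message):
    text = messages.StringField(1, required=True)
    count = messages.IntegerField(2, repeated=True)

msg_desc = descriptor.describe_message(Greeting)
print(msg_desc.name)                      # 'Greeting'
print([f.name for f in msg_desc.fields])  # ['text', 'count'], sorted by number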
388,310
Build a file from a specified Python module. Args: module: Python module to describe. Returns: Initialized FileDescriptor instance describing the module.
def describe_file(module):
    descriptor = FileDescriptor()
    descriptor.package = util.get_package_for_module(module)
    if not descriptor.package:
        descriptor.package = None

    message_descriptors = []
    enum_descriptors = []

    # Need to iterate over all top level attributes of the module looking for
    # message and enum definitions.  Each definition must be itself described.
    for name in sorted(dir(module)):
        value = getattr(module, name)
        if isinstance(value, type):
            if issubclass(value, messages.Message):
                message_descriptors.append(describe_message(value))
            elif issubclass(value, messages.Enum):
                enum_descriptors.append(describe_enum(value))

    if message_descriptors:
        descriptor.message_types = message_descriptors
    if enum_descriptors:
        descriptor.enum_types = enum_descriptors

    return descriptor
388,311
Build a file set from specified Python modules. Args: modules: Iterable of Python modules to describe. Returns: Initialized FileSet instance describing the modules.
def describe_file_set(modules):
    descriptor = FileSet()
    file_descriptors = []
    for module in modules:
        file_descriptors.append(describe_file(module))

    if file_descriptors:
        descriptor.files = file_descriptors

    return descriptor
388,312
Describe any value as a descriptor. Helper function for describing any object with an appropriate descriptor object. Args: value: Value to describe as a descriptor. Returns: Descriptor message class if object is describable as a descriptor, else None.
def describe(value):
    if isinstance(value, types.ModuleType):
        return describe_file(value)
    elif isinstance(value, messages.Field):
        return describe_field(value)
    elif isinstance(value, messages.Enum):
        return describe_enum_value(value)
    elif isinstance(value, type):
        if issubclass(value, messages.Message):
            return describe_message(value)
        elif issubclass(value, messages.Enum):
            return describe_enum(value)
    return None
388,313
Constructor. Args: descriptors: A dictionary or dictionary-like object that can be used to store and cache descriptors by definition name. descriptor_loader: A function used for resolving missing descriptors. The function takes a definition name as its parameter and returns an appropriate descriptor. It may raise DefinitionNotFoundError.
def __init__(self,
             descriptors=None,
             descriptor_loader=import_descriptor_loader):
    self.__descriptor_loader = descriptor_loader
    self.__descriptors = descriptors or {}
388,315
Lookup descriptor by name. Get descriptor from library by name. If the descriptor is not found, will attempt to find it via the descriptor loader if provided. Args: definition_name: Definition name to find. Returns: Descriptor that describes the definition name. Raises: DefinitionNotFoundError if no descriptor exists for the definition name.
def lookup_descriptor(self, definition_name):
    try:
        return self.__descriptors[definition_name]
    except KeyError:
        pass

    if self.__descriptor_loader:
        definition = self.__descriptor_loader(definition_name)
        self.__descriptors[definition_name] = definition
        return definition
    else:
        raise messages.DefinitionNotFoundError(
            'Could not find definition for %s' % definition_name)
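The lookup is a plain cache-then-loader pattern; here is a minimal self-contained sketch of the same idea (the names are illustrative, not part of the library):

def make_lookup(loader, cache=None):
    cache = cache if cache is not None else {}

    def lookup(name):
        if name in cache:
            return cache[name]    # cache hit
        value = loader(name)      # delegate to the loader on a miss
        cache[name] = value       # memoize for subsequent lookups
        return value

    return lookup

lookup = make_lookup(lambda name: name.upper())
assert lookup('my.package.Message') == 'MY.PACKAGE.MESSAGE'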
388,316
Determines the package name for any definition. Determine the package that any definition name belongs to. May check parents for the package name and will resolve missing descriptors if a descriptor loader is provided. Args: definition_name: Definition name to find the package for. Returns: The package name of the FileDescriptor the definition belongs to, or None if it cannot be determined.
def lookup_package(self, definition_name):
    while True:
        descriptor = self.lookup_descriptor(definition_name)
        if isinstance(descriptor, FileDescriptor):
            return descriptor.package
        else:
            index = definition_name.rfind('.')
            if index < 0:
                return None
            definition_name = definition_name[:index]
388,317
Constructor. Args: protojson_protocol: ProtoJson instance.
def __init__(self, protojson_protocol=None, **kwargs):
    super(MessageJSONEncoder, self).__init__(**kwargs)
    self.__protojson_protocol = (
        protojson_protocol or ProtoJson.get_default())
388,319
Return dictionary instance from a message object. Args: value: Value to get dictionary for. If not encodable, will call the superclass's default method.
def default(self, value):
    if isinstance(value, messages.Enum):
        return str(value)

    if six.PY3 and isinstance(value, bytes):
        return value.decode('utf8')

    if isinstance(value, messages.Message):
        result = {}
        for field in value.all_fields():
            item = value.get_assigned_value(field.name)
            if item not in (None, [], ()):
                result[field.name] = (
                    self.__protojson_protocol.encode_field(field, item))
        # Handle unrecognized fields, so they're included when a message is
        # decoded then encoded.
        for unknown_key in value.all_unrecognized_fields():
            unrecognized_field, _ = value.get_unrecognized_field_info(
                unknown_key)
            # Unrecognized fields are carried over as-is; they were captured
            # at decode time and need no further field-level encoding here.
            result[unknown_key] = unrecognized_field
        return result

    return super(MessageJSONEncoder, self).default(value)
388,320
Encode a python field value to a JSON value. Args: field: A ProtoRPC field instance. value: A python value supported by field. Returns: A JSON serializable value appropriate for field.
def encode_field(self, field, value):
    if isinstance(field, messages.BytesField):
        if field.repeated:
            value = [base64.b64encode(byte) for byte in value]
        else:
            value = base64.b64encode(value)
    elif isinstance(field, message_types.DateTimeField):
        # DateTimeField stores its data as a RFC 3339 compliant string.
        if field.repeated:
            value = [i.isoformat() for i in value]
        else:
            value = value.isoformat()
    return value
388,321
Encode Message instance to JSON string. Args: message: Message instance to encode into a JSON string. Returns: String encoding of Message instance in protocol JSON format. Raises: messages.ValidationError if message is not initialized.
def encode_message(self, message):
    message.check_initialized()

    return json.dumps(message, cls=MessageJSONEncoder,
                      protojson_protocol=self)
388,322
Merge JSON structure to Message instance. Args: message_type: Message to decode data to. encoded_message: JSON encoded version of message. Returns: Decoded instance of message_type. Raises: ValueError: If encoded_message is not valid JSON. messages.ValidationError if merged message is not initialized.
def decode_message(self, message_type, encoded_message):
    encoded_message = six.ensure_str(encoded_message)

    if not encoded_message.strip():
        return message_type()

    dictionary = json.loads(encoded_message)
    message = self.__decode_dictionary(message_type, dictionary)
    message.check_initialized()
    return message
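A sketch of the round trip that encode_message and decode_message provide, assuming protorpc's stock protojson module:

from protorpc import messages, protojson

class Note(messages.Message):
    text = messages.StringField(1)

encoded = protojson.encode_message(Note(text='hi'))  # '{"text": "hi"}'
decoded = protojson.decode_message(Note, encoded)
assert decoded.text == 'hi'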
388,323
Find the messages.Variant type that describes this value. Args: value: The value whose variant type is being determined. Returns: The messages.Variant value that best describes value's type, or None if it's a type we don't know how to handle.
def __find_variant(self, value):
    if isinstance(value, bool):
        return messages.Variant.BOOL
    elif isinstance(value, six.integer_types):
        return messages.Variant.INT64
    elif isinstance(value, float):
        return messages.Variant.DOUBLE
    elif isinstance(value, six.string_types):
        return messages.Variant.STRING
    elif isinstance(value, (list, tuple)):
        # Find the most specific variant that covers all elements.
        variant_priority = [None,
                            messages.Variant.INT64,
                            messages.Variant.DOUBLE,
                            messages.Variant.STRING]
        chosen_priority = 0
        for v in value:
            variant = self.__find_variant(v)
            try:
                priority = variant_priority.index(variant)
            except ValueError:
                # list.index raises ValueError (not IndexError) on a miss.
                priority = -1
            if priority > chosen_priority:
                chosen_priority = priority
        return variant_priority[chosen_priority]
    # Unrecognized type.
    return None
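To make the priority rule concrete, here is a pure-Python sketch (not library code) of how a mixed list widens to the most general covering variant:

PRIORITY = [None, 'INT64', 'DOUBLE', 'STRING']

def classify(v):
    if isinstance(v, int):
        return 'INT64'
    return 'DOUBLE' if isinstance(v, float) else 'STRING'

def covering_variant(values):
    chosen = 0
    for v in values:
        chosen = max(chosen, PRIORITY.index(classify(v)))
    return PRIORITY[chosen]

assert covering_variant([1, 2, 3]) == 'INT64'  # all ints stay narrow
assert covering_variant([1, 2.5]) == 'DOUBLE'  # a float widens the list
assert covering_variant([1, 'x']) == 'STRING'  # a string wins over numbers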
388,324
Merge dictionary into message. Args: message_type: Message type to create and merge the dictionary into. dictionary: Dictionary to extract information from. Dictionary is as parsed from JSON. Nested objects will also be dictionaries. Returns: Decoded instance of message_type.
def __decode_dictionary(self, message_type, dictionary):
    message = message_type()
    for key, value in six.iteritems(dictionary):
        if value is None:
            try:
                message.reset(key)
            except AttributeError:
                pass  # This is an unrecognized field, skip it.
            continue

        try:
            field = message.field_by_name(key)
        except KeyError:
            # Save unknown values.
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)
            continue

        if field.repeated:
            # This should be unnecessary? Or in fact become an error.
            if not isinstance(value, list):
                value = [value]
            valid_value = [self.decode_field(field, item)
                           for item in value]
            setattr(message, field.name, valid_value)
            continue

        # This is just for consistency with the old behavior.
        if value == []:
            continue

        try:
            setattr(message, field.name, self.decode_field(field, value))
        except messages.DecodeError:
            # Save unknown enum values.
            if not isinstance(field, messages.EnumField):
                raise
            variant = self.__find_variant(value)
            if variant:
                message.set_unrecognized_field(key, value, variant)

    return message
388,325
Decode a JSON value to a python value. Args: field: A ProtoRPC field instance. value: A serialized JSON value. Returns: A Python value compatible with field.
def decode_field(self, field, value):
    if isinstance(field, messages.EnumField):
        try:
            return field.type(value)
        except TypeError:
            raise messages.DecodeError(
                'Invalid enum value "%s"' % (value or ''))

    elif isinstance(field, messages.BytesField):
        try:
            return base64.b64decode(value)
        except (binascii.Error, TypeError) as err:
            raise messages.DecodeError('Base64 decoding error: %s' % err)

    elif isinstance(field, message_types.DateTimeField):
        try:
            return util.decode_datetime(value)
        except ValueError as err:
            raise messages.DecodeError(err)

    elif (isinstance(field, messages.MessageField) and
          issubclass(field.type, messages.Message)):
        return self.__decode_dictionary(field.type, value)

    elif (isinstance(field, messages.FloatField) and
          isinstance(value, (six.integer_types, six.string_types))):
        try:
            return float(value)
        except:  # pylint:disable=bare-except
            pass

    elif (isinstance(field, messages.IntegerField) and
          isinstance(value, six.string_types)):
        try:
            return int(value)
        except:  # pylint:disable=bare-except
            pass

    return value
388,326
Initialize this download by setting self.http and self.url. We want the user to be able to override self.http by having set the value in the constructor; in that case, we ignore the provided http. Args: http: An httplib2.Http instance or None. url: The url for this transfer. Returns: None. Initializes self.
def _Initialize(self, http, url):
    self.EnsureUninitialized()
    if self.http is None:
        self.__http = http or http_wrapper.GetHttp()
    self.__url = url
388,338
Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead.
def InitializeDownload(self, http_request, http=None, client=None):
    self.EnsureUninitialized()
    if http is None and client is None:
        raise exceptions.UserError('Must provide client or http.')
    http = http or client.http
    if client is not None:
        http_request.url = client.FinalizeTransferUrl(http_request.url)
    url = http_request.url
    if self.auto_transfer:
        end_byte = self.__ComputeEndByte(0)
        self.__SetRangeHeader(http_request, 0, end_byte)
        response = http_wrapper.MakeRequest(
            self.bytes_http or http, http_request)
        if response.status_code not in self._ACCEPTABLE_STATUSES:
            raise exceptions.HttpError.FromResponse(response)
        self.__initial_response = response
        self.__SetTotal(response.info)
        url = response.info.get('content-location', response.request_url)
    if client is not None:
        url = client.FinalizeTransferUrl(url)
    self._Initialize(http, url)
    # Unless the user has requested otherwise, we want to just
    # go ahead and pump the bytes now.
    if self.auto_transfer:
        self.StreamInChunks()
388,348
Send this resumable upload in a single request. Args: callback: Progress callback function with inputs (http_wrapper.Response, transfer.Upload) finish_callback: Final callback function with inputs (http_wrapper.Response, transfer.Upload) additional_headers: Dict of headers to include with the upload http_wrapper.Request. Returns: http_wrapper.Response of final response.
def StreamMedia(self, callback=None, finish_callback=None,
                additional_headers=None):
    return self.__StreamMedia(
        callback=callback, finish_callback=finish_callback,
        additional_headers=additional_headers, use_chunks=False)
388,374
Read at most size bytes from this buffer. Bytes read from this buffer are consumed and are permanently removed. Args: size: If provided, read no more than size bytes from the buffer. Otherwise, this reads the entire buffer. Returns: The bytes read from this buffer.
def read(self, size=None):
    if size is None:
        size = self.__size
    ret_list = []
    while size > 0 and self.__buf:
        data = self.__buf.popleft()
        size -= len(data)
        ret_list.append(data)
    if size < 0:
        ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:]
        self.__buf.appendleft(remainder)
    ret = b''.join(ret_list)
    self.__size -= len(ret)
    return ret
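The negative-size slicing at the end is the interesting part; a self-contained sketch of the same chunked-buffer read (illustrative, not the library class):

import collections

buf = collections.deque([b'abcd', b'efgh'])

def read(buf, size):
    chunks, remaining = [], size
    while remaining > 0 and buf:
        data = buf.popleft()
        remaining -= len(data)
        chunks.append(data)
    if remaining < 0:
        # Overshot: keep the tail of the last chunk for the next read.
        chunks[-1], tail = chunks[-1][:remaining], chunks[-1][remaining:]
        buf.appendleft(tail)
    return b''.join(chunks)

assert read(buf, 6) == b'abcdef'
assert read(buf, 6) == b'gh'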
388,381
Get package name for a module. Helper calculates the package name of a module. Args: module: Module to get name for. If module is a string, try to find module in sys.modules. Returns: If module contains 'package' attribute, uses that as package name. Else, if module is not the '__main__' module, the module __name__. Else, the base name of the module file name. Else None.
def get_package_for_module(module):
    if isinstance(module, six.string_types):
        try:
            module = sys.modules[module]
        except KeyError:
            return None

    try:
        return six.text_type(module.package)
    except AttributeError:
        if module.__name__ == '__main__':
            try:
                file_name = module.__file__
            except AttributeError:
                pass
            else:
                base_name = os.path.basename(file_name)
                split_name = os.path.splitext(base_name)
                if len(split_name) == 1:
                    return six.text_type(base_name)
                return u'.'.join(split_name[:-1])

        return six.text_type(module.__name__)
388,408
Decode a DateTimeField parameter from a string to a python datetime. Args: encoded_datetime: A string in RFC 3339 format. Returns: A datetime object with the date and time specified in encoded_datetime. Raises: ValueError: If the string is not in a recognized format.
def decode_datetime(encoded_datetime):
    # Check if the string includes a time zone offset.  Break out the
    # part that doesn't include time zone info.  Convert to uppercase
    # because all our comparisons should be case-insensitive.
    time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
    if time_zone_match:
        time_string = encoded_datetime[:time_zone_match.start(1)].upper()
    else:
        time_string = encoded_datetime.upper()

    if '.' in time_string:
        format_string = '%Y-%m-%dT%H:%M:%S.%f'
    else:
        format_string = '%Y-%m-%dT%H:%M:%S'

    decoded_datetime = datetime.datetime.strptime(time_string, format_string)

    if not time_zone_match:
        return decoded_datetime

    # Time zone info was included in the parameter.  Add a tzinfo
    # object to the datetime.  Datetimes can't be changed after they're
    # created, so we'll need to create a new one.
    if time_zone_match.group('z'):
        offset_minutes = 0
    else:
        sign = time_zone_match.group('sign')
        hours, minutes = [int(value) for value in
                          time_zone_match.group('hours', 'minutes')]
        offset_minutes = hours * 60 + minutes
        if sign == '-':
            offset_minutes *= -1

    return datetime.datetime(decoded_datetime.year,
                             decoded_datetime.month,
                             decoded_datetime.day,
                             decoded_datetime.hour,
                             decoded_datetime.minute,
                             decoded_datetime.second,
                             decoded_datetime.microsecond,
                             TimeZoneOffset(offset_minutes))
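Assuming this is protorpc's util.decode_datetime (which the code matches), usage looks like:

from protorpc import util

# No offset in the string: the result is a naive datetime.
naive = util.decode_datetime('2017-03-07T14:29:24')
assert naive.tzinfo is None

# An explicit offset becomes a TimeZoneOffset tzinfo on the result.
aware = util.decode_datetime('2017-03-07T14:29:24-08:00')
assert aware.utcoffset().total_seconds() == -8 * 3600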
388,410
Initialize a time zone offset. Args: offset: Integer or timedelta time zone offset, in minutes from UTC. This can be negative.
def __init__(self, offset):
    super(TimeZoneOffset, self).__init__()
    if isinstance(offset, datetime.timedelta):
        offset = total_seconds(offset) / 60
    self.__offset = offset
388,411
Convert DateTimeMessage to a datetime. Args: message: A DateTimeMessage instance. Returns: A datetime instance.
def value_from_message(self, message):
    message = super(DateTimeField, self).value_from_message(message)
    if message.time_zone_offset is None:
        return datetime.datetime.utcfromtimestamp(
            message.milliseconds / 1000.0)

    # Need to subtract the time zone offset, because when we call
    # datetime.fromtimestamp, it will add the time zone offset to the
    # value we pass.
    milliseconds = (message.milliseconds -
                    60000 * message.time_zone_offset)

    timezone = util.TimeZoneOffset(message.time_zone_offset)
    return datetime.datetime.fromtimestamp(milliseconds / 1000.0,
                                           tz=timezone)
388,413
Calculates amount of time to wait before a retry attempt. Wait time grows exponentially with the number of attempts. A random amount of jitter is added to spread out retry attempts from different clients. Args: retry_attempt: Retry attempt counter. max_wait: Upper bound for wait time [seconds]. Returns: Number of seconds to wait before retrying request.
def CalculateWaitForRetry(retry_attempt, max_wait=60):
    wait_time = 2 ** retry_attempt
    max_jitter = wait_time / 4.0
    wait_time += random.uniform(-max_jitter, max_jitter)
    return max(1, min(wait_time, max_wait))
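The growth is exponential with roughly ±25% jitter, clamped to [1, max_wait]; a quick self-contained check of the bounds implied by the formula above:

def wait_bounds(attempt, max_wait=60):
    base = 2 ** attempt
    low = max(1, min(base * 0.75, max_wait))   # base minus maximum jitter
    high = max(1, min(base * 1.25, max_wait))  # base plus maximum jitter
    return low, high

assert wait_bounds(1) == (1.5, 2.5)  # attempt 1: 2s +/- 0.5s
assert wait_bounds(6) == (48, 60)    # attempt 6: 64s +/- 16s, capped at 60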
388,419
Get the userinfo associated with the given credentials. This is dependent on the token having either the userinfo.email or userinfo.profile scope for the given token. Args: credentials: (oauth2client.client.Credentials) incoming credentials http: (httplib2.Http, optional) http instance to use Returns: The email address for this token, or None if the required scopes aren't available.
def GetUserinfo(credentials, http=None):  # pylint: disable=invalid-name
    http = http or httplib2.Http()
    url = _GetUserinfoUrl(credentials)
    # We ignore communication woes here (i.e. SSL errors, socket
    # timeout), as handling these should be done in a common location.
    response, content = http.request(url)
    if response.status == http_client.BAD_REQUEST:
        credentials.refresh(http)
        url = _GetUserinfoUrl(credentials)
        response, content = http.request(url)
    return json.loads(content or '{}')
388,470
Initializes the credentials instance. Args: scopes: The scopes to get. If None, whatever scopes that are available to the instance are used. service_account_name: The service account to retrieve the scopes from. **kwds: Additional keyword args.
def __init__(self, scopes=None, service_account_name='default', **kwds):
    # If there is a connectivity issue with the metadata server,
    # detection calls may fail even if we've already successfully
    # identified these scopes in the same execution. However, the
    # available scopes don't change once an instance is created,
    # so there is no reason to perform more than one query.
    self.__service_account_name = service_account_name
    cached_scopes = None
    cache_filename = kwds.get('cache_filename')
    if cache_filename:
        cached_scopes = self._CheckCacheFileForMatch(
            cache_filename, scopes)

    scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)

    if cache_filename and not cached_scopes:
        self._WriteCacheFile(cache_filename, scopes)

    # We check the scopes above, but don't need them again after
    # this point. Newer versions of oauth2client let us drop them
    # here, but since we support older versions as well, we just
    # catch and squelch the warning.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        super(GceAssertionCredentials, self).__init__(scope=scopes, **kwds)
388,476
Checks the cache file to see if it matches the given credentials. Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials. Returns: List of scopes (if cache matches) or None.
def _CheckCacheFileForMatch(self, cache_filename, scopes):
    creds = {  # Credentials metadata dict.
        'scopes': sorted(list(scopes)) if scopes else None,
        'svc_acct_name': self.__service_account_name,
    }
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cached_creds_str = cache_file.LockedRead()
        if not cached_creds_str:
            return None
        cached_creds = json.loads(cached_creds_str)
        if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
            if creds['scopes'] in (None, cached_creds['scopes']):
                return cached_creds['scopes']
    except KeyboardInterrupt:
        raise
    except:  # pylint: disable=bare-except
        # Treat exceptions as a cache miss.
        pass
388,477
Writes the credential metadata to the cache file. This does not save the credentials themselves (CredentialStore class optionally handles that after this class is initialized). Args: cache_filename: Cache filename to check. scopes: Scopes for the desired credentials.
def _WriteCacheFile(self, cache_filename, scopes):
    # Credentials metadata dict.
    creds = {'scopes': sorted(list(scopes)),
             'svc_acct_name': self.__service_account_name}
    creds_str = json.dumps(creds)
    cache_file = _MultiProcessCacheFile(cache_filename)
    try:
        cache_file.LockedWrite(creds_str)
    except KeyboardInterrupt:
        raise
    except:  # pylint: disable=bare-except
        # Treat exceptions as a cache miss.
        pass
388,478
Refresh self.access_token. Args: _: (ignored) A function matching httplib2.Http.request's signature.
def _refresh(self, _):
    # pylint: disable=import-error
    from google.appengine.api import app_identity
    try:
        token, _ = app_identity.get_access_token(self._scopes)
    except app_identity.Error as e:
        raise exceptions.CredentialsError(str(e))
    self.access_token = token
388,485
Acquire an interprocess lock and write a string. This method safely acquires the locks, then writes a string to the cache file. If the string is written successfully the function will return True; if the write fails for any reason it will return False. Args: cache_data: string or bytes to write. Returns: bool: success
def LockedWrite(self, cache_data):
    if isinstance(cache_data, six.text_type):
        cache_data = cache_data.encode(encoding=self._encoding)

    with self._thread_lock:
        if not self._EnsureFileExists():
            return False
        with self._process_lock_getter() as acquired_plock:
            if not acquired_plock:
                return False
            with open(self._filename, 'wb') as f:
                f.write(cache_data)
        return True
388,489
Rebuilds all http connections in the httplib2.Http instance. httplib2 overloads the map in http.connections to contain two different types of values: { scheme string: connection class } and { scheme + authority string : actual http connection } Here we remove all of the entries for actual connections so that on the next request httplib2 will rebuild them from the connection types. Args: http: An httplib2.Http instance.
def RebuildHttpConnections(http):
    if getattr(http, 'connections', None):
        for conn_key in list(http.connections.keys()):
            if ':' in conn_key:
                del http.connections[conn_key]
388,534
Exception handler for http failures. This catches known failures and rebuilds the underlying HTTP connections. Args: retry_args: An ExceptionRetryArgs tuple.
def HandleExceptionsAndRebuildHttpConnections(retry_args):
    # If the server indicates how long to wait, use that value.  Otherwise,
    # calculate the wait time on our own.
    retry_after = None

    # Transport failures
    if isinstance(retry_args.exc, (http_client.BadStatusLine,
                                   http_client.IncompleteRead,
                                   http_client.ResponseNotReady)):
        logging.debug('Caught HTTP error %s, retrying: %s',
                      type(retry_args.exc).__name__, retry_args.exc)
    elif isinstance(retry_args.exc, socket.error):
        logging.debug('Caught socket error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.gaierror):
        logging.debug(
            'Caught socket address error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, socket.timeout):
        logging.debug(
            'Caught socket timeout error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, httplib2.ServerNotFoundError):
        logging.debug(
            'Caught server not found error, retrying: %s', retry_args.exc)
    elif isinstance(retry_args.exc, ValueError):
        # oauth2client tries to JSON-decode the response, which can result
        # in a ValueError if the response was invalid. Until that is fixed in
        # oauth2client, need to handle it here.
        logging.debug('Response content was invalid (%s), retrying',
                      retry_args.exc)
    elif (isinstance(retry_args.exc, TokenRefreshError) and
          hasattr(retry_args.exc, 'status') and
          (retry_args.exc.status == TOO_MANY_REQUESTS or
           retry_args.exc.status >= 500)):
        logging.debug(
            'Caught transient credential refresh error (%s), retrying',
            retry_args.exc)
    elif isinstance(retry_args.exc, exceptions.RequestError):
        logging.debug('Request returned no response, retrying')
    # API-level failures
    elif isinstance(retry_args.exc, exceptions.BadStatusCodeError):
        logging.debug('Response returned status %s, retrying',
                      retry_args.exc.status_code)
    elif isinstance(retry_args.exc, exceptions.RetryAfterError):
        logging.debug('Response returned a retry-after header, retrying')
        retry_after = retry_args.exc.retry_after
    else:
        raise retry_args.exc
    RebuildHttpConnections(retry_args.http)
    logging.debug('Retrying request to url %s after exception %s',
                  retry_args.http_request.url, retry_args.exc)
    time.sleep(
        retry_after or util.CalculateWaitForRetry(
            retry_args.num_retries, max_wait=retry_args.max_retry_wait))
388,535
Initialize a batch API request object. Args: batch_url: Base URL for batch API calls. retryable_codes: A list of integer HTTP codes that can be retried. response_encoding: The encoding type of response content.
def __init__(self, batch_url=None, retryable_codes=None,
             response_encoding=None):
    self.api_requests = []
    self.retryable_codes = retryable_codes or []
    self.batch_url = batch_url or 'https://www.googleapis.com/batch'
    self.response_encoding = response_encoding
388,583
Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicating the desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None
def Add(self, service, method, request, global_params=None):
    # Retrieve the configs for the desired method and service.
    method_config = service.GetMethodConfig(method)
    upload_config = service.GetUploadConfig(method)

    # Prepare the HTTP Request.
    http_request = service.PrepareHttpRequest(
        method_config, request, global_params=global_params,
        upload_config=upload_config)

    # Create the request and add it to our master list.
    api_request = self.ApiCall(
        http_request, self.retryable_codes, service, method_config)
    self.api_requests.append(api_request)
388,584
Convert a Content-ID header value to an id. Presumes the Content-ID header conforms to the format that _ConvertIdToHeader() returns. Args: header: A string indicating the Content-ID header value. Returns: The extracted id value. Raises: BatchError if the header is not in the expected format.
def _ConvertHeaderToId(header):
    # The header must be angle-bracketed, per the docstring's stated format.
    if not (header.startswith('<') and header.endswith('>')):
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    if '+' not in header:
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    _, request_id = header[1:-1].rsplit('+', 1)
    return urllib_parse.unquote(request_id)
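For reference, the forward direction pairs an id with a batch prefix; a self-contained sketch of the round trip (the batch0 prefix is illustrative, not the library's exact format):

from urllib import parse as urllib_parse

def convert_id_to_header(request_id, prefix='batch0'):
    return '<%s+%s>' % (prefix, urllib_parse.quote(str(request_id)))

def convert_header_to_id(header):
    _, request_id = header[1:-1].rsplit('+', 1)
    return urllib_parse.unquote(request_id)

header = convert_id_to_header('42')
assert header == '<batch0+42>'
assert convert_header_to_id(header) == '42'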
388,587
Convert a http_wrapper.Request object into a string. Args: request: A http_wrapper.Request to serialize. Returns: The request as a string in application/http format.
def _SerializeRequest(self, request):
    # Construct status line
    parsed = urllib_parse.urlsplit(request.url)
    request_line = urllib_parse.urlunsplit(
        ('', '', parsed.path, parsed.query, ''))
    if not isinstance(request_line, six.text_type):
        request_line = request_line.decode('utf-8')
    status_line = u' '.join((
        request.http_method,
        request_line,
        u'HTTP/1.1\n'))
    major, minor = request.headers.get(
        'content-type', 'application/json').split('/')
    msg = mime_nonmultipart.MIMENonMultipart(major, minor)

    # MIMENonMultipart adds its own Content-Type header.
    # Keep all of the other headers in `request.headers`.
    for key, value in request.headers.items():
        if key == 'content-type':
            continue
        msg[key] = value

    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
        msg.set_payload(request.body)

    # Serialize the mime message.
    str_io = six.StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    gen = generator.Generator(str_io, maxheaderlen=0)
    gen.flatten(msg, unixfrom=False)
    body = str_io.getvalue()

    return status_line + body
388,588
Convert string into Response and content. Args: payload: Header and body string to be deserialized. Returns: A Response object
def _DeserializeResponse(self, payload):
    # Strip off the status line.
    status_line, payload = payload.split('\n', 1)
    _, status, _ = status_line.split(' ', 2)

    # Parse the rest of the response.
    parser = email_parser.Parser()
    msg = parser.parsestr(payload)

    # Get the headers.
    info = dict(msg)
    info['status'] = status

    # Create Response from the parsed headers.
    content = msg.get_payload()

    return http_wrapper.Response(info, content, self.__batch_url)
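The deserializer leans entirely on the stdlib email parser; here is the same step run on a canned application/http payload (self-contained):

from email import parser as email_parser

payload = ('HTTP/1.1 200 OK\n'
           'Content-Type: application/json\n'
           '\n'
           '{"ok": true}')

status_line, rest = payload.split('\n', 1)
_, status, _ = status_line.split(' ', 2)
msg = email_parser.Parser().parsestr(rest)

assert status == '200'
assert msg['Content-Type'] == 'application/json'
assert msg.get_payload() == '{"ok": true}'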
388,589
Serialize batch request, send to server, process response. Args: http: A httplib2.Http object to be used to make the request with. Raises: httplib2.HttpLib2Error if a transport error has occurred. apiclient.errors.BatchError if the response is the wrong format.
def _Execute(self, http):
    message = mime_multipart.MIMEMultipart('mixed')
    # Message should not write out its own headers.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests.
    for key in self.__request_response_handlers:
        msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
        msg['Content-Transfer-Encoding'] = 'binary'
        msg['Content-ID'] = self._ConvertIdToHeader(key)

        body = self._SerializeRequest(
            self.__request_response_handlers[key].request)
        msg.set_payload(body)
        message.attach(msg)

    request = http_wrapper.Request(self.__batch_url, 'POST')
    request.body = message.as_string()
    request.headers['content-type'] = (
        'multipart/mixed; boundary="%s"') % message.get_boundary()

    response = http_wrapper.MakeRequest(http, request)

    if response.status_code >= 300:
        raise exceptions.HttpError.FromResponse(response)

    # Prepend with a content-type header so Parser can handle it.
    header = 'content-type: %s\r\n\r\n' % response.info['content-type']

    content = response.content
    if isinstance(content, bytes) and self.__response_encoding:
        content = response.content.decode(self.__response_encoding)

    parser = email_parser.Parser()
    mime_response = parser.parsestr(header + content)

    if not mime_response.is_multipart():
        raise exceptions.BatchError(
            'Response not in multipart/mixed format.')

    for part in mime_response.get_payload():
        request_id = self._ConvertHeaderToId(part['Content-ID'])
        response = self._DeserializeResponse(part.get_payload())

        # Disable protected access because namedtuple._replace(...)
        # is not actually meant to be protected.
        # pylint: disable=protected-access
        self.__request_response_handlers[request_id] = (
            self.__request_response_handlers[request_id]._replace(
                response=response))
388,591
Execute all the requests as a single batched HTTP request. Args: http: A httplib2.Http object to be used with the request. Returns: None Raises: BatchError if the response is the wrong format.
def Execute(self, http):
    self._Execute(http)

    for key in self.__request_response_handlers:
        response = self.__request_response_handlers[key].response
        callback = self.__request_response_handlers[key].handler

        exception = None

        if response.status_code >= 300:
            exception = exceptions.HttpError.FromResponse(response)

        if callback is not None:
            callback(response, exception)
        if self.__callback is not None:
            self.__callback(response, exception)
388,592
Overridden to avoid setting variables after init. Setting attributes on a class must work during initialization, in order to set the enumeration value class variables and build the name/number maps. Once __init__ has set the __initialized flag to True, setting any more values on the class is prohibited; the class is in effect frozen. Args: name: Name of value to set. value: Value to set.
def __setattr__(cls, name, value):
    if cls.__initialized and name not in _POST_INIT_ATTRIBUTE_NAMES:
        raise AttributeError('May not change values: %s' % name)
    else:
        type.__setattr__(cls, name, value)
388,596
Get the assigned value of an attribute. Get the underlying value of an attribute. If value has not been set, will not return the default for the field. Args: name: Name of attribute to get. Returns: Value of attribute, None if it has not been set.
def get_assigned_value(self, name):
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        raise AttributeError('Message %s has no field %s' % (
            message_type.__name__, name))
    return self.__tags.get(field.number)
388,615
Reset assigned value for field. Resetting a field will return it to its default value or None. Args: name: Name of field to reset.
def reset(self, name):
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        if name not in message_type.__by_name:
            raise AttributeError('Message %s has no field %s' % (
                message_type.__name__, name))
    if field.repeated:
        self.__tags[field.number] = FieldList(field, [])
    else:
        self.__tags.pop(field.number, None)
388,616
Get the value and variant of an unknown field in this message. Args: key: The name or number of the field to retrieve. value_default: Value to be returned if the key isn't found. variant_default: Value to be returned as variant if the key isn't found. Returns: (value, variant), where value and variant are whatever was passed to set_unrecognized_field.
def get_unrecognized_field_info(self, key, value_default=None,
                                variant_default=None):
    value, variant = self.__unrecognized_fields.get(
        key, (value_default, variant_default))
    return value, variant
388,617
Set an unrecognized field, used when decoding a message. Args: key: The name or number used to refer to this unknown value. value: The value of the field. variant: Type information needed to interpret the value or re-encode it. Raises: TypeError: If the variant is not an instance of messages.Variant.
def set_unrecognized_field(self, key, value, variant):
    if not isinstance(variant, Variant):
        raise TypeError('Variant type %s is not valid.' % variant)
    self.__unrecognized_fields[key] = value, variant
388,618
Change set behavior for messages. Messages may only be assigned values that are fields. Does not try to validate field when set. Args: name: Name of field to assign to. value: Value to assign to field. Raises: AttributeError when trying to assign value that is not a field.
def __setattr__(self, name, value):
    if name in self.__by_name or name.startswith('_Message__'):
        object.__setattr__(self, name, value)
    else:
        raise AttributeError("May not assign arbitrary value %s "
                             "to message %s" % (name, type(self).__name__))
388,619
Constructor. Args: field_instance: Instance of field that validates the list. sequence: List or tuple to construct list from.
def __init__(self, field_instance, sequence):
    if not field_instance.repeated:
        raise FieldDefinitionError(
            'FieldList may only accept repeated fields')
    self.__field = field_instance
    self.__field.validate(sequence)
    list.__init__(self, sequence)
388,622
Enable unpickling. Args: state: A 3-tuple containing: - The field instance, or None if it belongs to a Message class. - The Message class that the field instance belongs to, or None. - The field instance number of the Message class it belongs to, or None.
def __setstate__(self, state):
    field_instance, message_class, number = state
    if field_instance is None:
        self.__field = message_class.field_by_number(number)
    else:
        self.__field = field_instance
388,624
Setter overridden to prevent assignment to fields after creation. Args: name: Name of attribute to set. value: Value to assign.
def __setattr__(self, name, value):
    # Special case post-init names.  They need to be set after constructor.
    if name in _POST_INIT_FIELD_ATTRIBUTE_NAMES:
        object.__setattr__(self, name, value)
        return

    # All other attributes must be set before __initialized.
    if not self.__initialized:
        # Not initialized yet, allow assignment.
        object.__setattr__(self, name, value)
    else:
        raise AttributeError('Field objects are read-only')
388,632
Set value on message. Args: message_instance: Message instance to set value on. value: Value to set on message.
def __set__(self, message_instance, value):
    # Reaches in to message instance directly to assign to private tags.
    if value is None:
        if self.repeated:
            raise ValidationError(
                'May not assign None to repeated field %s' % self.name)
        else:
            message_instance._Message__tags.pop(self.number, None)
    else:
        if self.repeated:
            value = FieldList(self, value)
        else:
            value = self.validate(value)
        message_instance._Message__tags[self.number] = value
388,633
Validate single element of field. This is different from validate in that it is used on individual values of repeated fields. Args: value: Value to validate. Returns: The value cast to the expected type. Raises: ValidationError if value is not of the expected type.
def validate_element(self, value):
    if not isinstance(value, self.type):
        # Authorize int values as float.
        if isinstance(value, six.integer_types) and self.type == float:
            return float(value)

        if value is None:
            if self.required:
                raise ValidationError('Required field is missing')
        else:
            try:
                name = self.name
            except AttributeError:
                raise ValidationError('Expected type %s for %s, '
                                      'found %s (type %s)' %
                                      (self.type, self.__class__.__name__,
                                       value, type(value)))
            else:
                raise ValidationError(
                    'Expected type %s for field %s, found %s (type %s)' %
                    (self.type, name, value, type(value)))
    return value
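A short sketch of the int-to-float allowance, assuming protorpc-style fields:

from protorpc import messages

f = messages.FloatField(1)
assert f.validate_element(3) == 3.0    # ints are coerced to float
assert f.validate_element(2.5) == 2.5  # floats pass through unchanged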
388,635
Internal validation function. Validate an internal value using a function to validate individual elements. Args: value: Value to validate. validate_element: Function to use to validate individual elements. Raises: ValidationError if value is not expected type.
def __validate(self, value, validate_element):
    if not self.repeated:
        return validate_element(value)
    else:
        # Must be a list or tuple, may not be a string.
        if isinstance(value, (list, tuple)):
            result = []
            for element in value:
                if element is None:
                    try:
                        name = self.name
                    except AttributeError:
                        raise ValidationError(
                            'Repeated values for %s '
                            'may not be None' % self.__class__.__name__)
                    else:
                        raise ValidationError(
                            'Repeated values for field %s '
                            'may not be None' % name)
                result.append(validate_element(element))
            return result
        elif value is not None:
            try:
                name = self.name
            except AttributeError:
                raise ValidationError('%s is repeated. Found: %s' % (
                    self.__class__.__name__, value))
            else:
                raise ValidationError(
                    'Field %s is repeated. Found: %s' % (name, value))
    return value
388,636
Set value on message. Args: message_instance: Message instance to set value on. value: Value to set on message.
def __set__(self, message_instance, value):
    t = self.type
    if isinstance(t, type) and issubclass(t, Message):
        if self.repeated:
            if value and isinstance(value, (list, tuple)):
                value = [(t(**v) if isinstance(v, dict) else v)
                         for v in value]
        elif isinstance(value, dict):
            value = t(**value)
    super(MessageField, self).__set__(message_instance, value)
388,639
Convert a message to a value instance. Used by deserializers to convert from underlying messages to value of expected user type. Args: message: A message instance of type self.message_type. Returns: Value of self.message_type.
def value_from_message(self, message):
    if not isinstance(message, self.message_type):
        raise DecodeError('Expected type %s, got %s: %r' %
                          (self.message_type.__name__,
                           type(message).__name__, message))
    return message
388,641
Convert a value instance to a message. Used by serializers to convert Python user types to underlying messages for transmission. Args: value: A value of type self.type. Returns: An instance of type self.message_type.
def value_to_message(self, value):
    if not isinstance(value, self.type):
        raise EncodeError('Expected type %s, got %s: %r' %
                          (self.type.__name__,
                           type(value).__name__, value))
    return value
388,642
Validate default element of Enum field. Enum fields allow for delayed resolution of default values when the type of the field has not been resolved. The default value of a field may be a string or an integer. If the Enum type of the field has been resolved, the default value is validated against that type. Args: value: Value to validate. Raises: ValidationError if value is not of the expected type.
def validate_default_element(self, value):
    if isinstance(value, (six.string_types, six.integer_types)):
        # Validation of the value does not happen for delayed resolution
        # enumerated types.  Ignore if type is not yet resolved.
        if self.__type:
            self.__type(value)
        return value

    return super(EnumField, self).validate_default_element(value)
388,644
Add a custom wire encoding for a given enum value. This is primarily used in generated code, to handle enum values which happen to be Python keywords. Args: enum_type: (messages.Enum) An enum type python_name: (basestring) Python name for this value. json_name: (basestring) JSON name to be used on the wire. package: (NoneType, optional) No effect, exists for legacy compatibility.
def AddCustomJsonEnumMapping(enum_type, python_name, json_name,
                             package=None):  # pylint: disable=unused-argument
    if not issubclass(enum_type, messages.Enum):
        raise exceptions.TypecheckError(
            'Cannot set JSON enum mapping for non-enum "%s"' % enum_type)
    if python_name not in enum_type.names():
        raise exceptions.InvalidDataError(
            'Enum value %s not a value for type %s' % (python_name, enum_type))
    field_mappings = _JSON_ENUM_MAPPINGS.setdefault(enum_type, {})
    _CheckForExistingMappings('enum', enum_type, python_name, json_name)
    field_mappings[python_name] = json_name
388,666
Add a custom wire encoding for a given message field. This is primarily used in generated code, to handle field names which happen to be Python keywords. Args: message_type: (messages.Message) A message type python_name: (basestring) Python name for this value. json_name: (basestring) JSON name to be used on the wire. package: (NoneType, optional) No effect, exists for legacy compatibility.
def AddCustomJsonFieldMapping(message_type, python_name, json_name,
                              package=None):  # pylint: disable=unused-argument
    if not issubclass(message_type, messages.Message):
        raise exceptions.TypecheckError(
            'Cannot set JSON field mapping for '
            'non-message "%s"' % message_type)
    try:
        _ = message_type.field_by_name(python_name)
    except KeyError:
        raise exceptions.InvalidDataError(
            'Field %s not recognized for type %s' % (
                python_name, message_type))
    field_mappings = _JSON_FIELD_MAPPINGS.setdefault(message_type, {})
    _CheckForExistingMappings('field', message_type, python_name, json_name)
    field_mappings[python_name] = json_name
388,667
Decode the given JSON value. Args: field: a messages.Field for the field we're decoding. value: a python value we'd like to decode. Returns: A value suitable for assignment to field.
def decode_field(self, field, value):
    for decoder in _GetFieldCodecs(field, 'decoder'):
        result = decoder(field, value)
        value = result.value
        if result.complete:
            return value
    if isinstance(field, messages.MessageField):
        field_value = self.decode_message(
            field.message_type, json.dumps(value))
    elif isinstance(field, messages.EnumField):
        value = GetCustomJsonEnumMapping(
            field.type, json_name=value) or value
        try:
            field_value = super(
                _ProtoJsonApiTools, self).decode_field(field, value)
        except messages.DecodeError:
            if not isinstance(value, six.string_types):
                raise
            field_value = None
    else:
        field_value = super(
            _ProtoJsonApiTools, self).decode_field(field, value)
    return field_value
388,680
Encode the given value as JSON. Args: field: a messages.Field for the field we're encoding. value: a value for field. Returns: A python value suitable for json.dumps.
def encode_field(self, field, value):
    for encoder in _GetFieldCodecs(field, 'encoder'):
        result = encoder(field, value)
        value = result.value
        if result.complete:
            return value
    if isinstance(field, messages.EnumField):
        if field.repeated:
            remapped_value = [GetCustomJsonEnumMapping(
                field.type, python_name=e.name) or e.name for e in value]
        else:
            remapped_value = GetCustomJsonEnumMapping(
                field.type, python_name=value.name)
        if remapped_value:
            return remapped_value
    if (isinstance(field, messages.MessageField) and
            not isinstance(field, message_types.DateTimeField)):
        value = json.loads(self.encode_message(value))
    return super(_ProtoJsonApiTools, self).encode_field(field, value)
388,682
Takes raw data as input and returns a str in Python 3 or unicode in Python 2. Args: raw_data: Python 2 str, Python 3 bytes or str to port encoding: string giving the name of an encoding errors: this specifies the treatment of characters which are invalid in the input encoding Returns: str (Python 3) or unicode (Python 2)
def ported_string(raw_data, encoding='utf-8', errors='ignore'):
    if not raw_data:
        return six.text_type()

    if isinstance(raw_data, six.text_type):
        return raw_data.strip()

    if six.PY2:
        try:
            return six.text_type(raw_data, encoding, errors).strip()
        except LookupError:
            return six.text_type(raw_data, "utf-8", errors).strip()

    if six.PY3:
        try:
            return six.text_type(raw_data, encoding).strip()
        except (LookupError, UnicodeDecodeError):
            return six.text_type(raw_data, "utf-8", errors).strip()
389,237
Given a raw header, returns a decoded header. Args: header (string): header to decode Returns: str (Python 3) or unicode (Python 2)
def decode_header_part(header):
    if not header:
        return six.text_type()

    output = six.text_type()

    try:
        for d, c in decode_header(header):
            c = c if c else 'utf-8'
            output += ported_string(d, c, 'ignore')
    # Header parsing failed, when header has charset Shift_JIS
    except (HeaderParseError, UnicodeError):
        log.error("Failed decoding header part: {}".format(header))
        output += header

    return output
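Underneath, this relies on the stdlib email.header.decode_header; a quick self-contained look at what that call yields for an RFC 2047 encoded word:

from email.header import decode_header

parts = decode_header('=?utf-8?q?Caf=C3=A9?=')
data, charset = parts[0]  # (b'Caf\xc3\xa9', 'utf-8')
assert charset == 'utf-8'
assert data.decode(charset) == 'Café'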
389,238
This function returns the fingerprints of data. Args: data (string): raw data Returns: namedtuple: fingerprints md5, sha1, sha256, sha512
def fingerprints(data):
    Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")

    if six.PY2:
        if not isinstance(data, str):
            data = data.encode("utf-8")
    elif six.PY3:
        if not isinstance(data, bytes):
            data = data.encode("utf-8")

    # md5
    md5 = hashlib.md5()
    md5.update(data)
    md5 = md5.hexdigest()

    # sha1
    sha1 = hashlib.sha1()
    sha1.update(data)
    sha1 = sha1.hexdigest()

    # sha256
    sha256 = hashlib.sha256()
    sha256.update(data)
    sha256 = sha256.hexdigest()

    # sha512
    sha512 = hashlib.sha512()
    sha512.update(data)
    sha512 = sha512.hexdigest()

    return Hashes(md5, sha1, sha256, sha512)
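Usage is a one-liner, and the digests can be cross-checked against hashlib directly (fingerprints here refers to the function defined above):

import hashlib

h = fingerprints('hello')  # str input is encoded to UTF-8 first
assert h.md5 == hashlib.md5(b'hello').hexdigest()
assert h.sha256 == hashlib.sha256(b'hello').hexdigest()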
389,241
Execute the msgconvert tool to convert an Outlook msg mail into eml format. Args: email (string): file path of Outlook msg mail Returns: tuple with file path of converted mail and standard output data (unicode Python 2, str Python 3)
def msgconvert(email):
    log.debug("Started converting Outlook email")
    temph, temp = tempfile.mkstemp(prefix="outlook_")
    command = ["msgconvert", "--outfile", temp, email]

    try:
        if six.PY2:
            with open(os.devnull, "w") as devnull:
                out = subprocess.Popen(
                    command, stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=devnull)
        elif six.PY3:
            out = subprocess.Popen(
                command, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    except OSError:
        message = "To use this function you must install 'msgconvert' tool"
        log.exception(message)
        raise MailParserOSError(message)
    else:
        stdoutdata, _ = out.communicate()
        return temp, stdoutdata.decode("utf-8").strip()
    finally:
        os.close(temph)
389,242
Parse a single received header. Return a dictionary of values by clause. Arguments: received {str} -- single received header Raises: MailParserReceivedParsingError -- Raised when a received header cannot be parsed Returns: dict -- values by clause
def parse_received(received):
    values_by_clause = {}

    for pattern in RECEIVED_COMPILED_LIST:
        matches = [match for match in pattern.finditer(received)]

        if len(matches) == 0:
            # no matches for this clause, but it's ok! keep going!
            log.debug("No matches found for %s in %s" % (
                pattern.pattern, received))
            continue
        elif len(matches) > 1:
            # uh, can't have more than one of each clause in a received.
            # so either there's more than one or the current regex is wrong
            msg = "More than one match found for %s in %s" % (
                pattern.pattern, received)
            log.error(msg)
            raise MailParserReceivedParsingError(msg)
        else:
            # otherwise we have one matching clause!
            log.debug("Found one match for %s in %s" % (
                pattern.pattern, received))
            match = matches[0].groupdict()
            if six.PY2:
                values_by_clause[match.keys()[0]] = match.values()[0]
            elif six.PY3:
                key = list(match.keys())[0]
                value = list(match.values())[0]
                values_by_clause[key] = value

    if len(values_by_clause) == 0:
        # we weren't able to match anything...
        msg = "Unable to match any clauses in %s" % (received)
        log.error(msg)
        raise MailParserReceivedParsingError(msg)

    return values_by_clause
389,243
This function parses the received headers. Args: receiveds (list): list of raw received headers Returns: a list of parsed received headers with the first hop in first position
def receiveds_parsing(receiveds):
    parsed = []
    receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
    n = len(receiveds)
    log.debug("Nr. of receiveds. {}".format(n))

    for idx, received in enumerate(receiveds):
        log.debug("Parsing received {}/{}".format(idx + 1, n))
        log.debug("Try to parse {!r}".format(received))
        try:
            # try to parse the current received header...
            values_by_clause = parse_received(received)
        except MailParserReceivedParsingError:
            # if we can't, let's append the raw
            parsed.append({'raw': received})
        else:
            # otherwise append the full values_by_clause dict
            parsed.append(values_by_clause)

    log.debug("len(receiveds) %s, len(parsed) %s" % (
        len(receiveds), len(parsed)))
    if len(receiveds) != len(parsed):
        # something really bad happened,
        # so just return raw receiveds with hop indices
        log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, "
                  "parsed: %s" % (len(receiveds), len(parsed),
                                  receiveds, parsed))
        return receiveds_not_parsed(receiveds)
    else:
        # all's good! we have parsed or raw receiveds for each received header
        return receiveds_format(parsed)
389,244
If receiveds could not be parsed, makes a new structure with a raw field. It's useful to keep the same structure as parsed receiveds. Args: receiveds (list): list of raw received headers Returns: a list of not parsed received headers with the first hop in first position
def receiveds_not_parsed(receiveds):
    log.debug("Receiveds for this email are not parsed")
    output = []
    counter = Counter()

    for i in receiveds[::-1]:
        j = {"raw": i.strip()}
        j["hop"] = counter["hop"] + 1
        counter["hop"] += 1
        output.append(j)

    return output
389,246
Given a list of received hops, adds metadata and reformats field values. Args: receiveds (list): list of received hops already formatted Returns: list of receiveds reformatted and with new fields
def receiveds_format(receiveds):
    log.debug("Receiveds for this email are parsed")
    output = []
    counter = Counter()

    for i in receiveds[::-1]:
        # Clean strings
        j = {k: v.strip() for k, v in i.items() if v}

        # Add hop
        j["hop"] = counter["hop"] + 1

        # Add UTC date
        if i.get("date"):
            # Modify date to manage strange header like:
            # "for <eboktor@romolo.com>; Tue, 7 Mar 2017 14:29:24 -0800",
            i["date"] = i["date"].split(";")[-1]
            try:
                j["date_utc"], _ = convert_mail_date(i["date"])
            except TypeError:
                j["date_utc"] = None

        # Add delay
        size = len(output)
        now = j.get("date_utc")
        if size and now:
            before = output[counter["hop"] - 1].get("date_utc")
            if before:
                j["delay"] = (now - before).total_seconds()
            else:
                j["delay"] = 0
        else:
            j["delay"] = 0

        # append result
        output.append(j)

        # new hop
        counter["hop"] += 1

    # Convert dates to ISO strings now that all delays are computed.
    for i in output:
        if i.get("date_utc"):
            i["date_utc"] = i["date_utc"].isoformat()

    return output
389,247
Gets an email.message.Message and a header name and returns the mail header decoded with the correct charset. Args: message (email.message.Message): email message object name (string): header to get Returns: decoded header
def get_header(message, name):
    header = message.get(name)
    log.debug("Getting header {!r}: {!r}".format(name, header))
    if header:
        return decode_header_part(header)
    return six.text_type()
389,249
Given an email.message.Message, return a set with all email parts to get Args: message (email.message.Message): email message object complete (bool): if True returns all email headers Returns: set with all email parts
def get_mail_keys(message, complete=True):
    if complete:
        log.debug("Get all headers")
        all_headers_keys = {i.lower() for i in message.keys()}
        all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys
    else:
        log.debug("Get only mains headers")
        all_parts = ADDRESSES_HEADERS | OTHERS_PARTS

    log.debug("All parts to get: {}".format(", ".join(all_parts)))

    return all_parts
389,250
This function writes a sample on the file system. Args: binary (bool): True if it's a binary file payload: payload of sample, in base64 if it's binary path (string): path of file filename (string): name of file
def write_sample(binary, payload, path, filename):  # pragma: no cover
    if not os.path.exists(path):
        os.makedirs(path)

    sample = os.path.join(path, filename)

    if binary:
        with open(sample, "wb") as f:
            f.write(base64.b64decode(payload))
    else:
        with open(sample, "w") as f:
            f.write(payload)
389,254
Init a new object from a file-like object. Not for Outlook msg. Args: fp (file-like object): file-like object of raw email Returns: Instance of MailParser
def from_file_obj(cls, fp):
    log.debug("Parsing email from file object")
    try:
        fp.seek(0)
    except IOError:
        # When stdout is a TTY it's a character device
        # and it's not seekable, you cannot seek in a TTY.
        pass
    finally:
        s = fp.read()

    return cls.from_string(s)
389,258
Init a new object from a file path. Args: fp (string): file path of raw email is_outlook (boolean): True if it's an Outlook email Returns: Instance of MailParser
def from_file(cls, fp, is_outlook=False):
    log.debug("Parsing email from file {!r}".format(fp))

    with ported_open(fp) as f:
        message = email.message_from_file(f)

    if is_outlook:
        log.debug("Removing temp converted Outlook email {!r}".format(fp))
        os.remove(fp)

    return cls(message)
389,259
Init a new object from an Outlook message file, mime type: application/vnd.ms-outlook. Args: fp (string): file path of raw Outlook email Returns: Instance of MailParser
def from_file_msg(cls, fp):
    log.debug("Parsing email from file Outlook")
    f, _ = msgconvert(fp)
    return cls.from_file(f, True)
389,260
Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser
def from_string(cls, s):
    log.debug("Parsing email from string")
    message = email.message_from_string(s)
    return cls(message)
389,261
Init a new object from bytes. Args: bt (bytes-like object): raw email as bytes-like object Returns: Instance of MailParser
def from_bytes(cls, bt):
    log.debug("Parsing email from bytes")
    if six.PY2:
        raise MailParserEnvironmentError(
            "Parsing from bytes is valid only for Python 3.x version")
    message = email.message_from_bytes(bt)
    return cls(message)
389,262
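A hedged sketch tying the constructors together; it assumes the class is exported as mailparser.MailParser and uses placeholder content:

import mailparser

raw = "From: a@example.com\nSubject: hi\n\nbody\n"
m1 = mailparser.MailParser.from_string(raw)
m2 = mailparser.MailParser.from_bytes(raw.encode("utf-8"))  # Python 3 only

with open("/tmp/mail.eml") as fp:  # hypothetical raw email on disk
    m3 = mailparser.MailParser.from_file_obj(fp)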
Add new defects and defects categories to object attributes. The defects are a list of all the problems found when parsing this message. Args: part (string): mail part part_content_type (string): content type of part
def _append_defects(self, part, part_content_type):
    part_defects = {}

    for e in part.defects:
        defects = "{}: {}".format(e.__class__.__name__, e.__doc__)
        self._defects_categories.add(e.__class__.__name__)
        part_defects.setdefault(part_content_type, []).append(defects)
        log.debug("Added defect {!r}".format(defects))

    # Tag mail with defect
    if part_defects:
        self._has_defects = True

        # Save all defects
        self._defects.append(part_defects)
389,264
Compute and factorize the covariance matrix. Args: x (ndarray[nsamples, ndim]): The independent coordinates of the data points. yerr (ndarray[nsamples] or float): The Gaussian uncertainties on the data points at coordinates ``x``. These values will be added in quadrature to the diagonal of the covariance matrix.
def compute(self, x, yerr):
    # Compute the kernel matrix.
    K = self.kernel.get_value(x)
    K[np.diag_indices_from(K)] += yerr ** 2

    # Factor the matrix and compute the log-determinant.
    self._factor = (cholesky(K, overwrite_a=True, lower=False), False)
    self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))

    self.computed = True
389,994
r""" Apply the inverse of the covariance matrix to the input by solving .. math:: K\,x = y Args: y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or matrix :math:`y`. in_place (Optional[bool]): Should the data in ``y`` be overwritten with the result :math:`x`? (default: ``False``)
def apply_inverse(self, y, in_place=False):
    return cho_solve(self._factor, y, overwrite_b=in_place)
389,995
r""" Compute the inner product of a vector with the inverse of the covariance matrix applied to itself: .. math:: y\,K^{-1}\,y Args: y (ndarray[nsamples]): The vector :math:`y`.
def dot_solve(self, y):
    return np.dot(y.T, cho_solve(self._factor, y))
389,996
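The three methods above share one Cholesky factor; here is a self-contained NumPy/SciPy sketch of the same algebra on a toy squared-exponential kernel (illustrative, not the class's own code):

import numpy as np
from scipy.linalg import cholesky, cho_solve

x = np.linspace(0, 1, 5)
yerr = 0.1
K = np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2)  # toy kernel matrix
K[np.diag_indices_from(K)] += yerr ** 2            # noise on the diagonal

factor = (cholesky(K, lower=False), False)         # as in compute()
log_det = 2 * np.sum(np.log(np.diag(factor[0])))   # log|K|

y = np.sin(x)
alpha = cho_solve(factor, y)              # apply_inverse: solves K x = y
chi2 = np.dot(y.T, cho_solve(factor, y))  # dot_solve: y^T K^{-1} y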
Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
def get_parameter_dict(self, include_frozen=False):
    return OrderedDict(zip(
        self.get_parameter_names(include_frozen=include_frozen),
        self.get_parameter_vector(include_frozen=include_frozen),
    ))
390,004
Get a list of the parameter names Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
def get_parameter_names(self, include_frozen=False):
    if include_frozen:
        return self.parameter_names
    return tuple(p for p, f in
                 zip(self.parameter_names, self.unfrozen_mask) if f)
390,005
Get a list of the parameter bounds Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
def get_parameter_bounds(self, include_frozen=False):
    if include_frozen:
        return self.parameter_bounds
    return list(p for p, f in
                zip(self.parameter_bounds, self.unfrozen_mask) if f)
390,006
Get an array of the parameter values in the correct order Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
def get_parameter_vector(self, include_frozen=False):
    if include_frozen:
        return self.parameter_vector
    return self.parameter_vector[self.unfrozen_mask]
390,007
Set the parameter values to the given vector. Args: vector (array[vector_size] or array[full_size]): The target parameter vector. This must be in the same order as ``parameter_names`` and it should only include frozen parameters if ``include_frozen`` is ``True``. include_frozen (Optional[bool]): Should the frozen parameters be included in the given vector? (default: ``False``)
def set_parameter_vector(self, vector, include_frozen=False):
    v = self.parameter_vector
    if include_frozen:
        v[:] = vector
    else:
        v[self.unfrozen_mask] = vector
    self.parameter_vector = v
    self.dirty = True
390,008
Freeze a parameter by name Args: name: The name of the parameter
def freeze_parameter(self, name):
    i = self.get_parameter_names(include_frozen=True).index(name)
    self.unfrozen_mask[i] = False
390,010
Thaw a parameter by name Args: name: The name of the parameter
def thaw_parameter(self, name):
    i = self.get_parameter_names(include_frozen=True).index(name)
    self.unfrozen_mask[i] = True
390,011
Get a parameter value by name Args: name: The name of the parameter
def get_parameter(self, name):
    i = self.get_parameter_names(include_frozen=True).index(name)
    return self.get_parameter_vector(include_frozen=True)[i]
390,012
Set a parameter value by name Args: name: The name of the parameter value (float): The new value for the parameter
def set_parameter(self, name, value):
    i = self.get_parameter_names(include_frozen=True).index(name)
    v = self.get_parameter_vector(include_frozen=True)
    v[i] = value
    self.set_parameter_vector(v, include_frozen=True)
390,013
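To make the freeze/thaw bookkeeping concrete, a minimal stand-in (not the library's class) carrying the same parameter_names/parameter_vector/unfrozen_mask state:

import numpy as np

class ToyModel(object):
    # Hypothetical two-parameter model following the same protocol.
    parameter_names = ("alpha", "beta")

    def __init__(self, alpha, beta):
        self.parameter_vector = np.array([alpha, beta], dtype=float)
        self.unfrozen_mask = np.ones(2, dtype=bool)

    def freeze_parameter(self, name):
        self.unfrozen_mask[self.parameter_names.index(name)] = False

    def get_parameter_names(self, include_frozen=False):
        if include_frozen:
            return self.parameter_names
        return tuple(p for p, f in
                     zip(self.parameter_names, self.unfrozen_mask) if f)

    def get_parameter_vector(self, include_frozen=False):
        if include_frozen:
            return self.parameter_vector
        return self.parameter_vector[self.unfrozen_mask]

m = ToyModel(1.0, 2.0)
m.freeze_parameter("beta")
assert m.get_parameter_names() == ("alpha",)
assert m.get_parameter_vector().tolist() == [1.0]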
Initialize the property. Args: get_media_files_func (callable): The function to call to generate the media files. media_cls (type): The Media class owning the property. extra_files (object): Files listed in the original ``css`` or ``js`` attribute on the Media class.
def __init__(self, get_media_files_func, media_cls, extra_files):
    self._get_media_files_func = get_media_files_func
    self._media_cls = media_cls
    self._extra_files = extra_files
390,117
Construct the class. Args: name (bytes): The name of the class. bases (tuple): The base classes for the class. attrs (dict): The attributes going into the class. Returns: type: The new class.
def __new__(cls, name, bases, attrs):
    new_class = super(PipelineFormMediaMetaClass, cls).__new__(
        cls, name, bases, attrs)

    # If we define any packages, we'll need to use our special
    # PipelineFormMediaProperty class. We use this instead of intercepting
    # in __getattribute__ because Django does not access them through
    # normal property access. Instead, it grabs the Media class's __dict__
    # and accesses them from there. By using these special properties, we
    # can handle direct access (Media.css) and dictionary-based access
    # (Media.__dict__['css']).
    if 'css_packages' in attrs:
        new_class.css = PipelineFormMediaProperty(
            cls._get_css_files, new_class, attrs.get('css') or {})

    if 'js_packages' in attrs:
        new_class.js = PipelineFormMediaProperty(
            cls._get_js_files, new_class, attrs.get('js') or [])

    return new_class
390,118
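Hedged usage: in django-pipeline this metaclass backs the PipelineFormMedia base class; the package names below are placeholders that would have to exist in the PIPELINE settings:

from django import forms

from pipeline.forms import PipelineFormMedia

class MyForm(forms.Form):
    class Media(PipelineFormMedia):
        # 'my-styles' and 'my-scripts' are hypothetical package names.
        css_packages = {'all': ('my-styles',)}
        js_packages = ('my-scripts',)
        js = ('https://cdn.example.com/extra.js',)  # plain extra file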
Return all CSS files from the Media class. Args: extra_files (dict): The contents of the Media class's original :py:attr:`css` attribute, if one was provided. Returns: dict: The CSS media types and files to return for the :py:attr:`css` attribute.
def _get_css_files(cls, extra_files):
    packager = Packager()
    css_packages = getattr(cls, 'css_packages', {})

    return dict(
        (media_target,
         cls._get_media_files(packager=packager,
                              media_packages=media_packages,
                              media_type='css',
                              extra_files=extra_files.get(media_target, [])))
        for media_target, media_packages in six.iteritems(css_packages)
    )
390,119
Return all JavaScript files from the Media class. Args: extra_files (list): The contents of the Media class's original :py:attr:`js` attribute, if one was provided. Returns: list: The JavaScript files to return for the :py:attr:`js` attribute.
def _get_js_files(cls, extra_files):
    return cls._get_media_files(
        packager=Packager(),
        media_packages=getattr(cls, 'js_packages', {}),
        media_type='js',
        extra_files=extra_files)
390,120
Initializes a path specification. Note that the TSK path specification must have a parent. Args: data_stream (Optional[str]): data stream name, where None indicates the default data stream. inode (Optional[int]): inode. location (Optional[str]): location. parent (Optional[PathSpec]): parent path specification. Raises: ValueError: when inode and location, or parent are not set.
def __init__(
        self, data_stream=None, inode=None, location=None, parent=None,
        **kwargs):
    # Note that pytsk/libtsk overloads inode and a value of 0 is valid in
    # contrast to an ext file system.
    if (inode is None and not location) or not parent:
        raise ValueError('Missing inode and location, or parent value.')

    super(TSKPathSpec, self).__init__(parent=parent, **kwargs)

    self.data_stream = data_stream
    self.inode = inode
    self.location = location
391,141
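A hedged construction sketch using dfvfs' path-spec factory (the usual entry point rather than calling the constructor directly); the image path is a placeholder:

from dfvfs.lib import definitions
from dfvfs.path import factory

os_path_spec = factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/image.raw')

# parent is required, per the ValueError documented above.
tsk_path_spec = factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_TSK, location='/', parent=os_path_spec)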
Initializes a volume scanner. Args: mediator (VolumeScannerMediator): a volume scanner mediator.
def __init__(self, mediator=None):
    super(VolumeScanner, self).__init__()
    self._mediator = mediator
    self._source_path = None
    self._source_scanner = source_scanner.SourceScanner()
    self._source_type = None
391,155
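A hedged sketch of how the scanner is typically driven; GetBasePathSpecs comes from the same dfvfs helper module, and the image path is a placeholder:

from dfvfs.helpers import volume_scanner

# Without a mediator, sources with multiple volumes raise ScannerError.
scanner = volume_scanner.VolumeScanner()
# base_path_specs = scanner.GetBasePathSpecs('/tmp/image.raw')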
Determines the APFS volume identifiers. Args: scan_node (SourceScanNode): scan node. Returns: list[str]: APFS volume identifiers. Raises: ScannerError: if the format of or within the source is not supported or the scan node is invalid. UserAbort: if the user requested to abort.
def _GetAPFSVolumeIdentifiers(self, scan_node):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []

    if len(volume_identifiers) > 1:
        if not self._mediator:
            raise errors.ScannerError(
                'Unable to proceed. APFS volumes found but no mediator to '
                'determine how they should be used.')

        try:
            volume_identifiers = self._mediator.GetAPFSVolumeIdentifiers(
                volume_system, volume_identifiers)
        except KeyboardInterrupt:
            raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='apfs')
391,156
Determines the TSK partition identifiers. Args: scan_node (SourceScanNode): scan node. Returns: list[str]: TSK partition identifiers. Raises: ScannerError: if the format of or within the source is not supported or the scan node is invalid or if the volume for a specific identifier cannot be retrieved. UserAbort: if the user requested to abort.
def _GetTSKPartitionIdentifiers(self, scan_node):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []

    if len(volume_identifiers) == 1:
        return volume_identifiers

    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. Partitions found but no mediator to '
            'determine how they should be used.')

    try:
        volume_identifiers = self._mediator.GetPartitionIdentifiers(
            volume_system, volume_identifiers)
    except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='p')
391,157
Determines the VSS store identifiers. Args: scan_node (SourceScanNode): scan node. Returns: list[str]: VSS store identifiers. Raises: ScannerError: if the scan node is invalid or if VSS store identifiers are found but no mediator is provided. UserAbort: if the user requested to abort.
def _GetVSSStoreIdentifiers(self, scan_node):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(scan_node.path_spec)

    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []

    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. VSS stores found but no mediator to '
            'determine how they should be used.')

    try:
        volume_identifiers = self._mediator.GetVSSStoreIdentifiers(
            volume_system, volume_identifiers)
    except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='vss')
391,158
Normalizes volume identifiers. Args: volume_system (VolumeSystem): volume system. volume_identifiers (list[int|str]): allowed volume identifiers, formatted as an integer or string with prefix. prefix (Optional[str]): volume identifier prefix. Returns: list[str]: volume identifiers with prefix. Raises: ScannerError: if the volume identifier is not supported or no volume could be found that corresponds with the identifier.
def _NormalizedVolumeIdentifiers(
        self, volume_system, volume_identifiers, prefix='v'):
    normalized_volume_identifiers = []
    for volume_identifier in volume_identifiers:
        if isinstance(volume_identifier, int):
            volume_identifier = '{0:s}{1:d}'.format(
                prefix, volume_identifier)

        elif not volume_identifier.startswith(prefix):
            try:
                volume_identifier = int(volume_identifier, 10)
                volume_identifier = '{0:s}{1:d}'.format(
                    prefix, volume_identifier)
            except (TypeError, ValueError):
                pass

        try:
            volume = volume_system.GetVolumeByIdentifier(volume_identifier)
        except KeyError:
            volume = None

        if not volume:
            raise errors.ScannerError(
                'Volume missing for identifier: {0:s}.'.format(
                    volume_identifier))

        normalized_volume_identifiers.append(volume_identifier)

    return normalized_volume_identifiers
391,159
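The prefix-normalization rule in isolation: a standalone re-implementation of just that step (the volume-existence check needs a real volume system, so it is omitted here):

def normalize_prefixes(prefix, identifiers):
    normalized = []
    for ident in identifiers:
        if isinstance(ident, int):
            ident = '{0:s}{1:d}'.format(prefix, ident)
        elif not ident.startswith(prefix):
            try:
                ident = '{0:s}{1:d}'.format(prefix, int(ident, 10))
            except (TypeError, ValueError):
                pass  # e.g. 'apfs1' when prefix is 'p': left unchanged
        normalized.append(ident)
    return normalized

print(normalize_prefixes('p', [1, '2', 'p3']))  # ['p1', 'p2', 'p3']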
Scans an encrypted volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. Raises: ScannerError: if the format of or within the source is not supported, the scan node is invalid, there are no credentials defined for the format, or no mediator is provided and a locked scan node was found, e.g. an encrypted volume.
def _ScanEncryptedVolume(self, scan_context, scan_node):
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid or missing scan node.')

    credentials = credentials_manager.CredentialsManager.GetCredentials(
        scan_node.path_spec)
    if not credentials:
        raise errors.ScannerError('Missing credentials for scan node.')

    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. Encrypted volume found but no mediator to '
            'determine how it should be unlocked.')

    if self._mediator.UnlockEncryptedVolume(
            self._source_scanner, scan_context, scan_node, credentials):
        self._source_scanner.Scan(
            scan_context, scan_path_spec=scan_node.path_spec)
391,160