code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def load_global_settings():
    """Load the settings JSON file and merge it into the module-level settings.

    On first load, the parsed JSON becomes the global settings dict and the
    package path is recorded under ``package_path``.  On subsequent loads,
    dict-valued entries are merged into the existing settings while all
    other entries are overwritten.
    """
    global global_settings
    with open(settings_path, 'r') as settings_f:
        parsed = json.loads(settings_f.read())
    if global_settings is None:
        global_settings = parsed
        global_settings[u'package_path'] = package_dir
    else:
        for key, value in parsed.items():
            # dicts are merged rather than replaced
            if type(value) == dict:
                global_settings[key].update(value)
            else:
                global_settings[key] = value
Loads settings file containing paths to dependencies and other optional configuration elements.
def enforce_signature(function):
    """Enforce the signature of ``function`` by raising ``TypeError`` for
    invalid arguments.

    The return value is not checked.  Any parameter may be annotated with a
    desired type, or a tuple of allowed types; annotating with a value allows
    only that value (useful especially for ``None``).

    Example:

    >>> @enforce_signature
    ... def test(arg: bool, another: (int, None)):
    ...     pass
    ...
    >>> test(True, 5)
    >>> test(True, None)

    Any string value for any parameter would then trigger a ``TypeError``.

    :param function: The function to check.
    """
    # Local import so the file's import block stays untouched.
    from functools import wraps
    argspec = inspect.getfullargspec(function)
    annotations = argspec.annotations
    # Map positional index -> (allowed type(s), parameter name).
    unnamed_annotations = {}
    for i, arg in enumerate(argspec.args):
        if arg in annotations:
            unnamed_annotations[i] = (annotations[arg], arg)

    # Bug fix: the wrapper previously lost the wrapped function's name,
    # docstring and module; ``wraps`` preserves that metadata.
    @wraps(function)
    def decorated(*args, **kwargs):
        for i, (allowed, argname) in unnamed_annotations.items():
            if i < len(args):
                assert_right_type(args[i], allowed, argname)
        for argname, argval in kwargs.items():
            if argname in annotations:
                assert_right_type(argval, annotations[argname], argname)
        return function(*args, **kwargs)

    return decorated
Enforces the signature of the function by throwing TypeErrors if invalid arguments are provided. The return value is not checked. You can annotate any parameter of your function with the desired type or a tuple of allowed types. If you annotate the function with a value, this value only will be allowed (useful especially for None). Example: >>> @enforce_signature ... def test(arg: bool, another: (int, None)): ... pass ... >>> test(True, 5) >>> test(True, None) Any string value for any parameter e.g. would then trigger a TypeError. :param function: The function to check.
def bulk_create_datetimes(date_start: DateTime, date_end: DateTime, **kwargs) -> List[DateTime]:
    """Bulk create datetime objects from ``date_start`` to ``date_end``.

    The step between consecutive values is given by keyword arguments
    forwarded to ``datetime.timedelta``: ``days``, ``hours``, ``minutes``,
    ``seconds``, ``microseconds``.

    :param date_start: Begin of the range (inclusive).
    :param date_end: End of the range (inclusive when hit exactly).
    :param kwargs: Keyword arguments for ``datetime.timedelta``.
    :return: List of datetime objects.
    :raises ValueError: When ``date_start`` or ``date_end`` is not passed,
        when ``date_start`` is larger than ``date_end``, or when the step
        is not positive (which would loop forever).
    """
    # Bug fix: was ``and`` — omitting only one of the two bounds skipped
    # the check entirely.
    if not date_start or not date_end:
        raise ValueError('You must pass date_start and date_end')
    if date_end < date_start:
        raise ValueError('date_start can not be larger than date_end')
    step = timedelta(**kwargs)
    if step <= timedelta(0):
        # A zero/negative step previously caused an infinite loop.
        raise ValueError('timedelta step must be positive')
    dt_objects = []
    current = date_start
    while current <= date_end:
        # Bug fix: append before stepping — the original incremented first,
        # so the list omitted date_start and overshot date_end by one step.
        dt_objects.append(current)
        current += step
    return dt_objects
Bulk create datetime objects. This method creates list of datetime objects from ``date_start`` to ``date_end``. You can use the following keyword arguments: * ``days`` * ``hours`` * ``minutes`` * ``seconds`` * ``microseconds`` See datetime module documentation for more: https://docs.python.org/3.7/library/datetime.html#timedelta-objects :param date_start: Begin of the range. :param date_end: End of the range. :param kwargs: Keyword arguments for datetime.timedelta :return: List of datetime objects :raises: ValueError: When ``date_start``/``date_end`` not passed and when ``date_start`` larger than ``date_end``.
def is_time_valid(self, timestamp):
    """Check if ``timestamp`` is valid for this Timerange.

    :param timestamp: time to check
    :type timestamp: int
    :return: True if time is valid (in interval), False otherwise
    :rtype: bool
    """
    seconds = get_sec_from_morning(timestamp)
    range_start = self.hstart * 3600 + self.mstart * 60
    range_end = self.hend * 3600 + self.mend * 60
    return self.is_valid and range_start <= seconds <= range_end
Check if time is valid for this Timerange If sec_from_morning is not provided, get the value. :param timestamp: time to check :type timestamp: int :return: True if time is valid (in interval), False otherwise :rtype: bool
def set_client_params(
        self, start_unsubscribed=None, clear_on_exit=None,
        unsubscribe_on_reload=None, announce_interval=None):
    """Set subscriber-related parameters.

    :param bool start_unsubscribed: Configure subscriptions but do not
        send them (useful with master FIFO).
    :param bool clear_on_exit: Force clear instead of unsubscribe during
        shutdown.
    :param bool unsubscribe_on_reload: Force unsubscribe request even
        during graceful reload.
    :param int announce_interval: Send subscription announce at the
        specified interval. Default: 10 master cycles.
    """
    bool_options = (
        ('start-unsubscribed', start_unsubscribed),
        ('subscription-clear-on-shutdown', clear_on_exit),
        ('unsubscribe-on-graceful-reload', unsubscribe_on_reload),
    )
    for option_name, option_value in bool_options:
        self._set(option_name, option_value, cast=bool)
    self._set('subscribe-freq', announce_interval)
    return self._section
Sets subscribers related params. :param bool start_unsubscribed: Configure subscriptions but do not send them. .. note:: Useful with master FIFO. :param bool clear_on_exit: Force clear instead of unsubscribe during shutdown. :param bool unsubscribe_on_reload: Force unsubscribe request even during graceful reload. :param int announce_interval: Send subscription announce at the specified interval. Default: 10 master cycles.
def _post_transition(self, result, *args, **kwargs):
    """Run all post-transition hooks with the transition result."""
    hooks = self._filter_hooks(HOOK_AFTER, HOOK_ON_ENTER)
    for hook in hooks:
        hook(self.instance, result, *args, **kwargs)
Performs post-transition actions.
def add_recording_behavior(self, component, runnable):
    """Attach the component model's record specifications to a runnable.

    Every record in the component's simulation is tagged with the
    runnable's id and registered as a variable recorder on the current
    record target.

    @param component: Component model containing dynamics specifications.
    @param runnable: Runnable component to which recording is added.
    """
    for record in component.simulation.records:
        record.id = runnable.id
        self.current_record_target.add_variable_recorder(
            self.current_data_output, record)
Adds recording-related dynamics to a runnable component based on the dynamics specifications in the component model. @param component: Component model containing dynamics specifications. @type component: lems.model.component.FatComponent runnable: Runnable component to which dynamics is to be added. @type runnable: lems.sim.runnable.Runnable @raise SimBuildError: Raised when a target for recording could not be found.
def _calc_sizes(self): if self.size > 1024: self.unit = "Mb" self.size = (self.size / 1024) if self.size > 1024: self.unit = "Gb" self.size = (self.size / 1024)
Package size calculation
def _get_firmware_update_service_resource(self):
    """Get the firmware update service URI.

    :returns: firmware update service uri
    :raises: IloCommandNotSupportedError, if the URI is not found
    """
    manager, uri = self._get_ilo_details()
    try:
        return manager['Oem']['Hp']['links']['UpdateService']['href']
    except KeyError:
        raise exception.IloCommandNotSupportedError(
            "Firmware Update Service resource not found.")
Gets the firmware update service uri. :returns: firmware update service uri :raises: IloError, on an error from iLO. :raises: IloConnectionError, if not able to reach iLO. :raises: IloCommandNotSupportedError, for not finding the uri
def has_out_of_flow_tables(self):
    """Return True if the article has out-of-flow tables.

    A table-wrap that carries both a graphic (image) representation and an
    HTML table means the HTML table will be rendered out-of-flow in a
    separate tables document.

    Returns
    -------
    bool
        True if there are out-of-flow HTML tables, False otherwise.
    """
    body = self.article.body
    if body is None:
        return False
    return any(
        table_wrap.xpath('./graphic | ./alternatives/graphic')
        and table_wrap.xpath('./table | ./alternatives/table')
        for table_wrap in body.findall('.//table-wrap'))
Returns True if the article has out-of-flow tables, indicates separate tables document. This method is used to indicate whether rendering this article's content will result in the creation of out-of-flow HTML tables. This method has a base class implementation representing a common logic; if an article has a graphic(image) representation of a table then the HTML representation will be placed out-of-flow if it exists, if there is no graphic(image) representation then the HTML representation will be placed in-flow. Returns ------- bool True if there are out-of-flow HTML tables, False otherwise
def change_ref(self, gm=None, r0=None, lmax=None):
    """Return a new class instance referenced to a different gm or r0.

    Usage
    -----
    clm = x.change_ref([gm, r0, lmax])

    Parameters
    ----------
    gm : float, optional, default = self.gm
        Gravitational constant times the mass associated with the
        coefficients.
    r0 : float, optional, default = self.r0
        Reference radius of the spherical harmonic coefficients.
    lmax : int, optional, default = self.lmax
        Maximum spherical harmonic degree to output.
    """
    if lmax is None:
        lmax = self.lmax
    clm = self.pad(lmax)
    if gm is not None and gm != self.gm:
        # Coefficients scale inversely with gm.
        clm.coeffs *= self.gm / gm
        clm.gm = gm
        if self.errors is not None:
            clm.errors *= self.gm / gm
    if r0 is not None and r0 != self.r0:
        # Upward/downward continuation: degree-l terms scale as (r0_old/r0_new)**l.
        for degree in _np.arange(lmax + 1):
            factor = (self.r0 / r0) ** degree
            clm.coeffs[:, degree, :degree + 1] *= factor
            if self.errors is not None:
                clm.errors[:, degree, :degree + 1] *= factor
        clm.r0 = r0
    return clm
Return a new SHGravCoeffs class instance with a different reference gm or r0. Usage ----- clm = x.change_ref([gm, r0, lmax]) Returns ------- clm : SHGravCoeffs class instance. Parameters ---------- gm : float, optional, default = self.gm The gravitational constant times the mass that is associated with the gravitational potential coefficients. r0 : float, optional, default = self.r0 The reference radius of the spherical harmonic coefficients. lmax : int, optional, default = self.lmax Maximum spherical harmonic degree to output. Description ----------- This method returns a new class instance of the gravitational potential, but using a different reference gm or r0. When changing the reference radius r0, the spherical harmonic coefficients will be upward or downward continued under the assumption that the reference radius is exterior to the body.
def _get_attr_value(instance, attr, default=None): value = default if hasattr(instance, attr): value = getattr(instance, attr) if callable(value): value = value() return value
Simple helper to get the value of an instance's attribute if it exists. If the instance attribute is callable it will be called and the result will be returned. Optionally accepts a default value to return if the attribute is missing. Defaults to `None` >>> class Foo(object): ... bar = 'baz' ... def hi(self): ... return 'hi' >>> f = Foo() >>> _get_attr_value(f, 'bar') 'baz' >>> _get_attr_value(f, 'xyz') >>> _get_attr_value(f, 'xyz', False) False >>> _get_attr_value(f, 'hi') 'hi'
def tally(self, chain):
    """Add the current value to the trace for the given chain."""
    current_value = self._getfunc()
    self.db._rows[chain][self.name] = current_value
Adds current value to trace
def content_type(self, content_type):
    """Set the content_type of this Notificant.

    The value of the Content-Type header of the webhook POST request.

    :param content_type: The content_type of this Notificant.
    :type: str
    :raises ValueError: if the value is not one of the allowed types.
    """
    allowed_values = [
        "application/json",
        "text/html",
        "text/plain",
        "application/x-www-form-urlencoded",
        "",
    ]
    if content_type not in allowed_values:
        raise ValueError(
            "Invalid value for `content_type` ({0}), must be one of {1}"
            .format(content_type, allowed_values))
    self._content_type = content_type
Sets the content_type of this Notificant. The value of the Content-Type header of the webhook POST request. # noqa: E501 :param content_type: The content_type of this Notificant. # noqa: E501 :type: str
def logfile_generator(self):
    """Yield each log event, merging files when several are given.

    Unless exclusion is requested, all logfiles are fast-forwarded to the
    latest filter start limit first.  With a single logfile, a non-zero
    timezone offset is applied to each event's datetime.
    """
    if not self.args['exclude']:
        limits = [f.start_limit for f in self.filters
                  if hasattr(f, 'start_limit')]
        if limits:
            for logfile in self.args['logfile']:
                logfile.fast_forward(max(limits))
    logfiles = self.args['logfile']
    if len(logfiles) > 1:
        yield from self._merge_logfiles()
    else:
        tz_offset = self.args['timezone'][0]
        for logevent in logfiles[0]:
            if tz_offset != 0 and logevent.datetime:
                logevent._datetime = (logevent.datetime
                                      + timedelta(hours=tz_offset))
            yield logevent
Yield each line of the file, or the next line if several files.
def get_host_datastore_system(host_ref, hostname=None):
    """Return a host's datastore system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when omitted.
    """
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(
        service_instance,
        vim.HostDatastoreSystem,
        property_list=['datastore'],
        container_ref=host_ref,
        traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
Returns a host's datastore system host_ref Reference to the ESXi host hostname Name of the host. This argument is optional.
def find_globals_and_nonlocals(node, globs, nonlocals, code, version):
    """Search a parse-tree node for names needing 'global'/'nonlocal' statements.

    Recurses into SyntaxTree children, accumulating names written via
    global-store ops into ``globs`` and (Python 3+) free-variable stores
    into ``nonlocals``.
    """
    for child in node:
        if isinstance(child, SyntaxTree):
            globs, nonlocals = find_globals_and_nonlocals(
                child, globs, nonlocals, code, version)
            continue
        if child.kind in read_global_ops:
            globs.add(child.pattr)
        elif (version >= 3.0
              and child.kind in nonglobal_ops
              and child.pattr in code.co_freevars
              and child.pattr != code.co_name
              and code.co_name != '<lambda>'):
            nonlocals.add(child.pattr)
    return globs, nonlocals
search a node of parse tree to find variable names that need a either 'global' or 'nonlocal' statements added.
def ndim(self):
    """Number of dimensions of the grid (cached after first computation)."""
    try:
        return self.__ndim
    except AttributeError:
        # First access: derive from the coordinate vectors and cache.
        self.__ndim = len(self.coord_vectors)
        return self.__ndim
Number of dimensions of the grid.
def writelines(self, sequence):
    """Write a sequence of strings to the file. Does not add separators.

    Each write is chained on the previous one's Deferred, so writes occur
    strictly in order.
    """
    items = iter(sequence)

    def write_next(_=None):
        try:
            chunk = next(items)
        except StopIteration:
            return None
        return self.write(chunk).addCallback(write_next)

    return defer.maybeDeferred(write_next)
Write a sequence of strings to the file. Does not add separators.
def get_assessments_taken_for_assessment(self, assessment_id):
    """Get an ``AssessmentTakenList`` for the given assessment.

    Finds all offerings for the assessment, then all takens referencing
    those offerings, respecting the session's view filter.

    arg:    assessment_id (osid.id.Id): ``Id`` of an ``Assessment``
    return: (osid.assessment.AssessmentTakenList)
    raise:  NullArgument - ``assessment_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    """
    offered_collection = JSONClientValidated(
        'assessment',
        collection='AssessmentOffered',
        runtime=self._runtime)
    offered_result = offered_collection.find(
        dict({'assessmentId': str(assessment_id)},
             **self._view_filter())).sort('_id', DESCENDING)
    assessments_offered = objects.AssessmentOfferedList(
        offered_result, runtime=self._runtime)
    offered_ids = [str(offered.get_id())
                   for offered in assessments_offered]
    taken_collection = JSONClientValidated(
        'assessment',
        collection='AssessmentTaken',
        runtime=self._runtime)
    taken_result = taken_collection.find(
        dict({'assessmentOfferedId': {'$in': offered_ids}},
             **self._view_filter())).sort('_id', DESCENDING)
    return objects.AssessmentTakenList(
        taken_result, runtime=self._runtime, proxy=self._proxy)
Gets an ``AssessmentTakenList`` for the given assessment. In plenary mode, the returned list contains all known assessments taken or an error results. Otherwise, the returned list may contain only those assessments taken that are accessible through this session. arg: assessment_id (osid.id.Id): ``Id`` of an ``Assessment`` return: (osid.assessment.AssessmentTakenList) - the returned ``AssessmentTaken`` list raise: NullArgument - ``assessment_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
def get_cached_moderated_reddits(self):
    """Return a cached dictionary of the user's moderated reddits.

    This list is used internally.  Consider using the
    ``get_my_moderation`` function instead.
    """
    if self._mod_subs is None:
        cache = {'mod': self.reddit_session.get_subreddit('mod')}
        for subreddit in self.reddit_session.get_my_moderation(limit=None):
            cache[six.text_type(subreddit).lower()] = subreddit
        self._mod_subs = cache
    return self._mod_subs
Return a cached dictionary of the user's moderated reddits. This list is used internally. Consider using the `get_my_moderation` function instead.
def _str_to_int(self, string): string = string.lower() if string.endswith("l"): string = string[:-1] if string.lower().startswith("0x"): match = re.match(r'0[xX]([a-fA-F0-9]+)', string) return int(match.group(1), 0x10) else: return int(string)
Convert a numeric string to an int, handling decimal and hexadecimal ("0x" prefix) forms and tolerating a trailing "L" suffix.
def release(self, *args, **kwargs):
    """Really release the lock only if it's not a sub-lock, then reset
    the sub-lock status and mark the model as unlocked."""
    if not self.field.lockable:
        return
    if self.sub_lock_mode:
        # Sub-locks never release the real underlying lock.
        return
    super(FieldLock, self).release(*args, **kwargs)
    self.already_locked_by_model = self.sub_lock_mode = False
Really release the lock only if it's not a sub-lock. Then save the sub-lock status and mark the model as unlocked.
def unique_email_validator(form, field):
    """Raise a ValidationError when the submitted email is already in use."""
    user_manager = current_app.user_manager
    if user_manager.email_is_available(field.data):
        return
    raise ValidationError(
        _('This Email is already in use. Please try another one.'))
Email must be unique. This validator may NOT be customized.
def get_value(self, column=0, row=0):
    """Return the string value at (column, row), or None if the cell is empty.

    :param column: column index of the cell
    :param row: row index of the cell
    """
    # Fetch once: the original looked the item up twice and compared
    # against None with ``==`` instead of ``is``.
    item = self._widget.item(row, column)
    if item is None:
        return None
    return str(item.text())
Returns the value at column, row.
def resolution(self, indicator=None):
    """Update the URI to retrieve host resolutions for the given indicator.

    Args:
        indicator (string): The indicator to retrieve resolutions.
    """
    self._request_entity = 'dnsResolution'
    if indicator is None:
        self._request_uri = '{}/dnsResolutions'.format(self._request_uri)
    else:
        # With an explicit indicator, rebuild the URI from the API root.
        self._request_uri = '{}/dnsResolutions'.format(self._request_uri)
        self._request_uri = '{}/{}/dnsResolutions'.format(
            self._api_uri, indicator)
Update the URI to retrieve host resolutions for the provided indicator. Args: indicator (string): The indicator to retrieve resolutions.
def create_api_client(api='BatchV1'):
    """Create a Kubernetes API client using in-cluster config.

    :param api: String naming which Kubernetes API client to spawn.
        By default ``BatchV1``.
    :returns: Kubernetes python client object for the requested API.
    """
    k8s_config.load_incluster_config()
    api_configuration = client.Configuration()
    api_configuration.verify_ssl = False
    factories = {
        'extensions/v1beta1': client.ExtensionsV1beta1Api,
        'CoreV1': client.CoreV1Api,
        'StorageV1': client.StorageV1Api,
    }
    # Anything unrecognized falls back to the BatchV1 client.
    return factories.get(api, client.BatchV1Api)()
Create Kubernetes API client using config. :param api: String which represents which Kubernetes API to spawn. By default BatchV1. :returns: Kubernetes python client object for a specific API i.e. BatchV1.
def insert_tree(self, items, node, headers):
    """Recursively grow the FP tree by inserting ``items`` under ``node``.

    The first item either bumps an existing child's count or creates a new
    child, which is then linked into the header table's chain for that item.
    Remaining items recurse into the child.
    """
    head, remainder = items[0], items[1:]
    child = node.get_child(head)
    if child is None:
        child = node.add_child(head)
        # Append the new node to the end of this item's header chain.
        if headers[head] is None:
            headers[head] = child
        else:
            tail = headers[head]
            while tail.link is not None:
                tail = tail.link
            tail.link = child
    else:
        child.count += 1
    if remainder:
        self.insert_tree(remainder, child, headers)
Recursively grow FP tree.
def stop_listener(self):
    """Stop listening for packets: close the socket and clear track state."""
    sock = self.sock
    if sock is None:
        return
    sock.close()
    self.sock = None
    self.tracks = {}
stop listening for packets
def list_cert_bindings(site):
    """List certificate bindings for an IIS site.

    Args:
        site (str): The IIS site name.

    Returns:
        dict: Binding names mapped to their properties; empty when the
        site is unknown or has no certificate bindings.
    """
    ret = dict()
    sites = list_sites()
    if site not in sites:
        log.warning('Site not found: %s', site)
        return ret
    bindings = sites[site]['bindings']
    for name, props in bindings.items():
        if props['certificatehash']:
            ret[name] = props
    if not ret:
        log.warning('No certificate bindings found for site: %s', site)
    return ret
List certificate bindings for an IIS site. .. versionadded:: 2016.11.0 Args: site (str): The IIS site name. Returns: dict: A dictionary of the binding names and properties. CLI Example: .. code-block:: bash salt '*' win_iis.list_cert_bindings site
def trips(self, val):
    """Setter for ``trips``; keeps ``self._trips_i`` in sync.

    ``self._trips_i`` holds the same frame indexed by ``trip_id``, or
    ``None`` when the new value is missing or empty.
    """
    self._trips = val
    if val is None or val.empty:
        self._trips_i = None
    else:
        self._trips_i = self._trips.set_index("trip_id")
Update ``self._trips_i`` if ``self.trips`` changes.
def connected_component(self, ident):
    """Yield the connected component of labels reachable from ``ident``.

    Starting from ``ident`` (a ``content_id`` or a
    ``(content_id, subtopic_id)`` pair), follows all positive labels
    transitively and yields each label of the component exactly once.

    :param ident: content id or (content id and subtopic id)
    :rtype: generator of :class:`Label`
    """
    ident = normalize_ident(ident)
    visited = set()
    frontier = set([ident])
    seen_labels = set()
    while frontier:
        current = frontier.pop()
        visited.add(current)
        for label in self.directly_connected(current):
            # Only positive relationships propagate connectivity.
            if label.value != CorefValue.Positive:
                continue
            ident1, ident2 = idents_from_label(
                label, subtopic=ident_has_subtopic(current))
            for neighbor in (ident1, ident2):
                if neighbor not in visited:
                    frontier.add(neighbor)
            if label not in seen_labels:
                seen_labels.add(label)
                yield label
Return a connected component generator for ``ident``. ``ident`` may be a ``content_id`` or a ``(content_id, subtopic_id)``. Given an ``ident``, return the corresponding connected component by following all positive transitivity relationships. For example, if ``(a, b, 1)`` is a label and ``(b, c, 1)`` is a label, then ``connected_component('a')`` will return both labels even though ``a`` and ``c`` are not directly connected. (Note that even though this returns a generator, it will still consume memory proportional to the number of labels in the connected component.) :param ident: content id or (content id and subtopic id) :type ident: ``str`` or ``(str, str)`` :rtype: generator of :class:`Label`
def put(self, key, value):
    """Store ``value`` under ``key``, serializing it on the way in.

    The serialized form is written to the underlying ``child_datastore``.

    Args:
        key: Key naming ``value``.
        value: the object to store.
    """
    serialized = self.serializedValue(value)
    self.child_datastore.put(key, serialized)
Stores the object `value` named by `key`. Serializes values on the way in, and stores the serialized data into the ``child_datastore``. Args: key: Key naming `value` value: the object to store.
def __execute_kadmin(cmd):
    """Execute a kadmin query using the configured keytab and principal.

    Returns the ``cmd.run_all`` result dict, or an error dict when the
    keytab/principal configuration is missing.
    """
    auth_keytab = __opts__.get('auth_keytab', None)
    auth_principal = __opts__.get('auth_principal', None)
    if not (__salt__['file.file_exists'](auth_keytab) and auth_principal):
        log.error('Unable to find kerberos keytab/principal')
        return {
            'retcode': 1,
            'comment': 'Missing authentication keytab/principal',
        }
    # NOTE(review): cmd is interpolated into a shell string unquoted —
    # confirm callers never pass untrusted input here.
    return __salt__['cmd.run_all'](
        'kadmin -k -t {0} -p {1} -q "{2}"'.format(
            auth_keytab, auth_principal, cmd))
Execute kadmin commands
def allZero(buffer):
    """Determine whether a buffer is empty, i.e. full of zeros.

    @type buffer: str or bytes
    @param buffer: Buffer to test.
    @rtype: bool
    @return: C{True} if the buffer contains only zero values, C{False}
        otherwise.
    """
    # Generalized: iterating a bytes object yields ints (0), while a str
    # yields "\x00" characters — the original only handled str, so any
    # non-empty bytes input was reported as non-zero.
    return all(byte in (0, "\x00") for byte in buffer)
Tries to determine if a buffer is empty. @type buffer: str @param buffer: Buffer to test if it is empty. @rtype: bool @return: C{True} if the given buffer is empty, i.e. full of zeros, C{False} if it doesn't.
def sY(qubit: Qubit, coefficient: complex = 1.0) -> Pauli:
    """Return the Pauli sigma_Y operator acting on the given qubit."""
    return Pauli.sigma(qubit, 'Y', coefficient)
Return the Pauli sigma_Y operator acting on the given qubit
def unq_argument(self) -> str:
    """Parse an unquoted argument, storing it in ``self._arg``.

    Scans forward until whitespace, ';', '{' or a comment start is found.

    Raises:
        EndOfInput: If past the end of input.
    """
    begin = self.offset
    # State 0: plain characters; state 1: just saw '/', which may start
    # a comment ("//" or "/*") that terminates the argument.
    self.dfa([
        {
            "": lambda: 0,
            ";": lambda: -1,
            " ": lambda: -1,
            "\t": lambda: -1,
            "\r": lambda: -1,
            "\n": lambda: -1,
            "{": lambda: -1,
            '/': lambda: 1
        },
        {
            "": lambda: 0,
            "/": self._back_break,
            "*": self._back_break
        }])
    self._arg = self.input[begin:self.offset]
Parse unquoted argument. Raises: EndOfInput: If past the end of input.
def to_dict(self, delimiter=DEFAULT_DELIMITER, dict_type=collections.OrderedDict):
    """Get the dictionary representation of the current parser.

    :param str delimiter: Delimiter used for nested dictionaries.
    :param class dict_type: Dictionary type used when building the
        representation, defaults to ``collections.OrderedDict``.
    :return: The dictionary representation of the parser instance.
    :rtype: dict
    """
    # The first section acts as the root key of the built dict.
    root_key = self.sections()[0]
    built = self._build_dict(
        self._sections,
        delimiter=delimiter,
        dict_type=dict_type)
    return built.get(root_key, {})
Get the dictionary representation of the current parser. :param str delimiter: The delimiter used for nested dictionaries, defaults to ":", optional :param class dict_type: The dictionary type to use for building the dictionary reperesentation, defaults to collections.OrderedDict, optional :return: The dictionary representation of the parser instance :rtype: dict
def get_repositories_by_query(self, repository_query):
    """Get a list of ``Repositories`` matching the given repository query.

    arg:    repository_query (osid.repository.RepositoryQuery)
    return: (osid.repository.RepositoryList)
    raise:  NullArgument - ``repository_query`` is ``null``
    raise:  OperationFailed / PermissionDenied / Unsupported
    """
    # Delegate to the catalog session when one is available.
    if self._catalog_session is not None:
        return self._catalog_session.get_catalogs_by_query(repository_query)
    query_terms = dict(repository_query._query_terms)
    collection = JSONClientValidated(
        'repository',
        collection='Repository',
        runtime=self._runtime)
    result = collection.find(query_terms).sort('_id', DESCENDING)
    return objects.RepositoryList(result, runtime=self._runtime)
Gets a list of ``Repositories`` matching the given repository query. arg: repository_query (osid.repository.RepositoryQuery): the repository query return: (osid.repository.RepositoryList) - the returned ``RepositoryList`` raise: NullArgument - ``repository_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``repository_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
def add_eval(self, agent, e, fr=None):
    """Add or change an agent's evaluation of the artifact.

    :param agent: Agent which did the evaluation (keyed by its name).
    :param float e: Evaluation for the artifact.
    :param object fr: Framing information for the evaluation.
    """
    key = agent.name
    self._evals[key] = e
    self._framings[key] = fr
Add or change agent's evaluation of the artifact with given framing information. :param agent: Name of the agent which did the evaluation. :param float e: Evaluation for the artifact. :param object fr: Framing information for the evaluation.
def _check_connectivity(self, err):
    """Check connectivity as the source of an error.

    Probes the uptime endpoint; if unreachable, reports the request
    details and raises ``ConnectionError``.  If reachable, the original
    error ``err`` is re-raised unchanged.

    :param err: the original exception to re-raise when connectivity is fine
    :raises ConnectionError: when the uptime endpoint cannot be reached
    """
    try:
        import requests
        requests.get(self.uptime_ssl)
    # Bug fix: was a bare ``except:``, which also swallowed SystemExit
    # and KeyboardInterrupt.
    except Exception:
        from requests import Request
        request_object = Request(method='GET', url=self.uptime_ssl)
        request_details = self.handle_requests(request_object)
        self.printer('ERROR.')
        raise ConnectionError(request_details['error'])
    self.printer('ERROR.')
    raise err
Check network connectivity to determine whether the given error was caused by a connection failure; raises ConnectionError if the endpoint is unreachable, otherwise re-raises the original error.
def _getSyntaxBySourceFileName(self, name): for regExp, xmlFileName in self._extensionToXmlFileName.items(): if regExp.match(name): return self._getSyntaxByXmlFileName(xmlFileName) else: raise KeyError("No syntax for " + name)
Get syntax by source name of file, which is going to be highlighted
def numericshape(self):
    """Shape of the array of temporary values required for the numerical
    solver actually being selected.

    The leading dimension is the solver's number of stages; the remaining
    dimensions are the sequence's own shape.
    """
    try:
        stages = self.subseqs.seqs.model.numconsts.nmb_stages
    except AttributeError:
        objecttools.augment_excmessage(
            'The `numericshape` of a sequence like `%s` depends on the '
            'configuration of the actual integration algorithm.  '
            'While trying to query the required configuration data '
            '`nmb_stages` of the model associated with element `%s`'
            % (self.name, objecttools.devicename(self)))
    numericshape = [stages]
    numericshape.extend(self.shape)
    return tuple(numericshape)
Shape of the array of temporary values required for the numerical solver actually being selected.
def _geolocation_extract(response):
    """Mimic the exception handling in ``client._get_body`` for the
    geolocation API, which uses a different response format."""
    body = response.json()
    status = response.status_code
    if status in (200, 404):
        return body
    try:
        error = body["error"]["errors"][0]["reason"]
    except KeyError:
        error = None
    if status == 403:
        raise exceptions._OverQueryLimit(status, error)
    raise exceptions.ApiError(status, error)
Mimics the exception handling logic in ``client._get_body``, but for geolocation which uses a different response format.
def nominations(self, congress=CURRENT_CONGRESS):
    """Return votes on nominations from a given Congress."""
    return self.fetch(
        "{congress}/nominations.json".format(congress=congress))
Return votes on nominations from a given Congress
def greedy(problem, graph_search=False, viewer=None):
    """Greedy best-first search.

    If graph_search=True, will avoid exploring repeated states.
    Requires: SearchProblem.actions, SearchProblem.result,
    SearchProblem.is_goal, SearchProblem.cost, and
    SearchProblem.heuristic.
    """
    fringe = BoundedPriorityQueue()
    return _search(problem,
                   fringe,
                   graph_search=graph_search,
                   node_factory=SearchNodeHeuristicOrdered,
                   graph_replace_when_better=True,
                   viewer=viewer)
Greedy search. If graph_search=True, will avoid exploring repeated states. Requires: SearchProblem.actions, SearchProblem.result, SearchProblem.is_goal, SearchProblem.cost, and SearchProblem.heuristic.
def dicom_to_nifti(dicom_input, output_file=None):
    """Main dicom-to-nifti conversion function for Philips images.

    Determines whether the input is multiframe or singleframe (and 4D vs
    anatomical) and dispatches to the matching converter.

    :param dicom_input: directory with dicom files for 1 scan
    :param output_file: file path to the output nifti
    """
    assert common.is_philips(dicom_input)

    if common.is_multiframe_dicom(dicom_input):
        _assert_explicit_vr(dicom_input)
        logger.info('Found multiframe dicom')
        if _is_multiframe_4d(dicom_input):
            logger.info('Found sequence type: MULTIFRAME 4D')
            return _multiframe_to_nifti(dicom_input, output_file)
        if _is_multiframe_anatomical(dicom_input):
            logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
            return _multiframe_to_nifti(dicom_input, output_file)
    else:
        logger.info('Found singleframe dicom')
        grouped_dicoms = _get_grouped_dicoms(dicom_input)
        if _is_singleframe_4d(dicom_input):
            logger.info('Found sequence type: SINGLEFRAME 4D')
            return _singleframe_to_nifti(grouped_dicoms, output_file)

    # Neither multiframe-4D/anatomical nor singleframe-4D matched.
    logger.info('Assuming anatomical data')
    return convert_generic.dicom_to_nifti(dicom_input, output_file)
This is the main dicom to nifti conversion function for philips images. As input philips images are required. It will then determine the type of images and do the correct conversion Examples: See unit test :param output_file: file path to the output nifti :param dicom_input: directory with dicom files for 1 scan
def lambda_handler(self):
    """Create a handler function usable as an AWS Lambda entry point.

    Assign the returned function to a variable and configure it as the
    Lambda handler on the AWS console.

    :return: Handler function to tag on AWS Lambda console.
    """
    def wrapper(event, context):
        skill = CustomSkill(skill_configuration=self.skill_configuration)
        request_envelope = skill.serializer.deserialize(
            payload=json.dumps(event), obj_type=RequestEnvelope)
        response_envelope = skill.invoke(
            request_envelope=request_envelope, context=context)
        return skill.serializer.serialize(response_envelope)

    return wrapper
Create a handler function that can be used as handler in AWS Lambda console. The lambda handler provides a handler function, that acts as an entry point to the AWS Lambda console. Users can set the lambda_handler output to a variable and set the variable as AWS Lambda Handler on the console. :return: Handler function to tag on AWS Lambda console.
def prune_by_ngram(self, ngrams):
    """Remove result rows whose n-gram is in ``ngrams``.

    :param ngrams: n-grams to remove
    :type ngrams: `list` of `str`
    """
    self._logger.info('Pruning results by n-gram')
    keep_mask = ~self._matches[constants.NGRAM_FIELDNAME].isin(ngrams)
    self._matches = self._matches[keep_mask]
Removes results rows whose n-gram is in `ngrams`. :param ngrams: n-grams to remove :type ngrams: `list` of `str`
def projector_functions(self):
    """Dictionary with the PAW projectors indexed by state."""
    projectors = OrderedDict()
    for mesh, values, attrib in self._parse_all_radfuncs("projector_function"):
        projectors[attrib["state"]] = RadialFunction(mesh, values)
    return projectors
Dictionary with the PAW projectors indexed by state.
def _prevent_default_initializer_splitting(self, item, indent_amt):
    """Prevent splitting a parameter's default initializer across lines.

    A default initializer is kept on one line even past the maximum line
    length.  Walks back along the current line; when the previous token is
    '=', extraneous whitespace is removed and, if needed, a line break and
    indent are inserted before the whole ``name=value`` pair.
    """
    if unicode(item) == '=':
        # We are at the '=' itself: just drop the space before it.
        self._delete_whitespace()
        return
    if (not self._prev_item or not self._prev_prev_item
            or unicode(self._prev_item) != '='):
        # Not the value side of a default initializer — nothing to do.
        return
    self._delete_whitespace()
    prev_prev_index = self._lines.index(self._prev_prev_item)
    if (isinstance(self._lines[prev_prev_index - 1], self._Indent)
            or self.fits_on_current_line(item.size + 1)):
        # Already at line start, or the pair fits: leave as is.
        return
    if isinstance(self._lines[prev_prev_index - 1], self._Space):
        del self._lines[prev_prev_index - 1]
    self.add_line_break_at(
        self._lines.index(self._prev_prev_item), indent_amt)
Prevent splitting between a default initializer. When there is a default initializer, it's best to keep it all on the same line. It's nicer and more readable, even if it goes over the maximum allowable line length. This goes back along the current line to determine if we have a default initializer, and, if so, to remove extraneous whitespaces and add a line break/indent before it if needed.
def get_canonical_key_id(self, key_id):
    """Map a key id to its shard's canonical key id.

    Used by ``get_canonical_key``; see that method for more explanation.

    Keyword arguments:
    key_id -- the key id (e.g. '12345')

    Returns the canonical key id (e.g. '12').
    """
    shard = self.get_shard_num_by_key_id(key_id)
    return self._canonical_keys[shard]
get_canonical_key_id is used by get_canonical_key, see the comment for that method for more explanation. Keyword arguments: key_id -- the key id (e.g. '12345') returns the canonical key id (e.g. '12')
def get_editor_cmd_from_environment():
    """Get an editor command from environment variables.

    Tries $VISUAL first, then $EDITOR — the same order git uses when it
    looks up editors.  Returns None when neither is available.
    """
    return os.getenv(ENV_VISUAL) or os.getenv(ENV_EDITOR)
Gets and editor command from environment variables. It first tries $VISUAL, then $EDITOR, following the same order git uses when it looks up edits. If neither is available, it returns None.
def delete_older(self):
    """Delete all duplicates strictly older than the newest timestamp.

    Only keeps the subset sharing the most recent timestamp.  When every
    mail in the pool is a deletion candidate, deletion is skipped entirely
    as a safety measure.
    """
    logger.info(
        "Deleting all mails strictly older than the {} timestamp..."
        "".format(self.newest_timestamp))
    candidates = [
        mail for mail in self.pool
        if mail.timestamp < self.newest_timestamp]
    if len(candidates) == self.size:
        logger.warning(
            "Skip deletion: all {} mails share the same timestamp."
            "".format(self.size))
        # Bug fix: the warning announced a skip, but the loop below still
        # ran and would have deleted every mail in the pool.
        return
    logger.info(
        "{} candidates found for deletion.".format(len(candidates)))
    for mail in candidates:
        self.delete(mail)
Delete all older duplicates. Only keeps the subset sharing the most recent timestamp.
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
    """Same as Django's standard shortcut, but also raise 404 when the
    filter arguments do not match the required types.

    Copied from rest_framework.generics because of issue #36.
    """
    try:
        return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
    except (TypeError, ValueError, ValidationError):
        # Bad lookup values (e.g. a non-integer pk) become a 404 rather
        # than a 500.
        raise Http404
Same as Django's standard shortcut, but make sure to also raise 404 if the filter_kwargs don't match the required types. This function was copied from rest_framework.generics because of issue #36.
def league_header(self, league):
    """Print the league header, centered and padded with '=' signs."""
    banner = "{:=^62}".format(" {0} ".format(league))
    click.secho(banner, fg=self.colors.MISC)
    click.echo()
Prints the league header
def find_nir_file_with_missing_depth(video_file_list, depth_file_list):
    """Remove all files without their own counterpart.

    Returns a 4-tuple: (paired video files, paired depth files,
    unpaired video files, unpaired depth files).
    """
    paired_video = []
    paired_depth = []
    for video_file in video_file_list:
        try:
            depth_file = Kinect.depth_file_for_nir_file(
                video_file, depth_file_list)
        except IndexError:
            # No matching depth file — leave this video file unpaired.
            continue
        paired_video.append(video_file)
        paired_depth.append(depth_file)
    bad_nir = [f for f in video_file_list if f not in paired_video]
    bad_depth = [f for f in depth_file_list if f not in paired_depth]
    return (paired_video, paired_depth, bad_nir, bad_depth)
Remove all files without its own counterpart. Returns new lists of files
def rc4(data, key):
    """RC4 encryption and decryption (symmetric: applying twice restores data)."""
    # Key-scheduling algorithm (KSA).
    state = list(range(256))
    j = 0
    key_len = len(key)
    for i in range(256):
        j = (j + state[i] + ord(key[i % key_len])) % 256
        state[i], state[j] = state[j], state[i]
    # Pseudo-random generation algorithm (PRGA).
    out = []
    i = j = 0
    for ch in data:
        i = (i + 1) % 256
        j = (j + state[i]) % 256
        state[i], state[j] = state[j], state[i]
        keystream_byte = state[(state[i] + state[j]) % 256]
        out.append(chr(ord(ch) ^ keystream_byte))
    return "".join(out)
RC4 encryption and decryption method.
def remove_link(self, rel, value=None, href=None):
    """Remove link nodes matching the given criteria.

    Matches on link/@rel, optionally narrowed by link/text() (``value``)
    and/or link/@href (``href``).

    :param rel: link/@rel value to remove. Required.
    :param value: link/text() value to remove (combined with @rel).
    :param href: link/@href value to remove (combined with @rel).
    :return: Number of link nodes removed, or False if no links node
        exists.
    """
    links_node = self.metadata.find('links')
    if links_node is None:
        log.warning('No links node present')
        return False
    removed = 0
    for link in links_node.xpath('.//link[@rel="{}"]'.format(rel)):
        # A criterion left unset (falsy) matches anything.
        text_matches = not value or link.text == value
        href_matches = not href or link.attrib['href'] == href
        if text_matches and href_matches:
            links_node.remove(link)
            removed += 1
    return removed
Removes link nodes based on the function arguments. This can remove link nodes based on the following combinations of arguments: link/@rel link/@rel & link/text() link/@rel & link/@href link/@rel & link/text() & link/@href :param rel: link/@rel value to remove. Required. :param value: link/text() value to remove. This is used in conjunction with link/@rel. :param href: link/@href value to remove. This is used in conjunction with link/@rel. :return: Return the number of link nodes removed, or False if no nodes are removed.
def first_rec(ofile, Rec, file_type):
    """Open ``ofile`` as a magic template file with Rec's keys as headers.

    Writes a "tab <file_type>" first line, then a tab-separated header
    line of the record's keys.  Returns the list of keys written.
    """
    # Retry until the file can be opened (e.g. transient lock on Windows).
    while True:
        try:
            pmag_out = open(ofile, 'w')
            break
        except IOError:
            time.sleep(1)
    pmag_out.write("tab \t" + file_type + "\n")
    keylist = list(Rec.keys())
    header = "\t".join(key.strip() for key in keylist)
    pmag_out.write(header + '\n')
    pmag_out.close()
    return keylist
opens the file ofile as a magic template file with headers as the keys to Rec
def from_shapefile(cls, shapefile, *args, **kwargs):
    """Load a shapefile from disk, optionally merging it with a dataset.

    See ``from_records`` for the full signature; all extra arguments are
    forwarded there.

    Returns
    -------
    shapes : Polygons or Path object containing the geometries
    """
    records = Reader(shapefile).records()
    return cls.from_records(records, *args, **kwargs)
Loads a shapefile from disk and optionally merges it with a dataset. See ``from_records`` for full signature. Parameters ---------- records: list of cartopy.io.shapereader.Record Iterator containing Records. dataset: holoviews.Dataset Any HoloViews Dataset type. on: str or list or dict A mapping between the attribute names in the records and the dimensions in the dataset. value: str The value dimension in the dataset the values will be drawn from. index: str or list One or more dimensions in the dataset the Shapes will be indexed by. drop_missing: boolean Whether to drop shapes which are missing from the provides dataset. Returns ------- shapes: Polygons or Path object A Polygons or Path object containing the geometries
def positional(max_pos_args):
    """Decorator declaring that only the first *max_pos_args* arguments
    may be passed positionally.

    For methods, the count includes ``self``. Exceeding the limit raises
    TypeError at call time. When DEBUG is off the function is returned
    unwrapped.
    """
    __ndb_debug__ = 'SKIP'
    def positional_decorator(wrapped):
        if not DEBUG:
            return wrapped
        __ndb_debug__ = 'SKIP'
        @wrapping(wrapped)
        def positional_wrapper(*args, **kwds):
            __ndb_debug__ = 'SKIP'
            n_given = len(args)
            if n_given > max_pos_args:
                suffix = '' if max_pos_args == 1 else 's'
                raise TypeError(
                    '%s() takes at most %d positional argument%s (%d given)'
                    % (wrapped.__name__, max_pos_args, suffix, n_given))
            return wrapped(*args, **kwds)
        return positional_wrapper
    return positional_decorator
A decorator to declare that only the first N arguments may be positional. Note that for methods, n includes 'self'.
def user_to_request(handler):
    """Decorator that stores the logged-in user on the request before
    delegating to *handler*."""
    @wraps(handler)
    async def decorator(*args):
        request = _get_request(args)
        current_user = await get_cur_user(request)
        request[cfg.REQUEST_USER_KEY] = current_user
        return await handler(*args)
    return decorator
Add user to request if user logged in
def duration(self):
    """Return the interval length as integer milliseconds.

    While the interval is still running (stop not yet called) this
    reports elapsed time up to the current instant; after stopping,
    the final duration is computed once and cached.
    """
    if self._stop_instant is None:
        # Still running: measure against the current point in time.
        return int((instant() - self._start_instant) * 1000)
    if self._duration is None:
        elapsed = self._stop_instant - self._start_instant
        self._duration = int(elapsed * 1000)
    return self._duration
Returns the length of the interval as an integer number of milliseconds. If stop has not been called on the interval yet, it reports the number of milliseconds elapsed in the interval up to the current point in time.
def cp_cropduster_image(self, the_image_path, del_after_upload=False, overwrite=False, invalidate=False):
    """Copy a cropduster image and its crops folder to the S3 bucket.

    Cropduster is a Django library for resizing editorial images. The
    crops folder is named after the image with its extension removed.
    Uses ``settings.MEDIA_ROOT`` for the local root and
    ``settings.S3_ROOT_BASE`` for the destination root in S3.
    """
    local_file = os.path.join(settings.MEDIA_ROOT, the_image_path)
    if not os.path.exists(local_file):
        # Nothing to upload when the source image is missing.
        return
    crops_rel = os.path.splitext(the_image_path)[0]
    crops_local = os.path.join(settings.MEDIA_ROOT, crops_rel)
    common = dict(del_after_upload=del_after_upload,
                  overwrite=overwrite,
                  invalidate=invalidate)
    # Upload the image file itself.
    self.cp(local_path=local_file,
            target_path=os.path.join(settings.S3_ROOT_BASE, the_image_path),
            **common)
    # Upload every generated crop from the sibling crops folder.
    self.cp(local_path=crops_local + "/*",
            target_path=os.path.join(settings.S3_ROOT_BASE, crops_rel),
            **common)
Deal with saving cropduster images to S3. Cropduster is a Django library for resizing editorial images. S3utils was originally written to put cropduster images on S3 bucket. Extra Items in your Django Settings ----------------------------------- MEDIA_ROOT : string Django media root. Currently it is ONLY used in cp_cropduster_image method. NOT any other method as this library was originally made to put Django cropduster images on s3 bucket. S3_ROOT_BASE : string S3 media root base. This will be the root folder in S3. Currently it is ONLY used in cp_cropduster_image method. NOT any other method as this library was originally made to put Django cropduster images on s3 bucket.
def get_requests_session():
    """Build a requests Session whose HTTP and HTTPS connection pools
    are sized to avoid `connection pool full` warnings.

    :return: configured requests session
    """
    session = requests.sessions.Session()
    for scheme in ('http://', 'https://'):
        session.mount(scheme, HTTPAdapter(pool_connections=25,
                                          pool_maxsize=25,
                                          pool_block=True))
    return session
Set connection pool maxsize and block value to avoid `connection pool full` warnings. :return: requests session
def get_stats(self):
    """Query transfer statistics from the partner.

    :returns: tuple ``(sent, recv, send_errors, recv_errors)`` of
        ``ctypes.c_uint32`` counters.
    """
    counters = [ctypes.c_uint32() for _ in range(4)]
    refs = [ctypes.byref(counter) for counter in counters]
    result = self.library.Par_GetStats(self.pointer, *refs)
    check_error(result, "partner")
    sent, recv, send_errors, recv_errors = counters
    return sent, recv, send_errors, recv_errors
Returns some statistics. :returns: a tuple containing bytes send, received, send errors, recv errors
def detect_keep_boundary(start, end, namespaces):
    """Inspect a link's start/end markers and decide whether the
    paragraph boundary should be kept on each side.

    A boundary is kept when the enclosing text:p element contains more
    than one child (i.e. more than just the marker itself).

    :param start: element marking the link start (must support getparent)
    :param end: element marking the link end (must support getparent)
    :param namespaces: mapping providing at least the 'text' namespace URI
    :return: tuple ``(keep_start, keep_end)`` of booleans
    """
    paragraph_tag = "{%s}p" % namespaces['text']
    result_start, result_end = False, False
    parent_start = start.getparent()
    parent_end = end.getparent()
    # len(element) counts child elements directly; getchildren() is
    # deprecated and removed in recent ElementTree/lxml versions.
    if parent_start.tag == paragraph_tag:
        result_start = len(parent_start) > 1
    if parent_end.tag == paragraph_tag:
        result_end = len(parent_end) > 1
    return result_start, result_end
a helper to inspect a link and see if we should keep the link boundary
async def add_items(self, *items):
    """Append items (or their ids) to the playlist.

    |coro|

    Parameters
    ----------
    items : array_like
        list of items to add (or their ids)

    See Also
    --------
    remove_items :
    """
    processed = await self.process(items)
    ids = [item.id for item in processed]
    if not ids:
        return
    await self.connector.post('Playlists/{Id}/Items'.format(Id=self.id),
                              data={'Ids': ','.join(ids)},
                              remote=False
    )
append items to the playlist |coro| Parameters ---------- items : array_like list of items to add (or their ids) See Also -------- remove_items :
def asarray(self, file=None, out=None, **kwargs):
    """Read image data from files and return as numpy array.

    The kwargs parameters are passed to the imread function.

    Raise IndexError or ValueError if image shapes do not match.
    """
    if file is not None:
        # Single-file mode: *file* is either an index into self.files
        # or a name/handle accepted by self.imread.
        if isinstance(file, int):
            return self.imread(self.files[file], **kwargs)
        return self.imread(file, **kwargs)
    # Read the first file to discover per-image shape and dtype.
    im = self.imread(self.files[0], **kwargs)
    shape = self.shape + im.shape
    result = create_output(out, shape, dtype=im.dtype)
    # Flatten the leading (file) dimensions for positional assignment.
    result = result.reshape(-1, *im.shape)
    for index, fname in zip(self._indices, self.files):
        # Offset each multi-dimensional index by the start index, then
        # flatten it into a position within the file dimensions.
        index = [i-j for i, j in zip(index, self._startindex)]
        index = numpy.ravel_multi_index(index, self.shape)
        im = self.imread(fname, **kwargs)
        result[index] = im
    result.shape = shape
    return result
Read image data from files and return as numpy array. The kwargs parameters are passed to the imread function. Raise IndexError or ValueError if image shapes do not match.
def safe_url(url):
    """Mask the password in a connection URL so it can be printed safely.

    :param url: connection URL, possibly containing ``user:password@``.
    :return: the URL with the password portion replaced by ``*****``.
    """
    password = urlparse(url).password
    if password is None:
        return url
    return url.replace(':%s@' % password, ':*****@')
Remove password from printed connection URLs.
def _make_graphite_api_points_list(influxdb_data):
    """Build a graphite-api data points mapping from an Influxdb
    ResultSet.

    :return: dict mapping series name to a list of
        ``(datetime, value)`` tuples.
    """
    series_points = {}
    for key in influxdb_data.keys():
        name = key[0]
        series_points[name] = [
            (datetime.datetime.fromtimestamp(float(point['time'])),
             point['value'])
            for point in influxdb_data.get_points(name)
        ]
    return series_points
Make graphite-api data points dictionary from Influxdb ResultSet data
def tpictr(sample, lenout=_default_len_out, lenerr=_default_len_out):
    """Given a sample time string, create a time format picture suitable
    for use by the routine timout.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html

    :param sample: A sample time string.
    :param lenout: The length for the output picture string.
    :param lenerr: The length for the output error string.
    :return: tuple of (format picture, parse-success flag, diagnostic
        message returned when the sample cannot be parsed).
    """
    sample = stypes.stringToCharP(sample)
    # Output buffers, sized from the requested lengths.
    pictur = stypes.stringToCharP(lenout)
    errmsg = stypes.stringToCharP(lenerr)
    lenout = ctypes.c_int(lenout)
    lenerr = ctypes.c_int(lenerr)
    ok = ctypes.c_int()
    libspice.tpictr_c(sample, lenout, lenerr, pictur, ctypes.byref(ok), errmsg)
    return stypes.toPythonString(pictur), ok.value, stypes.toPythonString(
        errmsg)
Given a sample time string, create a time format picture suitable for use by the routine timout. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html :param sample: A sample time string. :type sample: str :param lenout: The length for the output picture string. :type lenout: int :param lenerr: The length for the output error string. :type lenerr: int :return: A format picture that describes sample, Flag indicating whether sample parsed successfully, Diagnostic returned if sample cannot be parsed :rtype: tuple
def build_diagonals(self):
    """Builds the diagonals for the coefficient array.

    Rolls the stored off-diagonal arrays into position and assembles a
    sparse pentadiagonal matrix in ``self.coeff_matrix``, unless one
    already exists or both boundary conditions are periodic.
    """
    # Shift each band so spdiags places its entries on the right rows.
    self.l2 = np.roll(self.l2, -2)
    self.l1 = np.roll(self.l1, -1)
    self.r1 = np.roll(self.r1, 1)
    self.r2 = np.roll(self.r2, 2)
    if self.coeff_matrix is not None:
        # Matrix already assembled; nothing to do.
        pass
    elif self.BC_E == 'Periodic' and self.BC_W == 'Periodic':
        # NOTE(review): periodic boundaries are presumably assembled
        # elsewhere -- confirm against the caller.
        pass
    else:
        self.diags = np.vstack((self.l2,self.l1,self.c0,self.r1,self.r2))
        self.offsets = np.array([-2,-1,0,1,2])
        self.coeff_matrix = spdiags(self.diags, self.offsets, self.nx, self.nx, format='csr')
Builds the diagonals for the coefficient array
def process_jpeg_bytes(bytes_in, quality=DEFAULT_JPEG_QUALITY):
    """Generate an optimized JPEG from JPEG-encoded bytes via Guetzli.

    :param bytes_in: the input image's bytes
    :param quality: the output JPEG quality
    :returns: optimized JPEG bytes
    :rtype: bytes
    :raises ValueError: when Guetzli cannot decode the input image
        (probably corrupted, or not a JPEG)
    """
    out_pointer = ffi.new("char**")
    # Tie the C-allocated output buffer's lifetime to the GC so it is
    # freed automatically.
    out_pointer_gc = ffi.gc(out_pointer, lib.guetzli_free_bytes)
    out_length = lib.guetzli_process_jpeg_bytes(
        bytes_in, len(bytes_in), out_pointer_gc, quality
    )
    if not out_length:
        raise ValueError("Invalid JPEG: Guetzli was not able to decode the image")
    raw = ffi.cast("char*", out_pointer_gc[0])
    return ffi.unpack(raw, out_length)
Generates an optimized JPEG from JPEG-encoded bytes. :param bytes_in: the input image's bytes :param quality: the output JPEG quality (default 95) :returns: Optimized JPEG bytes :rtype: bytes :raises ValueError: Guetzli was not able to decode the image (the image is probably corrupted or is not a JPEG) .. code:: python import pyguetzli input_jpeg_bytes = open("./test/image.jpg", "rb").read() optimized_jpeg = pyguetzli.process_jpeg_bytes(input_jpeg_bytes)
def save(self, model, joining=None, touch=True):
    """Save a new model and attach it to the parent model.

    :type model: eloquent.Model
    :type joining: dict
    :type touch: bool
    :rtype: eloquent.Model
    """
    pivot_attributes = {} if joining is None else joining
    # Defer the touch: attach() decides based on *touch*.
    model.save({'touch': False})
    self.attach(model.get_key(), pivot_attributes, touch)
    return model
Save a new model and attach it to the parent model. :type model: eloquent.Model :type joining: dict :type touch: bool :rtype: eloquent.Model
def load_defaults(self):
    """Load default extraction rules from the user and system
    extract.conf files. Returns None."""
    candidates = (
        self.config.settings.user.extract,
        self.config.settings.system.extract,
    )
    for conf_file in candidates:
        if not conf_file:
            continue
        try:
            self.load_from_file(conf_file)
        except KeyboardInterrupt as e:
            raise e
        except Exception as e:
            # A missing or unreadable conf file is tolerated unless
            # debugging is enabled.
            if binwalk.core.common.DEBUG:
                raise Exception("Extractor.load_defaults failed to load file '%s': %s" % (conf_file, str(e)))
Loads default extraction rules from the user and system extract.conf files. Returns None.
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
    """Open a Zarr group using metadata previously consolidated into a
    single key (see :func:`consolidate_metadata`).

    All metadata reads go through the consolidated key, avoiding one
    store access per hierarchy level; data access still hits the
    underlying store directly.

    :param store: MutableMapping, directory path, or zip file name.
    :param metadata_key: key holding the consolidated metadata.
    :param mode: 'r' (read only) or 'r+' (data writable; metadata
        changes such as creating arrays/groups are not allowed).
    :param kwargs: forwarded to ``zarr.creation.open_array`` or
        ``zarr.hierarchy.open_group``.
    :return: :class:`zarr.hierarchy.Group` instance.
    """
    from .storage import ConsolidatedMetadataStore

    store = normalize_store_arg(store)
    if mode not in ('r', 'r+'):
        raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
                         .format(mode))
    meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)
    return open(store=meta_store, chunk_store=store, mode=mode, **kwargs)
Open group using metadata previously consolidated into a single key. This is an optimised method for opening a Zarr group, where instead of traversing the group/array hierarchy by accessing the metadata keys at each level, a single key contains all of the metadata for everything. For remote data sources where the overhead of accessing a key is large compared to the time to read data. The group accessed must have already had its metadata consolidated into a single key using the function :func:`consolidate_metadata`. This optimised method only works in modes which do not change the metadata, although the data may still be written/updated. Parameters ---------- store : MutableMapping or string Store or path to directory in file system or name of zip file. metadata_key : str Key to read the consolidated metadata from. The default (.zmetadata) corresponds to the default used by :func:`consolidate_metadata`. mode : {'r', 'r+'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist) although only writes to data are allowed, changes to metadata including creation of new arrays or group are not allowed. **kwargs Additional parameters are passed through to :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`. Returns ------- g : :class:`zarr.hierarchy.Group` Group instance, opened with the consolidated metadata. See Also -------- consolidate_metadata
def run(input, conf, filepath=None):
    """Lints a YAML source.

    Returns a generator of LintProblem objects.

    :param input: buffer, string or stream to read from
    :param conf: yamllint configuration object
    """
    if conf.is_file_ignored(filepath):
        return ()
    if isinstance(input, (type(b''), type(u''))):
        # Raw bytes/str buffer: lint it directly.
        return _run(input, conf, filepath)
    if hasattr(input, 'read'):
        # File-like stream: consume it fully, then lint.
        return _run(input.read(), conf, filepath)
    raise TypeError('input should be a string or a stream')
Lints a YAML source. Returns a generator of LintProblem objects. :param input: buffer, string or stream to read from :param conf: yamllint configuration object
def collapse_pair(graph, survivor: BaseEntity, victim: BaseEntity) -> None:
    """Rewire all of *victim*'s edges onto *survivor*, then delete
    *victim*.

    Edges directly between the two nodes are dropped rather than copied.

    :param pybel.BELGraph graph: A BEL graph
    :param survivor: node that absorbs the victim's edges
    :param victim: node that is removed from the graph
    """
    rewired_out = [
        (survivor, target, key, data)
        for _, target, key, data in graph.out_edges(victim, keys=True, data=True)
        if target != survivor
    ]
    rewired_in = [
        (source, survivor, key, data)
        for source, _, key, data in graph.in_edges(victim, keys=True, data=True)
        if source != survivor
    ]
    graph.add_edges_from(rewired_out)
    graph.add_edges_from(rewired_in)
    graph.remove_node(victim)
Rewire all edges from the synonymous node to the survivor node, then deletes the synonymous node. Does not keep edges between the two nodes. :param pybel.BELGraph graph: A BEL graph :param survivor: The BEL node to collapse all edges on the synonym to :param victim: The BEL node to collapse into the surviving node
def from_content(cls, content):
    """Create a Character from the HTML content of its tibia.com page.

    :param content: The HTML content of the page.
    :return: the :class:`Character` contained in the page, or None if
        the character doesn't exist.
    :raises InvalidContent: if content is not the HTML of a character's
        page.
    """
    tables = cls._parse_tables(parse_tibiacom_content(content))
    if "Could not find character" in tables:
        return None
    char = Character()
    if "Character Information" not in tables:
        raise InvalidContent("content does not contain a tibia.com character information page.")
    char._parse_character_information(tables["Character Information"])
    char._parse_achievements(tables.get("Account Achievements", []))
    char._parse_deaths(tables.get("Character Deaths", []))
    char._parse_account_information(tables.get("Account Information", []))
    char._parse_other_characters(tables.get("Characters", []))
    return char
Creates an instance of the class from the html content of the character's page. Parameters ---------- content: :class:`str` The HTML content of the page. Returns ------- :class:`Character` The character contained in the page, or None if the character doesn't exist Raises ------ InvalidContent If content is not the HTML of a character's page.
def out_format(data, out='nested', opts=None, **kwargs):
    """Return the formatted outputter string for the Python object.

    data
        The JSON serializable object.
    out: ``nested``
        The name of the output to use to transform the data.
    opts
        Dictionary of configuration options. Default: ``__opts__``.
    kwargs
        Arguments sent to the outputter module.

    CLI Example:

    .. code-block:: bash

        salt '*' out.out_format "{'key': 'value'}"
    """
    return salt.output.out_format(data, out, opts=opts or __opts__, **kwargs)
Return the formatted outputter string for the Python object. data The JSON serializable object. out: ``nested`` The name of the output to use to transform the data. Default: ``nested``. opts Dictionary of configuration options. Default: ``__opts__``. kwargs Arguments to sent to the outputter module. CLI Example: .. code-block:: bash salt '*' out.out_format "{'key': 'value'}"
def _NTU_max_for_P_solver(data, R1):
    """Upper bound on the NTU1 value for the P-NTU method.

    Evaluates a piecewise Pade approximation (numerator/denominator
    polynomial pairs, one per offset segment) fitted against a global
    minimizer over R1 from ~1E-7 to ~100. Used to bound the solver.
    """
    offset_max = data['offset'][-1]
    for offset, numerator, denominator in zip(data['offset'], data['p'],
                                              data['q']):
        # Use the first segment whose offset exceeds R1, or the final
        # segment as a catch-all.
        if R1 < offset or offset == offset_max:
            x = R1 - offset
            return _horner(numerator, x)/_horner(denominator, x)
Private function to calculate the upper bound on the NTU1 value in the P-NTU method. This value is calculated via a pade approximation obtained on the result of a global minimizer which calculated the maximum P1 at a given R1 from ~1E-7 to approximately 100. This should suffice for engineering applications. This value is needed to bound the solver.
def clean_pred(self, pred, ignore_warning=False):
    """Return the lower-cased, stripped suffix of a predicate IRI or
    curie.

    Exits with an error message for non-IRI, non-curie input unless
    *ignore_warning* is set.
    """
    original_pred = pred
    pred = pred.lower().strip()
    if 'http' in pred:
        # IRI: keep everything after the final slash.
        return pred.split('/')[-1]
    if ':' in pred:
        if pred[-1] == ':':
            return pred
        # Curie: keep everything after the final colon.
        return pred.split(':')[-1]
    if not ignore_warning:
        exit('Not a valid predicate: ' + original_pred +
             '. Needs to be an iri "/" or curie ":".')
    return pred
Takes the predicate and returns the suffix, lower case, stripped version
def _dirint_from_dni_ktprime(dni, kt_prime, solar_zenith,
                             use_delta_kt_prime, temp_dew):
    """Calculate DIRINT DNI from supplied DISC DNI and Kt'.

    Supports :py:func:`gti_dirint`.
    """
    times = dni.index
    delta_kt_prime = _delta_kt_prime_dirint(kt_prime, use_delta_kt_prime,
                                            times)
    w = _temp_dew_dirint(temp_dew, times)
    coefficients = _dirint_coeffs(times, kt_prime, solar_zenith, w,
                                  delta_kt_prime)
    return dni * coefficients
Calculate DIRINT DNI from supplied DISC DNI and Kt'. Supports :py:func:`gti_dirint`
def create(self, **fields):
    """Create a new entry from *fields*, persist it, and return it."""
    record = self.instance(**fields)
    record.save()
    return record
Create new entry.
def get_list(self, ids: List[str]) -> List[Account]:
    """Load the accounts whose guids appear in *ids*."""
    return (
        self.query
        .filter(Account.guid.in_(ids))
        .all()
    )
Loads accounts by the ids passed as an argument
def update(self, pointvol):
    """Update the N-sphere radii using the current set of live points.

    :param pointvol: volume associated with a single live point; passed
        through to the radius update.
    :return: a deep copy of the updated radfriends object.
    """
    # Optional KD-tree over the live points to speed up neighbor queries.
    if self.use_kdtree:
        kdtree = spatial.KDTree(self.live_u)
    else:
        kdtree = None
    # Optionally hand the worker pool to the update (e.g. for bootstrap
    # resampling).
    if self.use_pool_update:
        pool = self.pool
    else:
        pool = None
    self.radfriends.update(self.live_u, pointvol=pointvol,
                           rstate=self.rstate, bootstrap=self.bootstrap,
                           pool=pool, kdtree=kdtree)
    if self.enlarge != 1.:
        # Expand the bounding volume by the requested factor.
        self.radfriends.scale_to_vol(self.radfriends.vol_ball *
                                     self.enlarge)
    return copy.deepcopy(self.radfriends)
Update the N-sphere radii using the current set of live points.
def render(self, parts=None):
    """Return a two-element tuple of (``struct`` format, values).

    Iterates over the sub-parts (default: all of ``self.parts``),
    calling ``render()`` on each and concatenating formats and values.
    Primitive part classes are instantiated from the matching attribute
    first; other parts are taken from the attribute directly.
    """
    fmt_pieces = []
    values = []
    for name, part_class in (parts or self.parts):
        if issubclass(part_class, Primitive):
            part = part_class(getattr(self, name, None))
        else:
            part = getattr(self, name, None)
        part_fmt, part_values = part.render()
        fmt_pieces.extend(part_fmt)
        values.extend(part_values)
    return "".join(fmt_pieces), values
Returns a two-element tuple with the ``struct`` format and values. Iterates over the applicable sub-parts and calls `render()` on them, accumulating the format string and values. Optionally takes a subset of parts to render, default behavior is to render all sub-parts belonging to the class.
def update_attribute_value_items(self):
    """Yield (attribute, proxied value) pairs for an UPDATE operation.

    Collection attributes are skipped -- the traversal algorithm
    processes them implicitly -- as are attributes whose proxied value
    is unavailable.
    """
    for attr in self._attribute_iterator():
        if attr.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
            continue
        try:
            attr_val = self._get_proxied_attribute_value(attr)
        except AttributeError:
            continue
        yield (attr, attr_val)
Returns an iterator of items for an attribute value map to use for an UPDATE operation. The iterator ignores collection attributes as these are processed implicitly by the traversal algorithm. :returns: iterator yielding tuples with objects implementing :class:`everest.resources.interfaces.IResourceAttribute` as the first and the proxied attribute value as the second argument.
def bm3_v_single(p, v0, k0, k0p, p_ref=0.0, min_strain=0.01):
    """Find the volume at pressure *p* by inverting the BM3 equation of
    state with ``scipy.optimize.brenth``.

    Scalar input only (not vectorized); cannot handle uncertainties.

    :param p: pressure
    :param v0: volume at reference conditions
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of the bulk modulus
    :param p_ref: reference pressure (default = 0)
    :param min_strain: minimum V/V0 used to bracket the root
        (default = 0.01)
    :return: volume at the given pressure
    """
    if p <= 1.e-5:
        # Effectively at reference pressure: no compression.
        return v0

    def pressure_mismatch(v, v0, k0, k0p, p, p_ref=0.0):
        # Root of this function is the volume where BM3 pressure == p.
        return bm3_p(v, v0, k0, k0p, p_ref=p_ref) - p

    return brenth(pressure_mismatch, v0, v0 * min_strain,
                  args=(v0, k0, k0p, p, p_ref))
find volume at given pressure using brenth in scipy.optimize this is for single p value, not vectorized this cannot handle uncertainties :param p: pressure :param v0: volume at reference conditions :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at reference conditions :param p_ref: reference pressure (default = 0) :param min_strain: minimum strain value to find solution (default = 0.01) :return: volume at high pressure
def execute(self, task):
    """Run *task*, handling retries and re-raising exceptions.

    When the task fails with retries remaining, the retry counter is
    decremented and the task is either pushed back onto the queue
    (async) or re-executed immediately (sync). With no retries left,
    the original exception propagates.

    :param task: the task instance to process
    :returns: whatever ``task.run()`` returns
    """
    try:
        return task.run()
    except Exception:
        if task.retries <= 0:
            raise
        task.retries -= 1
        task.to_retrying()
        # 'async' became a reserved keyword in Python 3.7, so the
        # attribute must be read via getattr rather than task.async.
        if getattr(task, 'async'):
            data = task.serialize()
            task.task_id = self.backend.push(
                self.queue_name,
                task.task_id,
                data
            )
        else:
            return self.execute(task)
Given a task instance, this runs it. This includes handling retries & re-raising exceptions. Ex:: task = Task(async=False, retries=5) task.to_call(add, 101, 35) finished_task = gator.execute(task) :param task: The task instance to process :type task: Task :returns: The completed ``Task`` instance
def _WorkerCommand_environment(self): worker = self.workersArguments c = [] if worker.prolog: c.extend([ "source", worker.prolog, "&&", ]) if worker.pythonPath and not self.isLocal(): c.extend([ "env", "PYTHONPATH={0}:$PYTHONPATH".format(worker.pythonPath), ]) elif worker.pythonPath and self.isLocal(): c.extend([ "env", "PYTHONPATH={0}:{1}".format( worker.pythonPath, os.environ.get("PYTHONPATH", ""), ), ]) return c
Return list of shell commands to prepare the environment for bootstrap.
def eval(conn, string, strip_command=True, **kwargs):
    """Compile the given template and execute it on *conn*.

    Raises an exception if the compilation fails. When *strip_command*
    is True, the echoed command line is stripped from each response
    received after any command sent by the template.

    :param conn: the connection on which to run the template.
    :param string: the template source to compile.
    :param strip_command: whether to strip the command echo from the
        response.
    :param kwargs: variables to define in the template.
    :return: the variables that are defined after execution of the
        script.
    """
    return _run(conn, None, string, {'strip_command': strip_command},
                **kwargs)
Compiles the given template and executes it on the given connection. Raises an exception if the compilation fails. if strip_command is True, the first line of each response that is received after any command sent by the template is stripped. For example, consider the following template:: ls -1{extract /(\S+)/ as filenames} {loop filenames as filename} touch $filename {end} If strip_command is False, the response, (and hence, the `filenames' variable) contains the following:: ls -1 myfile myfile2 [...] By setting strip_command to True, the first line is ommitted. :type conn: Exscript.protocols.Protocol :param conn: The connection on which to run the template. :type string: string :param string: The template to compile. :type strip_command: bool :param strip_command: Whether to strip the command echo from the response. :type kwargs: dict :param kwargs: Variables to define in the template. :rtype: dict :return: The variables that are defined after execution of the script.
def rlogistic(mu, tau, size=None):
    """Logistic random variates with location *mu*; *tau* divides the
    logit (presumably a precision-like parameter -- see caller)."""
    u = np.random.random(size)
    logit = np.log(u / (1 - u))
    return mu + logit / tau
Logistic random variates.
def register(self):
    """Register a new user from the POSTed data and log them in.

    :raises JHTTPConflict: when the account already exists.
    """
    user, created = self.Model.create_account(self._json_params)
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')
    self.request._user = user
    # Issue auth headers remembering the new user's primary key.
    headers = remember(self.request, getattr(user, user.pk_field()))
    return JHTTPOk('Registered', headers=headers)
Register new user by POSTing all required data.
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize an API XML doc to an ElementTree element.

    :param xml_str: bytes of the XML document.
    :param encoding: decoder used when converting the XML bytes to str.
    :return: root Element of the parsed document.
    """
    return xml.etree.ElementTree.fromstring(
        xml_str, parser=xml.etree.ElementTree.XMLParser(encoding=encoding))
Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc.
def segments(self):
    """Collect contours from every image, keyed by byte value then
    z-index.

    Contour sets for the same (byte_value, z) pair across images are
    concatenated in place.

    :return: dict mapping byte_value -> {z: contour_set}
    """
    segments = dict()
    # xrange/iteritems were Python-2-only; use range/items instead.
    for i in range(len(self)):
        image = self[i]
        for z, contour in image.as_segments.items():
            for byte_value, contour_set in contour.items():
                by_z = segments.setdefault(byte_value, dict())
                if z not in by_z:
                    by_z[z] = contour_set
                else:
                    by_z[z] += contour_set
    return segments
A dictionary of lists of contours keyed by z-index
def genpass(pattern=r'[\w]{32}'):
    """Generate a password of random characters matching *pattern*.

    :raises ValueError: when *pattern* is not a valid regular expression.
    """
    try:
        return rstr.xeger(pattern)
    except re.error as error:
        raise ValueError(str(error))
generates a password with random characters
def bake(self):
    """Bake a `yamllint` command so it's ready to execute and returns
    None.

    :return: None
    """
    command = sh.yamllint.bake(
        self.options, self._tests,
        _env=self.env, _out=LOG.out, _err=LOG.error)
    self._yamllint_command = command
Bake a `yamllint` command so it's ready to execute and returns None. :return: None