code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def unmarshal_props(self, fp, cls=None, id=None):
    """Unmarshal only the properties of an object from XML.

    ``fp`` may be an XML string (parsed with ``parseString``) or a
    file-like object (parsed with ``parse``).  Returns whatever
    ``get_props_from_doc`` yields for the parsed document.
    """
    # NOTE(review): ``unicode`` exists only on Python 2; on Python 3 this
    # check raises NameError for non-str input -- confirm target version.
    if isinstance(fp, str) or isinstance(fp, unicode):
        doc = parseString(fp)
    else:
        doc = parse(fp)
    return self.get_props_from_doc(cls, id, doc)
Same as unmarshalling an object, except it returns from "get_props_from_doc"
def find_module(self, modname, folder=None):
    """Return the resource for ``modname``, or None if it cannot be found.

    Source folders are searched first, then the python path folders,
    and finally the optional extra ``folder``.
    """
    def candidate_folders():
        for src in self.get_source_folders():
            yield src
        for src in self.get_python_path_folders():
            yield src
        if folder is not None:
            yield folder

    for candidate in candidate_folders():
        found = _find_module_in_folder(candidate, modname)
        if found is not None:
            return found
    return None
Returns a resource corresponding to the given module returns None if it can not be found
def get_all_classes(module_name):
    """Load all non-abstract classes defined in or exported by a module."""
    def _is_concrete_class(member):
        return isclass(member) and not isabstract(member)
    module = importlib.import_module(module_name)
    return getmembers(module, _is_concrete_class)
Load all non-abstract classes from package
def kelvin_to_rgb(kelvin):
    """Approximate an (r, g, b) tuple for a colour temperature in kelvin."""
    temp = kelvin / 100.0
    # Red and green share the same 66-degree breakpoint.
    if temp <= 66:
        red = 255
        green = 99.4708025861 * math.log(temp) - 161.1195681661
    else:
        red = 329.698727446 * ((temp - 60) ** -0.1332047592)
        green = 288.1221695283 * ((temp - 60) ** -0.0755148492)
    if temp > 66:
        blue = 255
    elif temp <= 19:
        blue = 0
    else:
        blue = 138.5177312231 * math.log(temp - 10) - 305.0447927307
    return tuple(correct_output(channel) for channel in (red, green, blue))
Convert a color temperature given in kelvin to an approximate RGB value. :param kelvin: Color temp in K :return: Tuple of (r, g, b), equivalent color for the temperature
def ports(self):
    """The list of all ports belonging to this component.

    Built lazily on first access (under the instance mutex) and
    cached in ``self._ports`` afterwards.
    """
    with self._mutex:
        if not self._ports:
            self._ports = [ports.parse_port(port, self)
                           for port in self._obj.get_ports()]
        return self._ports
The list of all ports belonging to this component.
def after_match(self, func, full_fallback=False):
    """Return an iterator over the elements after the first match.

    If ``full_fallback`` is true and ``func`` never matches, an
    iterator over the whole sequence is returned instead.
    """
    remaining = iter(self)
    for element in remaining:
        if func(element):
            # Matched: hand back the partially-consumed iterator.
            return remaining
    if full_fallback:
        return iter(self)
    return remaining
Returns an iterator for all the elements after the first match. If `full_fallback` is `True`, it will return all the messages if the function never matched.
def get_evaluation_parameter(self, parameter_name, default_value=None):
    """Return a stored evaluation parameter, or ``default_value``.

    :param parameter_name: name of the parameter to look up
    :param default_value: returned when the parameter is not found
    """
    config = self._expectations_config
    if "evaluation_parameters" not in config:
        return default_value
    eval_params = config['evaluation_parameters']
    if parameter_name not in eval_params:
        return default_value
    return eval_params[parameter_name]
Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to store. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation parameter.
def get_cached(self, link, default=None):
    """Look up a cached navigator by Link object or bare uri string."""
    key = link.uri if hasattr(link, 'uri') else link
    return self.id_map.get(key, default)
Retrieves a cached navigator from the id_map. Either a Link object or a bare uri string may be passed in.
def jtype(c):
    """Return a string naming the data type of a JSON value."""
    kind = c['type']
    if kind == 'literal':
        # Literals also carry their language tag (may be None).
        return '{}, {}'.format(kind, c.get('xml:lang'))
    return kind
Return a string with the data type of a value, for JSON data
def retryable(retryer=retry_ex, times=3, cap=120000):
    """Decorator factory that retries the wrapped function via ``retryer``.

    :param retryer: function handling retries (default ``retry_ex``)
    :param times: number of retries on initial failure
    :param cap: maximum wait time in milliseconds
    """
    def _retryable(func):
        @f.wraps(func)
        def wrapper(*args, **kwargs):
            thunk = lambda: func(*args, **kwargs)
            return retryer(thunk, times, cap)
        return wrapper
    return _retryable
A decorator to make a function retry. By default the retry occurs when an exception is thrown, but this may be changed by modifying the ``retryer`` argument. See also :py:func:`retry_ex` and :py:func:`retry_bool`. By default :py:func:`retry_ex` is used as the retry function. Note that the decorator must be called even if not given keyword arguments. :param function retryer: A function to handle retries :param int times: Number of times to retry on initial failure :param int cap: Maximum wait time in milliseconds :Example: :: @retryable() def can_fail(): .... @retryable(retryer=retry_bool, times=10) def can_fail_bool(): ....
def CheckProg(context, prog_name):
    """Simple check whether ``prog_name`` exists in the path.

    Returns the path of the application, or None if not found.
    """
    res = SCons.Conftest.CheckProg(context, prog_name)
    # Tell SCons the result has already been displayed.
    context.did_show_result = 1
    return res
Simple check if a program exists in the path. Returns the path for the application, or None if not found.
def _inner_search(obj, glob, separator, dirs=True, leaves=False):
    """Yield the paths in ``obj`` that match ``glob``.

    NOTE(review): ``separator`` is unused here -- presumably kept for
    interface symmetry with sibling search helpers; confirm.
    """
    for path in dpath.path.paths(obj, dirs, leaves, skip=True):
        if dpath.path.match(path, glob):
            yield path
Search the object paths that match the glob.
def disable_cert_validation():
    """Temporarily patch ssl to skip server certificate verification.

    Intended for troubleshooting only -- never use in production.
    The previous context factory is always restored on exit.
    """
    saved_factory = ssl._create_default_https_context
    ssl._create_default_https_context = ssl._create_unverified_context
    try:
        yield
    finally:
        ssl._create_default_https_context = saved_factory
Context manager to temporarily disable certificate validation in the standard SSL library. Note: This should not be used in production code but is sometimes useful for troubleshooting certificate validation issues. By design, the standard SSL library does not provide a way to disable verification of the server side certificate. However, a patch to disable validation is described by the library developers. This context manager allows applying the patch for specific sections of code.
def solve(self):
    """Yield every board that solves the puzzle.

    Depth-first traversal of the product space of piece placements,
    pruning whole branches as soon as a placement fails.
    """
    board = Board(self.length, self.height)
    permutations = Permutations(self.pieces, self.vector_size)
    for positions in permutations:
        board.reset()
        for level, (piece_uid, linear_position) in enumerate(positions):
            try:
                board.add(piece_uid, linear_position)
            except (OccupiedPosition, VulnerablePosition, AttackablePiece):
                # Invalid placement: skip all permutations sharing this prefix.
                permutations.skip_branch(level)
                break
        else:
            # All pieces placed without conflict: count and yield the solution.
            self.result_counter += 1
            yield board
Solve all possible positions of pieces within the context. Depth-first, tree-traversal of the product space.
def delete_servers(*servers, **options):
    """Remove the given NTP servers from the device configuration.

    :param servers: IP addresses / domain names to remove
    :param test: if True, discard the loaded config (dry run)
    :param commit: if False, load the change without committing
    """
    test = options.pop('test', False)
    commit = options.pop('commit', True)
    return __salt__['net.load_template']('delete_ntp_servers',
                                         servers=servers,
                                         test=test,
                                         commit=commit,
                                         inherit_napalm_device=napalm_device)
Removes NTP servers configured on the device. :param servers: list of IP Addresses/Domain Names to be removed as NTP servers :param test (bool): discard loaded config. By default ``test`` is False (will not dicard the changes) :param commit (bool): commit loaded config. By default ``commit`` is True (will commit the changes). Useful when the user does not want to commit after each change, but after a couple. By default this function will commit the config changes (if any). To load without committing, use the ``commit`` option. For dry run use the ``test`` argument. CLI Example: .. code-block:: bash salt '*' ntp.delete_servers 8.8.8.8 time.apple.com salt '*' ntp.delete_servers 172.17.17.1 test=True # only displays the diff salt '*' ntp.delete_servers 192.168.0.1 commit=False # preserves the changes, but does not commit
def handle(self):
    """Execute the Stratum program: constants, loader, then wrapper.

    Stops early (returning the failing exit code) if constants or
    loader fails; always prints a trailing blank line after wrapper.
    """
    self.output = PyStratumStyle(self.input, self.output)
    for command_name in ('constants', 'loader'):
        ret = self.get_application().find(command_name).execute(self.input, self.output)
        if ret:
            return ret
    ret = self.get_application().find('wrapper').execute(self.input, self.output)
    self.output.writeln('')
    return ret
Executes the actual Stratum program.
def push_rule(self, name):
    """Allow one rule to be evaluated in the context of another.

    Tracks rule names during nested evaluation; raises
    ``PolicyException`` if ``name`` is already on the invocation
    chain (recursion).  Exceptions raised while the nested rule runs
    are logged once, then re-raised with their original traceback.

    :param name: The name of the nested rule to be evaluated.
    :returns: A context manager; no value is generated.
    """
    if name in self._name:
        raise PolicyException(
            "Rule recursion detected; invocation chain: %s -> %s" %
            (' -> '.join(self._name), name))
    self._name.append(name)
    self._pc.append(0)
    self._step.append(1)
    try:
        yield
    except Exception as exc:
        exc_info = sys.exc_info()
        if not self.reported:
            log = logging.getLogger('policies')
            # Logger.warn() is a deprecated alias; use warning().
            log.warning("Exception raised while evaluating rule %r: %s" %
                        (name, exc))
            self.reported = True
        six.reraise(*exc_info)
    finally:
        self._name.pop()
        self._pc.pop()
        self._step.pop()
Allow one rule to be evaluated in the context of another. This allows keeping track of the rule names during nested rule evaluation. :param name: The name of the nested rule to be evaluated. :returns: A context manager, suitable for use with the ``with`` statement. No value is generated.
def has_axis(self, axis):
    """Return True if this pointer-axis event has a value for ``axis``.

    Raises AttributeError for events that are not of type
    ``EventType.POINTER_AXIS``.
    """
    if self.type != EventType.POINTER_AXIS:
        raise AttributeError(_wrong_meth.format(self.type))
    return self._libinput.libinput_event_pointer_has_axis(
        self._handle, axis)
Check if the event has a valid value for the given axis. If this method returns True for an axis and :meth:`get_axis_value` returns a value of 0, the event is a scroll stop event. For pointer events that are not of type :attr:`~libinput.constant.EventType.POINTER_AXIS`, this method raises :exc:`AttributeError`. Args: axis (~libinput.constant.PointerAxis): The axis to check. Returns: bool: True if this event contains a value for this axis. Raises: AttributeError
def get_local_admins():
    """Return the names of all local administrator accounts."""
    config = get_users_config()['result']
    if 'users' not in config:
        return []
    entries = config['users']['entry']
    if isinstance(entries, list):
        return [entry['name'] for entry in entries]
    # A single user is returned as a bare dict rather than a list.
    return [entries['name']]
Show all local administrator accounts. CLI Example: .. code-block:: bash salt '*' panos.get_local_admins
def update_director(self, service_id, version_number, name_key, **kwargs):
    """Update the director for a particular service and version.

    Extra keyword arguments are filtered against
    ``FastlyDirector.FIELDS`` and sent as the PUT body.
    """
    body = self._formdata(kwargs, FastlyDirector.FIELDS)
    content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name_key),
                          method="PUT", body=body)
    return FastlyDirector(self, content)
Update the director for a particular service and version.
def is_not_null_predicate(
    raw_crash, dumps, processed_crash, processor, key=''
):
    """Predicate: True when ``raw_crash[key]`` exists and is truthy.

    ``dumps``, ``processed_crash`` and ``processor`` are unused
    placeholders required by the fat predicate interface.
    """
    try:
        value = raw_crash[key]
    except KeyError:
        return False
    return bool(value)
a predicate that converts the key'd source to boolean. parameters: raw_crash - dict dumps - placeholder in a fat interface - unused processed_crash - placeholder in a fat interface - unused processor - placeholder in a fat interface - unused
def make_label(var_name, selection, position="below"):
    """Build a consistent plot label: variable name plus coordinates.

    :param var_name: name of the variable
    :param selection: coordinates of the variable (falsy -> name only)
    :param position: "below" (newline-separated, default) or "beside"
        (space-separated)
    :raises ValueError: for an unknown ``position`` (previously this
        raised an opaque UnboundLocalError)
    """
    if not selection:
        return var_name
    sel = selection_to_string(selection)
    if position == "below":
        sep = "\n"
    elif position == "beside":
        sep = " "
    else:
        raise ValueError(
            "position must be 'below' or 'beside', got {!r}".format(position))
    return "{}{}{}".format(var_name, sep, sel)
Consistent labelling for plots. Parameters ---------- var_name : str Name of the variable selection : dict[Any] -> Any Coordinates of the variable position : whether to position the coordinates' label "below" (default) or "beside" the name of the variable Returns ------- label A text representation of the label
def mapPartitions(self, f, preservesPartitioning=False):
    """Map partitions of this DStream.

    :param f: mapping function applied to each partition iterable
    :rtype: DStream
    """
    return (
        self
        .mapPartitionsWithIndex(lambda i, p: f(p), preservesPartitioning)
        # Name the resulting RDD after its parent plus the function.
        .transform(lambda rdd: rdd.setName('{}:{}'.format(rdd.prev.name(), f)))
    )
Map partitions. :param f: mapping function :rtype: DStream
def next_blob(self):
    """Read and return the next DAQ frame from the file as a Blob.

    Raises StopIteration at end of file (struct underrun while
    reading the preamble).  Frames with an unrecognised data type are
    skipped by seeking past their payload.
    """
    blob_file = self.blob_file
    try:
        preamble = DAQPreamble(file_obj=blob_file)
    except struct.error:
        # Not enough bytes left for a preamble: end of file.
        raise StopIteration
    try:
        data_type = DATA_TYPES[preamble.data_type]
    except KeyError:
        log.error("Unkown datatype: {0}".format(preamble.data_type))
        data_type = 'Unknown'
    blob = Blob()
    blob[data_type] = None
    blob['DAQPreamble'] = preamble
    if data_type == 'DAQSummaryslice':
        daq_frame = DAQSummaryslice(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    elif data_type == 'DAQEvent':
        daq_frame = DAQEvent(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    else:
        log.warning(
            "Skipping DAQ frame with data type code '{0}'.".format(
                preamble.data_type
            )
        )
        # Seek past the payload (the preamble was already consumed).
        blob_file.seek(preamble.length - DAQPreamble.size, 1)
    return blob
Get the next frame from file
def mode_readable(self):
    """Return a human-readable string for the thermostat mode.

    NOTE(review): flag semantics (MANUAL, AWAY, BOOST, ...) come from
    ``self._raw_mode`` -- assumed to be a flag/bitfield object; confirm.
    """
    ret = ""
    mode = self._raw_mode
    if mode.MANUAL:
        ret = "manual"
        # Target at/below the minimum reads as "off",
        # at/above the maximum as permanently "on".
        if self.target_temperature < self.min_temp:
            ret += " off"
        elif self.target_temperature >= self.max_temp:
            ret += " on"
        else:
            ret += " (%sC)" % self.target_temperature
    else:
        ret = "auto"
    if mode.AWAY:
        ret += " holiday"
    if mode.BOOST:
        ret += " boost"
    if mode.DST:
        ret += " dst"
    if mode.WINDOW:
        ret += " window"
    if mode.LOCKED:
        ret += " locked"
    if mode.LOW_BATTERY:
        ret += " low battery"
    return ret
Return a readable representation of the mode.
def get_utc_timestamp(self, handle):
    """Return the UTC timestamp of the file backing ``handle``."""
    fpath = self._fpath_from_handle(handle)
    mtime = os.stat(fpath).st_mtime
    return timestamp(datetime.datetime.utcfromtimestamp(mtime))
Return the UTC timestamp.
def get_objective_banks(self):
    """Pass through to provider ObjectiveBankLookupSession.get_objective_banks."""
    session = self._get_provider_session('objective_bank_lookup_session')
    wrapped = [
        ObjectiveBank(self._provider_manager, cat, self._runtime, self._proxy)
        for cat in session.get_objective_banks()
    ]
    return ObjectiveBankList(wrapped)
Pass through to provider ObjectiveBankLookupSession.get_objective_banks
def similarity(self, other: Trigram) -> float:
    """Return the best similarity of ``other`` across known trigrams."""
    scores = (self._match(known, other) for known in self.trigrams)
    # ``default=0`` covers the case of no known trigrams at all.
    return max(scores, default=0)
Find the best similarity within known trigrams.
def iterate(self, resource_type=None):
    """Yield (LogicalId, SamResource) pairs from the template.

    :param resource_type: optional type string to filter resources by
    """
    for logical_id, resource_dict in self.resources.items():
        resource = SamResource(resource_dict)
        if not resource.valid():
            continue
        if resource_type and resource.type != resource_type:
            continue
        yield logical_id, resource
Iterate over all resources within the SAM template, optionally filtering by type :param string resource_type: Optional type to filter the resources by :yields (string, SamResource): Tuple containing LogicalId and the resource
def add_whitespace_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
    """Split an alphabetic command immediately followed by a number.

    e.g. ``l24`` -> ``l 24``; ``list24`` -> ``list 24``; commands that
    already have a space are left untouched.
    """
    command = data.statement.command
    # Leading run of non-space, non-digit chars followed by digits.
    command_pattern = re.compile(r'^([^\s\d]+)(\d+)')
    match = command_pattern.search(command)
    if match:
        data.statement = self.statement_parser.parse("{} {} {}".format(
            match.group(1),
            match.group(2),
            '' if data.statement.args is None else data.statement.args
        ))
    return data
A hook to split alphabetic command names immediately followed by a number. l24 -> l 24 list24 -> list 24 list 24 -> list 24
def get_items_of_reminder_per_page(self, reminder_id, per_page=1000, page=1):
    """Get one page of items for a reminder.

    :param reminder_id: the reminder id
    :param per_page: how many objects per page (default 1000)
    :param page: which page (default 1)
    :return: list
    """
    return self._get_resource_per_page(
        resource=REMINDER_ITEMS,
        per_page=per_page,
        page=page,
        params={'reminder_id': reminder_id},
    )
Get items of reminder per page :param reminder_id: the reminder id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
def annotate(results, settings):
    """Concatenate the non-empty annotations of all checkers."""
    lines = []
    for result, setting in zip(results, settings):
        annotation = generate_annotation(result, setting)
        if annotation:
            lines.append(annotation)
    return '\n'.join(lines)
Concatenate the annotations of all checkers
def download_series_gui(frame, urls, directory, min_file_size, max_file_size, no_redirects):
    """Start serial downloading of ``urls`` via the GUI progress window.

    Creates ``directory`` first if it does not exist.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    app = progress_class(frame, urls, directory, min_file_size, max_file_size, no_redirects)
called when user wants serial downloading
def create_index(self, index, index_type=GEO2D):
    """Create an index on a given attribute.

    :param str index: attribute to set the index on
    :param index_type: a PyMongo index type; defaults to GEO2D
    """
    self.logger.info("Adding %s index to stores on attribute: %s" % (index_type, index))
    # Yields the driver future -- presumably a tornado/motor-style
    # coroutine; confirm against the surrounding class.
    yield self.collection.create_index([(index, index_type)])
Create an index on a given attribute :param str index: Attribute to set index on :param str index_type: See PyMongo index types for further information, defaults to GEO2D index.
def thaw(self, tmp_dir):
    """Thaw every present secret into ``tmp_dir``."""
    present_resources = (r for r in self.resources() if r.present)
    for resource in present_resources:
        resource.thaw(tmp_dir)
Will thaw every secret into an appropriate temporary location
def normalise_reads(self):
    """Use bbnorm (bbmap suite) to normalise reads to a kmer depth of 100.

    Falls back to the trimmed reads if bbnorm fails, or to an empty
    list when a sample has no fastq files.
    """
    logging.info('Normalising reads to a kmer depth of 100')
    for sample in self.metadata:
        sample.general.normalisedreads = [fastq.split('.fastq.gz')[0] + '_normalised.fastq.gz'
                                          for fastq in sorted(sample.general.fastqfiles)]
        try:
            out, err, cmd = bbtools.bbnorm(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
                                           forward_out=sample.general.normalisedreads[0],
                                           returncmd=True,
                                           threads=self.cpus)
            sample[self.analysistype].normalisecmd = cmd
            write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
        except CalledProcessError:
            # bbnorm crashed: fall back to the un-normalised trimmed reads.
            sample.general.normalisedreads = sample.general.trimmedfastqfiles
        except IndexError:
            # No input fastq files for this sample.
            sample.general.normalisedreads = list()
Use bbnorm from the bbmap suite of tools to perform read normalisation
def render_revalidation_failure(self, step, form, **kwargs):
    """Called when a form fails re-validation while rendering the done view.

    Resets the current step to the failing form's step and re-renders it.
    """
    self.storage.current_step = step
    return self.render(form, **kwargs)
Gets called when a form doesn't validate when rendering the done view. By default, it changes the current step to the failing form's step and renders the form.
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
    """Detect the Odoo version from an addons directory.

    All installable addons must target the same Odoo version,
    otherwise DistutilsSetupError is raised.  Returns None when no
    installable addon is found.
    """
    odoo_version_info = None
    addons = os.listdir(addons_dir)
    for addon in addons:
        addon_dir = os.path.join(addons_dir, addon)
        if is_installable_addon(addon_dir):
            manifest = read_manifest(addon_dir)
            _, _, addon_odoo_version_info = _get_version(
                addon_dir, manifest, odoo_version_override,
                git_post_version=False)
            if odoo_version_info is not None and \
                    odoo_version_info != addon_odoo_version_info:
                raise DistutilsSetupError("Not all addons are for the same "
                                          "odoo version in %s (error detected "
                                          "in %s)" % (addons_dir, addon))
            odoo_version_info = addon_odoo_version_info
    return odoo_version_info
Detect Odoo version from an addons directory
def _get_bin_width(stdev, count): w = int(round((3.5 * stdev) / (count ** (1.0 / 3)))) if w: return w else: return 1
Return the histogram's optimal bin width based on Sturges http://www.jstor.org/pss/2965501
def coerce(self, value, **kwargs):
    """Coerce each item of ``value`` with the element coercion."""
    return [self._coercion.coerce(item, **kwargs) for item in value]
Coerces array items with proper coercion.
def sort_by_formula_id(raw_datasets):
    """Group the handwritings of raw datasets by accepted formula id.

    :param raw_datasets: list of dicts with a 'handwriting' entry
    :return: defaultdict mapping formula_id -> list of handwritings
    """
    grouped = defaultdict(list)
    for entry in raw_datasets:
        handwriting = entry['handwriting']
        grouped[handwriting.formula_id].append(handwriting)
    return grouped
Sort a list of formulas by `id`, where `id` represents the accepted formula id. Parameters ---------- raw_datasets : list of dictionaries A list of raw datasets. Examples -------- The parameter `raw_datasets` has to be of the format >>> rd = [{'is_in_testset': 0, ... 'formula_id': 31, ... 'handwriting': HandwrittenData(raw_data_id=2953), ... 'formula_in_latex': 'A', ... 'id': 2953}, ... {'is_in_testset': 0, ... 'formula_id': 31, ... 'handwriting': HandwrittenData(raw_data_id=4037), ... 'formula_in_latex': 'A', ... 'id': 4037}, ... {'is_in_testset': 0, ... 'formula_id': 31, ... 'handwriting': HandwrittenData(raw_data_id=4056), ... 'formula_in_latex': 'A', ... 'id': 4056}] >>> sort_by_formula_id(rd)
def can_access_objective_hierarchy(self):
    """Test whether this user can perform hierarchy queries.

    A True result does not guarantee authorization; False means all
    methods in this session would raise PermissionDenied (a hint for
    applications deciding whether to offer traversal functions).
    """
    url_path = construct_url('authorization', bank_id=self._catalog_idstr)
    return self._get_request(url_path)['objectiveHierarchyHints']['canAccessHierarchy']
Tests if this user can perform hierarchy queries. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an application that may not offer traversal functions to unauthorized users. return: (boolean) - false if hierarchy traversal methods are not authorized, true otherwise compliance: mandatory - This method must be implemented.
def is_ipaddress(hostname):
    """Detect whether ``hostname`` is an IPv4 or IPv6 address literal.

    :param hostname: hostname (str, or ASCII bytes on Python 3)
    :return: True if the hostname parses as an IP address
    """
    if six.PY3 and isinstance(hostname, bytes):
        # inet_pton wants str on Python 3.
        hostname = hostname.decode('ascii')
    families = [socket.AF_INET]
    # AF_INET6 may be missing on IPv6-less builds.
    if hasattr(socket, 'AF_INET6'):
        families.append(socket.AF_INET6)
    for af in families:
        try:
            inet_pton(af, hostname)
        except (socket.error, ValueError, OSError):
            pass
        else:
            return True
    return False
Detects whether the hostname given is an IP address. :param str hostname: Hostname to examine. :return: True if the hostname is an IP address, False otherwise.
def remove(self, *l):
    """Remove each flattened element of ``*l`` from the outer container.

    Args:
        *l: elements; each is passed into ``self.Inner`` before removal.
    """
    for a in flatten(l):
        self._remove([self.Inner(a)], self.l)
remove inner from outer Args: *l element that is passes into Inner init
def has_basis_notes(family, data_dir=None):
    """Return True if a notes file exists for the given basis set family."""
    return os.path.isfile(_basis_notes_path(family, data_dir))
Check if notes exist for a given basis set Returns True if they exist, false otherwise
def as_wfn(self):
    """Return the CPE Name as a WFN string of version 2.3.

    Only the first seven components are shown; undefined and empty
    components are omitted.

    :return: CPE Name as WFN string
    :rtype: string
    """
    wfn = []
    wfn.append(CPE2_3_WFN.CPE_PREFIX)
    for ck in CPEComponent.CPE_COMP_KEYS:
        lc = self._get_attribute_components(ck)
        comp = lc[0]
        if (isinstance(comp, CPEComponentUndefined) or
                isinstance(comp, CPEComponentEmpty)):
            continue
        else:
            # Emit  key="value"  followed by the component separator.
            v = []
            v.append(ck)
            v.append("=")
            v.append('"')
            v.append(comp.as_wfn())
            v.append('"')
            wfn.append("".join(v))
            wfn.append(CPEComponent2_3_WFN.SEPARATOR_COMP)
    # Drop the trailing separator before closing the WFN.
    wfn = wfn[:-1]
    wfn.append(CPE2_3_WFN.CPE_SUFFIX)
    return "".join(wfn)
Returns the CPE Name as WFN string of version 2.3. Only shows the first seven components. :return: CPE Name as WFN string :rtype: string :exception: TypeError - incompatible version
def count_exceptions(self, c, broker):
    """Accumulate the number of exceptions recorded for ``c``; returns self."""
    recorded = broker.exceptions
    if c in recorded:
        self.counts['exception'] += len(recorded[c])
    return self
Count exceptions as processing proceeds
def merge_coords(objs, compat='minimal', join='outer', priority_arg=None,
                 indexes=None):
    """Merge coordinate variables.

    See merge_core for argument descriptions.  Works like merge_core
    except that it does not distinguish coordinates from data
    variables.
    """
    _assert_compat_valid(compat)
    coerced = coerce_pandas_values(objs)
    aligned = deep_align(coerced, join=join, copy=False, indexes=indexes)
    expanded = expand_variable_dicts(aligned)
    priority_vars = _get_priority_vars(aligned, priority_arg, compat=compat)
    variables = merge_variables(expanded, priority_vars, compat=compat)
    assert_unique_multiindex_level_names(variables)
    return variables
Merge coordinate variables. See merge_core below for argument descriptions. This works similarly to merge_core, except that we don't worry about whether variables are coordinates or not.
def GetInstanceInfo(r, instance, static=None):
    """Get information about an instance; returns a job id.

    @param instance: instance name
    @param static: when given, forwarded as the ``static`` query parameter
    """
    url = "/2/instances/%s/info" % instance
    if static is None:
        return r.request("get", url)
    return r.request("get", url, query={"static": static})
Gets information about an instance. @type instance: string @param instance: Instance name @rtype: string @return: Job ID
def error_from_exc(nsdk, tracer_h, e_val=None, e_ty=None):
    """Attach error information to ``tracer_h``.

    If ``e_val`` and ``e_ty`` are both None, the currently-handled
    exception is used; a missing type is derived from the value.
    Does nothing when ``tracer_h`` is falsy.
    """
    if not tracer_h:
        return
    if e_ty is None and e_val is None:
        e_ty, e_val = sys.exc_info()[:2]
    if e_ty is None and e_val is not None:
        e_ty = type(e_val)
    nsdk.tracer_error(tracer_h, getfullname(e_ty), str(e_val))
Attach appropriate error information to tracer_h. If e_val and e_ty are None, the current exception is used.
def expand_query(config, kwds):
    """Expand alias patterns in ``kwds`` using ``config.search.alias``.

    Non-alias patterns are kept as-is; alias expansions are re-parsed
    with the search argument parser and merged into ``kwds`` (list
    values are appended, scalars overwritten).  Modifies ``kwds`` in
    place and returns the adapted dict.
    """
    pattern = []
    for query in kwds.pop('pattern', []):
        expansion = config.search.alias.get(query)
        if expansion is None:
            pattern.append(query)
        else:
            parser = SafeArgumentParser()
            search_add_arguments(parser)
            ns = parser.parse_args(expansion)
            for (key, value) in vars(ns).items():
                if isinstance(value, (list, tuple)):
                    if not kwds.get(key):
                        kwds[key] = value
                    else:
                        kwds[key].extend(value)
                else:
                    kwds[key] = value
    kwds['pattern'] = pattern
    return config.search.kwds_adapter(kwds)
Expand `kwds` based on `config.search.query_expander`. :type config: .config.Configuration :type kwds: dict :rtype: dict :return: Return `kwds`, modified in place.
def path(self):
    """Node's relative path (bytes) from the root node.

    NOTE(review): ``self.parent.path`` may be bytes (no ``.encode``,
    handled by the AttributeError fallback) or str; joining a bytes
    parent path with a str ``self.name`` would raise -- confirm that
    ``name`` matches the parent path's type.
    """
    if self.parent:
        try:
            parent_path = self.parent.path.encode()
        except AttributeError:
            parent_path = self.parent.path
        return os.path.join(parent_path, self.name)
    return b"/"
Node's relative path from the root node
def make_niche_grid(res_dict, world_size=(60, 60)):
    """Build a grid of per-cell resource sets from a resource->cells dict.

    ``res_dict`` maps each resource to a list of (x, y) cell tuples;
    the result is a list of lists of sets indexed as world[y][x].
    """
    world = initialize_grid(world_size, set())
    for resource, cells in res_dict.items():
        for x, y in cells:
            world[y][x].add(resource)
    return world
Converts dictionary specifying where resources are to nested lists specifying what sets of resources are where. res_dict - a dictionary in which keys are resources in the environment and values are list of tuples representing the cells they're in. world_size - a tuple indicating the dimensions of the world. Default = 60x60, because that's the default Avida world size Returns a list of lists of sets indicating the set of resources available at each x,y location in the Avida grid.
def value_from_object(self, obj):
    """Return the field value of ``obj`` dumped to a JSON string."""
    val = super(JSONField, self).value_from_object(obj)
    return self.get_prep_value(val)
Return value dumped to string.
def copy_file_internal(
    src_fs,
    src_path,
    dst_fs,
    dst_path,
):
    """Low-level file copy without locking or ``manage_fs``.

    Truncates the destination if it already exists.  Picks the
    cheapest strategy: same-filesystem copy, streaming into a local
    syspath, or a generic upload from the source stream.
    """
    if src_fs is dst_fs:
        # Same filesystem object: let it do an internal copy.
        src_fs.copy(src_path, dst_path, overwrite=True)
        return
    if dst_fs.hassyspath(dst_path):
        # Destination is on the local OS: stream straight into the file.
        with dst_fs.openbin(dst_path, "w") as write_file:
            src_fs.download(src_path, write_file)
        return
    with src_fs.openbin(src_path) as read_file:
        dst_fs.upload(dst_path, read_file)
Low level copy, that doesn't call manage_fs or lock. If the destination exists, and is a file, it will be first truncated. This method exists to optimize copying in loops. In general you should prefer `copy_file`. Arguments: src_fs (FS): Source filesystem. src_path (str): Path to a file on the source filesystem. dst_fs (FS: Destination filesystem. dst_path (str): Path to a file on the destination filesystem.
def lchownr(path, owner, group):
    """Recursively change ownership under ``path`` without following symlinks.

    :param str path: root path to start changing ownership
    :param str owner: owner name used to look up the uid
    :param str group: group name used to look up the gid
    """
    chownr(path, owner, group, follow_links=False)
Recursively change user and group ownership of files and directories in a given path, not following symbolic links. See the documentation for 'os.lchown' for more information. :param str path: The string path to start changing ownership. :param str owner: The owner string to use when looking up the uid. :param str group: The group string to use when looking up the gid.
def extract_col(self, col):
    """Return column number ``col`` of the grid as a list."""
    return [row[col] for row in self.grid]
get column number 'col'
def MAE(x1, x2=-1):
    """Mean absolute error.

    ``x1`` is either an error series directly, or the first of two
    series whose difference (via ``get_valid_error``) is the error.
    """
    e = get_valid_error(x1, x2)
    return np.mean(np.abs(e))
Mean absolute error - this function accepts two series of data or directly one series with error. **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - MAE of error (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
def _compute_anelestic_attenuation_term(self, C, dists): f_aat = np.zeros_like(dists.rjb) idx = dists.rjb > 80.0 f_aat[idx] = C["b10"] * (dists.rjb[idx] - 80.0) return f_aat
Compute and return anelastic attenuation term in equation 5, page 970.
def check_unique_tokens(sender, instance, **kwargs):
    """Signal handler: ensure callback tokens are unique.

    If an active token with the same key already exists, one new
    numeric token is generated (a single retry).
    """
    if isinstance(instance, CallbackToken):
        if CallbackToken.objects.filter(key=instance.key, is_active=True).exists():
            instance.key = generate_numeric_token()
Ensures that mobile and email tokens are unique or tries once more to generate.
def exclusion_path(cls, project, exclusion):
    """Return a fully-qualified exclusion resource path string."""
    return google.api_core.path_template.expand(
        "projects/{project}/exclusions/{exclusion}",
        project=project,
        exclusion=exclusion,
    )
Return a fully-qualified exclusion string.
def string_to_timestruct(input_string):
    """Decode ``input_string`` into a ``time.struct_time``.

    Returns the all-zero struct_time when the string cannot be parsed.
    """
    try:
        return time.strptime(input_string.decode('utf-8'),
                             VolumeDescriptorDate.TIME_FMT)
    except ValueError:
        return time.struct_time((0, 0, 0, 0, 0, 0, 0, 0, 0))
A cacheable function to take an input string and decode it into a time.struct_time from the time module. If the string cannot be decoded because of an illegal value, then the all-zero time.struct_time will be returned instead. Parameters: input_string - The string to attempt to parse. Returns: A time.struct_time object representing the time.
def get_view_name(namespace, view):
    """Create the (sanitized) name for a view, prefixed by its namespace."""
    prefix = namespace + "_" if namespace != "" else ""
    return sanitize(prefix + view.name)
create the name for the view
def delete(self, id=None):
    """Delete a record from the database.

    :param id: optional row id; when given, a ``where id = ?`` clause
        is added before compiling the delete
    :return: the number of rows deleted
    :rtype: int
    """
    if id is not None:
        self.where('id', '=', id)
    sql = self._grammar.compile_delete(self)
    return self._connection.delete(sql, self.get_bindings())
Delete a record from the database :param id: The id of the row to delete :type id: mixed :return: The number of rows deleted :rtype: int
def start(self, *args, **kwargs):
    """Launch the consumer: listen forever, or wait for one message.

    :param forever: keep listening when True (default)
    :param timeout: seconds to wait before giving up
    """
    forever = kwargs.get('forever', True)
    timeout = kwargs.get('timeout', None)
    if forever:
        return self.run(timeout=timeout)
    if timeout:
        next(self.consume(timeout=timeout), None)
    else:
        next(self.consume(limit=1, timeout=timeout), None)
| Launch the consumer. | It can listen forever for messages or just wait for one. :param forever: If set, the consumer listens forever. Default to `True`. :type forever: bool :param timeout: If set, the consumer waits the specified seconds before quitting. :type timeout: None, int :rtype: None :raises socket.timeout: when no message has been received since `timeout`.
def setup_file_watcher(path, use_polling=False):
    """Watch ``path`` for ``*.py`` changes in a background thread.

    Sends SIGHUP to the current process whenever a source file
    changes.  ``use_polling`` selects the polling observer (useful on
    filesystems without native change events).
    """
    if use_polling:
        observer_class = watchdog.observers.polling.PollingObserver
    else:
        observer_class = EVENTED_OBSERVER
    file_event_handler = _SourceChangesHandler(patterns=["*.py"])
    file_watcher = observer_class()
    file_watcher.schedule(file_event_handler, path, recursive=True)
    file_watcher.start()
    return file_watcher
Sets up a background thread that watches for source changes and automatically sends SIGHUP to the current process whenever a file changes.
def set_default(self):
    """Reset the config file to the built-in defaults.

    Creates the config directory if needed, writes a ``Settings``
    section containing ``self.DEFAULTS``, and leaves the parsed
    config in ``self._config``.
    """
    try:
        os.makedirs(os.path.dirname(self._configfile))
    except OSError:
        # Narrowed from a bare ``except``: only swallow filesystem
        # errors (directory already exists, empty dirname, ...).
        pass
    self._config = configparser.RawConfigParser()
    self._config.add_section('Settings')
    for key, val in self.DEFAULTS.items():
        self._config.set('Settings', key, val)
    with open(self._configfile, 'w') as f:
        self._config.write(f)
Set config to default.
def Poll(generator=None, condition=None, interval=None, timeout=None):
    """Call ``generator`` every ``interval`` seconds until ``condition`` holds.

    Raises ``errors.PollTimeoutError`` once ``timeout`` seconds have
    elapsed without the condition being satisfied.
    """
    if not generator:
        raise ValueError("generator has to be a lambda")
    if not condition:
        raise ValueError("condition has to be a lambda")
    interval = DEFAULT_POLL_INTERVAL if interval is None else interval
    timeout = DEFAULT_POLL_TIMEOUT if timeout is None else timeout
    started = time.time()
    while True:
        obj = generator()
        if condition(obj):
            return obj
        if timeout and (time.time() - started) > timeout:
            raise errors.PollTimeoutError(
                "Polling on %s timed out after %ds." % (obj, timeout))
        time.sleep(interval)
Periodically calls generator function until a condition is satisfied.
def get_target_transcript(self, min_intron=1):
    """Return this alignment mapped onto the target strand as a Transcript.

    Adjacent alignment ranges separated by fewer than ``min_intron``
    bases are merged into one exon; larger gaps become introns.

    :param min_intron: minimum gap (bases) treated as an intron; must be >= 1
    :returns: Transcript mapped to target
    :rtype: Transcript
    """
    if min_intron < 1:
        sys.stderr.write("ERROR minimum intron should be 1 base or longer\n")
        sys.exit()
    rngs = [self.alignment_ranges[0][0].copy()]
    for i in range(len(self.alignment_ranges) - 1):
        # Gap between the end of the last kept range and the next range.
        dist = self.alignment_ranges[i + 1][0].start - rngs[-1].end - 1
        if dist >= min_intron:
            rngs.append(self.alignment_ranges[i + 1][0].copy())
        else:
            # Too close: extend the current exon instead of starting a new one.
            rngs[-1].end = self.alignment_ranges[i + 1][0].end
    tx = Transcript(rngs, options=Transcript.Options(
        direction=self.strand,
        name=self.alignment_ranges[0][1].chr,
        gene_name=self.alignment_ranges[0][1].chr))
    return tx
Get the mapping of to the target strand :returns: Transcript mapped to target :rtype: Transcript
def pkg_version_list(self, pkg_id):
    """Return the sorted list of installed version numbers for ``pkg_id``.

    Args:
        pkg_id (str): Package id of the software/component.

    Returns:
        list: version numbers installed ([] when the package is
        unknown; a registry entry that is already a list is returned
        as-is).
    """
    pkg_data = self.__reg_software.get(pkg_id, None)
    if not pkg_data:
        return []
    if isinstance(pkg_data, list):
        return pkg_data
    installed_versions = list(pkg_data.get('version').keys())
    # Sort oldest-to-latest using the class version comparator.
    return sorted(installed_versions, key=cmp_to_key(self.__oldest_to_latest_version))
Returns information on a package. Args: pkg_id (str): Package Id of the software/component. Returns: list: List of version numbers installed.
def add_param(self, param_key, param_val):
    """Record a key/value parameter pair.

    The sentinel key ``'__success_test'`` additionally mirrors its
    value onto ``self.success``.
    """
    entry = [param_key, param_val]
    self.params.append(entry)
    if param_key == '__success_test':
        self.success = param_val
adds parameters as key value pairs
def check_by_selector(self, selector):
    """Check the checkbox matching the CSS/jQuery selector.

    Only clicks when the element is not already selected, so the
    operation is idempotent.
    """
    elem = find_element_by_jquery(world.browser, selector)
    if not elem.is_selected():
        elem.click()
Check the checkbox matching the CSS selector.
def binarycontent_sections(chunk):
    """Split ``chunk`` into sections around binary-content start tags.

    Yields the chunk unchanged when no tag is present.  A partial
    opening marker (``<`` or ``</``) left at the end of a section is
    held back and re-attached in front of the tag it belongs to.
    """
    # Tag without its leading '<' so str.split can find it mid-stream.
    binary_content_tag = BINARY_CONTENT_START[1:]
    if binary_content_tag not in chunk:
        yield chunk
    else:
        sections = chunk.split(binary_content_tag)
        for sec in sections:
            extra = b''
            if sec.endswith(b'</'):
                extra = sec[-2:]
                yield sec[:-2]
            elif sec.endswith(b'<'):
                extra = sec[-1:]
                yield sec[:-1]
            else:
                yield sec
            if extra:
                yield b''.join([extra, binary_content_tag])
Split a chunk of data into sections by start and end binary content tags.
def tag_secondary_structure(self, force=False):
    """Tag each protein polymer's monomers with secondary structure.

    :param force: re-run tagging even on already-tagged monomers
    """
    protein_polymers = (m for m in self._molecules
                        if m.molecule_type == 'protein')
    for polymer in protein_polymers:
        polymer.tag_secondary_structure(force=force)
Tags each `Monomer` in the `Assembly` with it's secondary structure. Notes ----- DSSP must be available to call. Check by running `isambard.external_programs.dssp.test_dssp`. If DSSP is not available, please follow instruction here to add it: https://github.com/woolfson-group/isambard#external-programs For more information on DSSP see [1]. References ---------- .. [1] Kabsch W, Sander C (1983) "Dictionary of protein secondary structure: pattern recognition of hydrogen-bonded and geometrical features", Biopolymers, 22, 2577-637. Parameters ---------- force : bool, optional If True the tag will be run even if `Monomers` are already tagged
def assemble(self):
    """Mangle self into a FUSE-style command-line argument array.

    Returns a list of strings: program name, optional mountpoint,
    enabled modifier flags, then all options folded into one "-o"
    argument.
    """
    self.canonify()
    # argv[0] when available, otherwise fall back to a generic name.
    args = [sys.argv and sys.argv[0] or "python"]
    if self.mountpoint:
        args.append(self.mountpoint)
    # Translate each enabled modifier into its FUSE flag.
    for m, v in self.modifiers.items():
        if v:
            args.append(self.fuse_modifiers[m])
    # Collect key=value options and bare option names into one list ...
    opta = []
    for o, v in self.optdict.items():
        opta.append(o + '=' + v)
    opta.extend(self.optlist)
    # ... and emit them as a single combined "-o" argument.
    if opta:
        args.append("-o" + ",".join(opta))
    return args
Mangle self into an argument array
def create_data_item_from_data_and_metadata(self, data_and_metadata: DataAndMetadata.DataAndMetadata, title: str=None) -> DataItem:
    """Create a data item in the library from a data and metadata object.

    The data for the data item will be written to disk immediately and
    unloaded from memory.  If you wish to delay writing to disk and keep
    using the data, create an empty data item and use the data item
    methods to modify the data.

    :param data_and_metadata: The data and metadata.
    :param title: The title of the data item (optional).
    :return: The new :py:class:`nion.swift.Facade.DataItem` object.
    :rtype: :py:class:`nion.swift.Facade.DataItem`

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    raw_data_item = DataItemModule.new_data_item(data_and_metadata)
    if title is not None:
        raw_data_item.title = title
    self.__document_model.append_data_item(raw_data_item)
    return DataItem(raw_data_item)
Create a data item in the library from a data and metadata object. The data for the data item will be written to disk immediately and unloaded from memory. If you wish to delay writing to disk and keep using the data, create an empty data item and use the data item methods to modify the data. :param data_and_metadata: The data and metadata. :param title: The title of the data item (optional). :return: The new :py:class:`nion.swift.Facade.DataItem` object. :rtype: :py:class:`nion.swift.Facade.DataItem` .. versionadded:: 1.0 Scriptable: Yes
def item_transaction(self, item) -> Transaction:
    """Begin transaction state for *item* and return the Transaction.

    Transaction state exists to prevent writing out to disk, mainly for
    performance reasons; all changes to the object are delayed until the
    transaction state exits.  This method is thread safe.
    """
    entries = self.__build_transaction_items(item)
    active = Transaction(self, item, entries)
    self.__transactions.append(active)
    return active
Begin transaction state for item. A transaction state is exists to prevent writing out to disk, mainly for performance reasons. All changes to the object are delayed until the transaction state exits. This method is thread safe.
def collections(record, key, value):
    """Parse custom MARC tag 980 into its collection fields.

    Subfield 'a' is the primary collection, 'b' the secondary, and 'c'
    marks deletion; missing subfields yield None.
    """
    return dict(
        primary=value.get('a'),
        secondary=value.get('b'),
        deleted=value.get('c'),
    )
Parse custom MARC tag 980.
def scan_url(self, this_url):
    """Submit a URL to be scanned by VirusTotal.

    :param this_url: The URL that should be scanned. This parameter
        accepts a list of URLs (up to 4 with the standard request rate)
        so as to perform a batch scanning request with one single call.
        The URLs must be separated by a new line character.
    :return: JSON response that contains scan_id and permalink, or a
        dict with an 'error' key describing the request failure.
    """
    params = {'apikey': self.api_key, 'url': this_url}
    try:
        response = requests.post(self.base + 'url/scan', params=params, proxies=self.proxies)
    except requests.RequestException as e:
        # BUGFIX: exceptions have no .message attribute on Python 3
        # (it was removed after Python 2.6); str(e) is the portable way
        # to get the error text, so the error path no longer raises
        # AttributeError.
        return dict(error=str(e))
    return _return_response_and_status_code(response)
Submit a URL to be scanned by VirusTotal. :param this_url: The URL that should be scanned. This parameter accepts a list of URLs (up to 4 with the standard request rate) so as to perform a batch scanning request with one single call. The URLs must be separated by a new line character. :return: JSON response that contains scan_id and permalink.
def action(args):
    """Strip non-current files and rollback information from a refpkg.

    *args* is an argparse namespace whose ``refpkg`` field gives the
    path of the refpkg to operate on.
    """
    log.info('loading reference package')
    package = refpkg.Refpkg(args.refpkg, create=False)
    package.strip()
Strips non-current files and rollback information from a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on).
def annotate_segments(self, Z):
    """Report the copy number and start-end indices of each segment.

    Groups equal copy-number values of the masked array ``Z`` into
    contiguous segments and returns a list of
    ``(mean_cn, (start, end))`` tuples.
    """
    P = Z.copy()
    # Mark non-finite entries with a sentinel so they break up runs.
    P[~np.isfinite(P)] = -1
    # NOTE(review): mapping appears to translate indices in the
    # compressed (unmasked-only) coordinate space back to positions in
    # the original array -- confirm against contiguous_regions' output.
    _, mapping = np.unique(np.cumsum(P >= 0), return_index=True)
    # Work on the unmasked values only.
    dZ = Z.compressed()
    uniq, idx = np.unique(dZ, return_inverse=True)
    segments = []
    for i, mean_cn in enumerate(uniq):
        if not np.isfinite(mean_cn):
            continue
        # Each contiguous run of this copy-number value is one segment.
        for rr in contiguous_regions(idx == i):
            segments.append((mean_cn, mapping[rr]))
    return segments
Report the copy number and start-end segment
def get_limits(self):
    """Return the limits of this task.

    Gives any registered 'task_limits' hook the chance to override the
    configured limits; falls back to ``self._limits`` when no hook
    responds.
    """
    hook_results = self._hook_manager.call_hook('task_limits', course=self.get_course(), task=self, default=self._limits)
    if hook_results:
        return hook_results[0]
    return self._limits
Return the limits of this task
def changes(self, **kwargs):
    """List the merge request changes.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: List of changes
    """
    endpoint = '{0}/{1}/changes'.format(self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(endpoint, **kwargs)
List the merge request changes. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: List of changes
def for_json(self):
    """Serialize for JSON, handling multi-select vs single-select.

    Multi-select fields defer to the base class; single-select fields
    return their single value, using the value's own ``for_json`` hook
    when it provides one.
    """
    if self.multiselect:
        # Explicit-class super call: resolves from the class above
        # MultiSelectField in the MRO.
        return super(MultiSelectField, self).for_json()
    value = self.get_python()
    if hasattr(value, 'for_json'):
        return value.for_json()
    return value
Handle multi-select vs single-select
def _special_value_maxLength(em, newValue=NOT_PROVIDED):
    """_special_value_maxLength - Handle the special "maxLength" property

    @param em <AdvancedTag> - The tag element
    @param newValue - Default NOT_PROVIDED; when provided, that value is
        validated instead of the tag's current attribute. This lets one
        method serve both the getter path and the set/validate path.
    """
    if newValue is NOT_PROVIDED:
        # Getter path: an unset maxlength attribute maps to -1, matching
        # the HTML convention for an unset maxLength.
        if not em.hasAttribute('maxlength'):
            return -1
        curValue = em.getAttribute('maxlength', '-1')
        # On the getter path an invalid stored value falls back to -1
        # instead of raising.
        invalidDefault = -1
    else:
        # Setter/validation path: an invalid value raises IndexSizeError.
        curValue = newValue
        invalidDefault = IndexSizeErrorException
    return convertToIntRange(curValue, minValue=0, maxValue=None, emptyValue='0', invalidDefault=invalidDefault)
_special_value_maxLength - Handle the special "maxLength" property @param em <AdvancedTag> - The tag element @param newValue - Default NOT_PROVIDED, if provided will use that value instead of the current .getAttribute value on the tag. This is because this method can be used for both validation and getting/setting
def from_csv(cls, filename):
    """Create gyro stream from CSV data.

    Each line of the file must hold three comma-separated values
    (x, y, z): the measured angular velocity (in radians) of each axis.

    Parameters
    -------------------
    filename : str
        Path to the CSV file

    Returns
    ---------------------
    GyroStream
        A gyroscope stream
    """
    stream = cls()
    stream.data = np.loadtxt(filename, delimiter=',')
    return stream
Create gyro stream from CSV data Load data from a CSV file. The data must be formatted with three values per line: (x, y, z) where x, y, z is the measured angular velocity (in radians) of the specified axis. Parameters ------------------- filename : str Path to the CSV file Returns --------------------- GyroStream A gyroscope stream
def commit(cls, client=None):
    """Commit all collected datapoints via the client.

    :param client: InfluxDBClient instance for writing points to
        InfluxDB; any provided client supersedes the class client.
    :return: result of ``client.write_points``.
    """
    writer = client or cls._client
    result = writer.write_points(cls._json_body_())
    # Clear the accumulated points once they have been written.
    cls._reset_()
    return result
Commit everything from datapoints via the client. :param client: InfluxDBClient instance for writing points to InfluxDB. :attention: any provided client will supersede the class client. :return: result of client.write_points.
def is_enable_action_dependent(self, hosts, services):
    """Check if dependency states match dependency statuses.

    Returns True when at least one dependency either ignores status
    checks ('n' in its status list) or is NOT currently in one of its
    listed failure states; returns False only when every dependency is
    in a matching (bad) state.

    :param hosts: hosts objects, used to get object in act_depend_of
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get object in act_depend_of
    :type services: alignak.objects.service.Services
    :return: True if an action is enabled, False otherwise
    :rtype: bool
    """
    enable_action = False
    for (dep_id, status, _, _) in self.act_depend_of:
        if 'n' in status:
            # 'n' (none) means this dependency never blocks actions.
            enable_action = True
        else:
            # Resolve the dependency object from whichever collection
            # holds its id.
            if dep_id in hosts:
                dep = hosts[dep_id]
            else:
                dep = services[dep_id]
            p_is_down = False
            # Does the dependency currently match any listed bad state?
            dep_match = [dep.is_state(stat) for stat in status]
            if True in dep_match:
                p_is_down = True
            if not p_is_down:
                # Dependency is up, so it cannot explain a blocked action.
                enable_action = True
    return enable_action
Check if dependencies states match dependencies statuses This basically means that a dependency is in a bad state and it can explain this object state. :param hosts: hosts objects, used to get object in act_depend_of :type hosts: alignak.objects.host.Hosts :param services: services objects, used to get object in act_depend_of :type services: alignak.objects.service.Services :return: True if all dependencies matches the status, false otherwise :rtype: bool
def time(self):
    """Current value the time marker is pointing to.

    :rtype: float
    """
    marker_x, _y = self._canvas_ticks.coords(self._time_marker_image)
    return self.get_position_time(marker_x)
Current value the time marker is pointing to :rtype: float
def extend_extents(extents, factor=1.1):
    """Extend a given bounding box.

    The bounding box (x1, y1, x2, y2) is centrally stretched by the
    given factor.

    :param extents: The bounding box extents
    :param factor: The factor for stretching
    :return: (x1, y1, x2, y2) of the extended bounding box
    """
    x1, y1, x2, y2 = extents
    # Half of the extra width/height goes to each side, keeping the
    # center fixed.
    half_extra_w = (factor - 1) * (x2 - x1) / 2
    half_extra_h = (factor - 1) * (y2 - y1) / 2
    return (x1 - half_extra_w, y1 - half_extra_h,
            x2 + half_extra_w, y2 + half_extra_h)
Extend a given bounding box The bounding box (x1, y1, x2, y2) is centrally stretched by the given factor. :param extents: The bound box extents :param factor: The factor for stretching :return: (x1, y1, x2, y2) of the extended bounding box
def embed(self, width=600, height=650):
    """Embed a viewer into a Jupyter notebook.

    :param width: iframe width in pixels
    :param height: iframe height in pixels
    :return: an IPython ``IFrame`` pointing at ``self.url``
    """
    # Imported lazily so IPython is only required inside notebooks.
    from IPython.display import IFrame
    viewer_frame = IFrame(self.url, width, height)
    return viewer_frame
Embed a viewer into a Jupyter notebook.
def _find_models(self, constructor, table_name, constraints=None, *, columns=None, order_by=None, limiting=None): for record in self.find_all(table_name, constraints, columns=columns, order_by=order_by, limiting=limiting): yield constructor(record)
Calls DataAccess.find_all and passes the results to the given constructor.
def HeadList(self):
    """Return (name, HEAD) tuples for every currently loaded repo."""
    heads = []
    for repo_name, repo in self.repos.items():
        heads.append((repo_name, repo.currenthead))
    return heads
Return a list of all the currently loaded repo HEAD objects.
def _create_instance_attributes(self, arguments):
    """Copy class-level attribute templates and make instance placeholders.

    Walks all members of the class, records each DataType member as a
    template, and initializes the corresponding instance attribute from
    *arguments* (validated by the type) or to None when validation
    reports the attribute as required but missing.
    """
    for attribute_name, type_instance in self.getmembers():
        # Only DataType descriptors participate; methods and plain
        # attributes are skipped.
        if isinstance(type_instance, DataType):
            self._templates[attribute_name] = type_instance
            value = None
            if attribute_name in arguments:
                value = arguments[attribute_name]
            try:
                self._attributes[attribute_name] = type_instance.validate(value)
            except exception.RequiredAttributeError:
                # Required attribute was not supplied: keep a None
                # placeholder instead of failing construction.
                self._attributes[attribute_name] = None
Copies class level attribute templates and makes instance placeholders This step is required for direct uses of Model classes. This creates a copy of attribute_names ignores methods and private variables. DataCollection types are deep copied to ignore memory reference conflicts. DataType instances are initialized to None or default value.
def _setup_preferred_paths(self, preferred_conversion_paths):
    """Add given valid preferred conversion paths.

    A path is accepted only when every consecutive pair of steps is a
    known converter; invalid paths are logged and skipped.
    """
    for path in preferred_conversion_paths:
        for pair in pair_looper(path):
            if pair not in self.converters:
                log.warning('Invalid conversion path %s, unknown step %s' % (repr(path), repr(pair)))
                break
        else:
            # for-else: runs only when no unknown step broke the loop,
            # i.e. the whole path validated.
            self.dgraph.add_preferred_path(*path)
Add given valid preferred conversion paths
def generateDirectoryNodeDocuments(self):
    """Generate all of the directory reStructuredText documents."""
    # Flatten the directory tree first, then emit one document per node.
    nested_dirs = []
    for directory in self.dirs:
        directory.findNestedDirectories(nested_dirs)
    for directory in nested_dirs:
        self.generateDirectoryNodeRST(directory)
Generates all of the directory reStructuredText documents.
def load_file_contents(file_path, as_list=True):
    """Load a file (resolved relative to HERE) as a string or list of lines.

    :param file_path: path relative to the module's HERE directory
    :param as_list: when True return ``splitlines()``, otherwise the raw text
    """
    with open(join(HERE, file_path), encoding='utf-8') as handle:
        text = handle.read()
    return text.splitlines() if as_list else text
Load file as string or list
def run(self):
    """Run the bot in a thread.

    Implementing the IRC listener as a thread allows it to listen
    without blocking IRCLego's ability to listen as a pykka actor.

    :return: None
    """
    self._connect()
    # super() is anchored at SingleServerIRCBot, so this resolves
    # start() from the class AFTER it in the MRO, deliberately skipping
    # the IRC bot's own blocking start() -- presumably reaching the
    # thread start; NOTE(review): confirm the MRO.
    super(irc.bot.SingleServerIRCBot, self).start()
Run the bot in a thread. Implementing the IRC listener as a thread allows it to listen without blocking IRCLego's ability to listen as a pykka actor. :return: None
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Render serializer data and return an HTML form, as a string."""
    form = data.serializer
    style = renderer_context.get('style', {})
    # Fall back to this renderer's default template pack and expose the
    # renderer itself to the templates.
    style.setdefault('template_pack', self.template_pack)
    style['renderer'] = self
    pack = style['template_pack'].strip('/')
    template = loader.get_template(pack + '/' + self.base_template)
    return template_render(template, {'form': form, 'style': style})
Render serializer data and return an HTML form, as a string.
def queue_context_entry(exchange, queue_name, routing=None):
    """Form a queue's context entry.

    :param exchange: MQ exchange name
    :param queue_name: MQ queue name
    :param routing: routing key; defaults to the queue name when omitted
    :return: a QueueContextEntry
    """
    routing_key = queue_name if routing is None else routing
    return QueueContextEntry(mq_queue=queue_name,
                             mq_exchange=exchange,
                             mq_routing_key=routing_key)
Form a queue's context entry; the routing key defaults to the queue name when not given.