code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def parse_services(config, services):
    """Count how many of the given service sections have their check enabled.

    A service counts as enabled when its ``check_disabled`` option is false.

    Arguments:
        config (obj): configparser object holding the healthchecker config.
        services (list): section names, one per service check.

    Returns:
        int: number of enabled service checks.
    """
    return sum(
        1 for service in services
        if not config.getboolean(service, 'check_disabled')
    )
Parse configuration to return number of enabled service checks. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. services (list): A list of section names which holds configuration for each service check Returns: A number (int) of enabled service checks.
def on_batch_begin(self, train, **kwargs: Any) -> None:
    "Record learning rate and momentum at beginning of batch."
    if not train:
        return
    # Snapshot the optimizer's current hyper-parameters for later plotting.
    self.lrs.append(self.opt.lr)
    self.moms.append(self.opt.mom)
Record learning rate and momentum at beginning of batch.
def get_files(self):
    """Read and parse every file in ``self.source``.

    Returns:
        dict mapping filename -> parsed frontmatter post.
    """
    posts = {}
    for name in os.listdir(self.source):
        full_path = os.path.join(self.source, name)
        slug = os.path.splitext(name)[0]
        posts[name] = frontmatter.load(full_path, filename=name, slug=slug)
    return posts
Read and parse files from a directory, return a dictionary of path => post
def install_timers(config, context):
    """Create and start the timers requested by the plugin configuration.

    Returns:
        list of started Timer objects.
    """
    timers = []
    if config.get('capture_timeout_warnings'):
        threshold = config.get('timeout_warning_threshold')
        remaining = context.get_remaining_time_in_millis() / 1000
        # Warn at the configured fraction of the remaining execution time...
        timers.append(Timer(remaining * threshold, timeout_warning, (config, context)))
        # ...and raise an error shortly before the hard deadline.
        timers.append(Timer(max(remaining - .5, 0), timeout_error, [config]))
    if config.get('capture_memory_warnings'):
        timers.append(Timer(.5, memory_warning, (config, context)))
    for timer in timers:
        timer.start()
    return timers
Create the timers as specified by the plugin configuration.
def execute_sql(self, query):
    """Run *query* against the open sqlite database and return all rows."""
    cursor = self.con.cursor()
    cursor.execute(query)
    return cursor.fetchall()
Executes a given query string on an open sqlite database.
def get_oncall(self, **kwargs):
    """Retrieve this schedule's "on call" users via the users sub-endpoint.

    Keyword arguments are forwarded as query parameters.
    """
    endpoint = '/'.join([self.endpoint, self.id, 'users'])
    return self.request('GET', endpoint=endpoint, query_params=kwargs)
Retrieve this schedule's "on call" users.
def get_setup_requires():
    """Return the packages required for the commands in this setup.py run.

    Help invocations never need build requirements.
    """
    if {'--help', '--help-commands'}.intersection(sys.argv):
        return list()
    required = []
    for command, dependencies in SETUP_REQUIRES.items():
        if command in sys.argv:
            required.extend(dependencies)
    return required
Return the list of packages required for this setup.py run
def ext_pillar(minion_id, pillar, command):
    """Execute *command* (with ``%s`` replaced by the minion id) and parse
    its output as YAMLEX.

    Returns an empty dict (and logs critically) when the output fails to
    parse or the command fails.
    """
    try:
        command = command.replace('%s', minion_id)
        return deserialize(__salt__['cmd.run']('{0}'.format(command)))
    except Exception:
        # Broad catch is deliberate: any failure degrades to empty pillar
        # data rather than breaking the whole pillar compilation.
        log.critical('YAML data from %s failed to parse', command)
        return {}
Execute a command and read the output as YAMLEX
def all(cls):
    """Return every endpoint the server listens on.

    Only allowed when connected to the system database; other databases
    make the server return an error.
    """
    api = Client.instance().api
    return api.endpoint.get()
Returns a list of all configured endpoints the server is listening on. For each endpoint, the list of allowed databases is returned too if set. The result is a JSON hash which has the endpoints as keys, and the list of mapped database names as values for each endpoint. If a list of mapped databases is empty, it means that all databases can be accessed via the endpoint. If a list of mapped databases contains more than one database name, this means that any of the databases might be accessed via the endpoint, and the first database in the list will be treated as the default database for the endpoint. The default database will be used when an incoming request does not specify a database name in the request explicitly. *Note*: retrieving the list of all endpoints is allowed in the system database only. Calling this action in any other database will make the server return an error.
def jars(self, absolute=True):
    """List the jar files found in the jar path.

    Args:
        absolute: When True (the default), return absolute paths;
            otherwise return the paths exactly as the glob produced them.

    Returns:
        list: jar file paths.
    """
    found = glob(os.path.join(self._jar_path, '*.jar'))
    # Bug fix: the original branches were inverted -- os.path.abspath was
    # applied only when ``absolute`` was False, so absolute=True could
    # return relative paths.
    if absolute:
        return [os.path.abspath(j) for j in found]
    return found
List of jars in the jar path
def get_language_details(self, language):
    """Return the stored status dict for *language*, or {} when unknown."""
    matches = [entry for entry in self.user_data.languages
               if entry['language_string'] == language]
    return matches[0] if matches else {}
Get user's status about a language.
def stop(name, timeout=None, **kwargs):
    """Stop a running container.

    If *timeout* is not given, fall back to the container's configured
    ``StopTimeout`` or the global shutdown default.  A paused container is
    only stopped when ``unpause=True`` is passed in *kwargs*.

    Returns a result dict with ``result``, ``state`` (old/new) and
    possibly ``comment`` keys.
    """
    if timeout is None:
        try:
            # Prefer the stop timeout configured on the container itself.
            timeout = inspect_container(name)['Config']['StopTimeout']
        except KeyError:
            timeout = salt.utils.docker.SHUTDOWN_TIMEOUT
    orig_state = state(name)
    if orig_state == 'paused':
        if kwargs.get('unpause', False):
            unpause_result = _change_state(name, 'unpause', 'running')
            if unpause_result['result'] is False:
                unpause_result['comment'] = (
                    'Failed to unpause container \'{0}\''.format(name)
                )
                return unpause_result
        else:
            # Refuse to stop a paused container unless explicitly asked
            # to unpause it first.
            return {'result': False,
                    'state': {'old': orig_state, 'new': orig_state},
                    'comment': ('Container \'{0}\' is paused, run with '
                                'unpause=True to unpause before stopping'
                                .format(name))}
    ret = _change_state(name, 'stop', 'stopped', timeout=timeout)
    ret['state']['old'] = orig_state
    return ret
Stops a running container name Container name or ID unpause : False If ``True`` and the container is paused, it will be unpaused before attempting to stop the container. timeout Timeout in seconds after which the container will be killed (if it has not yet gracefully shut down) .. versionchanged:: 2017.7.0 If this argument is not passed, then the container's configuration will be checked. If the container was created using the ``stop_timeout`` argument, then the configured timeout will be used, otherwise the timeout will be 10 seconds. **RETURN DATA** A dictionary will be returned, containing the following keys: - ``status`` - A dictionary showing the prior state of the container as well as the new state - ``result`` - A boolean noting whether or not the action was successful - ``comment`` - Only present if the container can not be stopped CLI Examples: .. code-block:: bash salt myminion docker.stop mycontainer salt myminion docker.stop mycontainer unpause=True salt myminion docker.stop mycontainer timeout=20
def iter_rows(self, start=None, end=None):
    """Yield each Region row with index in ``[start, end)``.

    Args:
        start: First row index (defaults to 0).
        end: One-past-last row index (defaults to ``self.nrows``).
    """
    # Bug fix: use explicit None checks so that 0 is a valid bound; the
    # original ``end or self.nrows`` silently treated end=0 as "all rows".
    if start is None:
        start = 0
    if end is None:
        end = self.nrows
    for i in range(start, end):
        yield self.iloc[i, :]
Iterate each of the Region rows in this region
def parse_class_id(self, sel, m, has_selector):
    """Record an HTML class (``.foo``) or id (``#foo``) selector on *sel*.

    Always returns True since a selector was consumed.
    """
    selector = m.group(0)
    name = css_unescape(selector[1:])
    target = sel.classes if selector.startswith('.') else sel.ids
    target.append(name)
    return True
Parse HTML classes and ids.
def _no_spelling_errors(relative_path, contents, linter_options):
    """Check strings/comments in *contents* for spelling errors.

    Returns the (possibly empty) list of spelling errors found.
    """
    block_regexps = linter_options.get("block_regexps", None)
    chunks, shadow = spellcheckable_and_shadow_contents(contents, block_regexps)
    cache = linter_options.get("spellcheck_cache", None)
    user_words, valid_words = valid_words_dictionary_helper.create(cache)
    technical_words = _create_technical_words_dictionary(cache,
                                                         relative_path,
                                                         user_words,
                                                         shadow)
    # Optionally publish the discovered technical terms to a queue.
    log_queue = linter_options.get("log_technical_terms_to")
    if log_queue:
        log_queue.put(technical_words.words())
    errors = _find_spelling_errors_in_chunks(chunks, contents, valid_words,
                                             technical_words, user_words)
    return [error for error in errors if error]
No spelling errors in strings, comments or anything of the like.
def calc_area_under_PSD(self, lowerFreq, upperFreq):
    """Sum the PSD between the bins closest to lowerFreq and upperFreq.

    Parameters
    ----------
    lowerFreq : float
        Lower frequency limit of the sum.
    upperFreq : float
        Upper frequency limit of the sum.

    Returns
    -------
    float
        Area under the PSD between the two frequencies.
    """
    lower_bin = take_closest(self.freqs, lowerFreq)
    start = int(_np.where(self.freqs == lower_bin)[0][0])
    upper_bin = take_closest(self.freqs, upperFreq)
    stop = int(_np.where(self.freqs == upper_bin)[0][0])
    return sum(self.PSD[start:stop])
Sums the area under the PSD from lowerFreq to upperFreq. Parameters ---------- lowerFreq : float The lower limit of frequency to sum from upperFreq : float The upper limit of frequency to sum to Returns ------- AreaUnderPSD : float The area under the PSD from lowerFreq to upperFreq
def spectral_norm(w, dim=0, itr=1, eps=1e-12, test=False, u_init=None,
                  fix_parameters=True):
    """Spectrally normalize *w*: W / sigma(W), via power iteration.

    ``dim`` selects the output dimension; the outer-most dimension uses a
    specialized implementation.
    """
    assert 0 <= dim < len(w.shape), \
        "`dim` must be `0 <= dim and dim < len(w.shape)`."
    assert 0 < itr, "`itr` must be greater than 0."
    assert 0 < eps, "`eps` must be greater than 0."
    impl = (_spectral_norm_outer_most_dim
            if dim == len(w.shape) - 1 else _spectral_norm)
    return impl(w, dim=dim, itr=itr, eps=eps, test=test,
                u_init=u_init, fix_parameters=fix_parameters)
Spectral Normalization. .. math:: W_{sn} = \\frac{W}{\\sigma(W)}. where :math:`W` is the input matrix, and the :math:`\\sigma(W)` is the spectral norm of :math:`W`. The spectral norm is approximately computed by the power iteration. References: Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida, "Spectral Normalization for Generative Adversarial Networks", International Conference on Learning Representations. 2018. Args: W (~nnabla.Variable): Input N-D array with shape. This is normally network parameter. dim (`int`): Output dimension. Default is 0. If the dimension is not 0, then the specified dimension becomes the most-left dimension by transposing. itr (`int`): Number of iterations. Default is 1. eps (`float`): Epsilon for the normalization. Default is 1e-12. test (`bool`): Use test mode. Default is False. Returns: ~nnabla.Variable: Spectrally normalized :math:`W_{sn}` with the same shape as :math:`W`. Example: .. code-block:: python import nnabla as nn import nnabla.parametric_functions as PF b, c, h, w = 4, 64, 32, 32 # Spectrally normalized convolution apply_w = lambda w: PF.spectral_norm(w, dim=0) h = nn.Variable.from_numpy_array(np.random.randn(b, c, h, w)) h = PF.convolution(h, with_bias=False, apply_w=apply_w) # Spectrally normalized affine apply_w = lambda w: PF.spectral_norm(w, dim=1) h = nn.Variable.from_numpy_array(np.random.randn(b, c)) h = PF.affine(h, with_bias=False, apply_w=apply_w) # Spectrally normalized embed apply_w = lambda w: PF.spectral_norm(w, dim=1) h = nn.Variable.from_numpy_array(np.random.randn(b, c)) h = PF.embed(h, c, apply_w=apply_w)
def value(self):
    """Read-only property containing data returned from the function.

    Re-raises the captured exception when the call failed; returns None
    while the result is not ready.
    """
    if self.ready is True:
        ok, payload = self.__queue.get()
        if not ok:
            # The queued payload is the exception raised by the function.
            raise payload
        return payload
Read-only property containing data returned from function.
def exclude(self, scheduled_operation: ScheduledOperation) -> bool:
    """Omit a scheduled operation from the schedule, if present.

    Args:
        scheduled_operation: The operation to try to remove.

    Returns:
        True when it was present and is now removed, False otherwise.
    """
    if scheduled_operation in self.scheduled_operations:
        self.scheduled_operations.remove(scheduled_operation)
        return True
    return False
Omits a scheduled operation from the schedule, if present. Args: scheduled_operation: The operation to try to remove. Returns: True if the operation was present and is now removed, False if it was already not present.
def get_page_template(self, **kwargs):
    """Build the page template name for this request.

    Only called when *page_template* is not passed to ``as_view``.
    """
    opts = self.object_list.model._meta
    parts = (opts.app_label, opts.object_name.lower(),
             self.template_name_suffix, self.page_template_suffix)
    return '{0}/{1}{2}{3}.html'.format(*parts)
Return the template name used for this request. Only called if *page_template* is not given as a kwarg of *self.as_view*.
def parse_keys(self, sn: "DataNode") -> Dict[InstanceName, ScalarValue]:
    """Parse the key dictionary in the context of schema node *sn*.

    Args:
        sn: Schema node corresponding to a list.

    Raises:
        NonexistentSchemaNode: when a key has no matching child node.
        InvalidKeyValue: when a key value fails to parse.
    """
    parsed = {}
    for key, raw in self.keys.items():
        child = sn.get_data_child(*key)
        if child is None:
            raise NonexistentSchemaNode(sn.qual_name, *key)
        value = child.type.parse_value(raw)
        if value is None:
            raise InvalidKeyValue(raw)
        parsed[child.iname()] = value
    return parsed
Parse key dictionary in the context of a schema node. Args: sn: Schema node corresponding to a list.
def run_suite(self):
    """Run a phantomjs test suite; ``phantomjs_runner`` is mandatory."""
    if not self.phantomjs_runner:
        raise JsTestException('phantomjs_runner need to be defined')
    self.phantomjs(self.phantomjs_runner, self.get_url(), title=self.title)
    self.cleanup()
Run a phantomjs test suite. - ``phantomjs_runner`` is mandatory. - Either ``url`` or ``url_name`` needs to be defined.
def _configuration(self, *args, **kwargs):
    """Return the configuration files recorded for each inspected package.

    Raises:
        InspectorQueryException: when no configuration data exists yet.
    """
    data = dict()
    self.db.open()
    for pkg in self.db.get(Package):
        cfg_paths = [cfg.path
                     for cfg in self.db.get(PackageCfgFile,
                                            eq={'pkgid': pkg.id})]
        data[pkg.name] = cfg_paths
    if not data:
        raise InspectorQueryException("No inspected configuration yet available.")
    return data
Return configuration files.
def values(self):
    """Collect the user's inputs from this dialog.

    :returns: dict with keys ``'use_calfile'`` (bool), ``'calname'``
        (str) and ``'frange'`` ((int, int) frequency range).
    """
    ui = self.ui
    return {
        'use_calfile': ui.calfileRadio.isChecked(),
        'calname': str(ui.calChoiceCmbbx.currentText()),
        'frange': (ui.frangeLowSpnbx.value(), ui.frangeHighSpnbx.value()),
    }
Gets the values the user input to this dialog :returns: dict of inputs: | *'use_calfile'*: bool, -- whether to apply calibration at all | *'calname'*: str, -- the name of the calibration dataset to use | *'frange'*: (int, int), -- (min, max) of the frequency range to apply calibration to
def _fingerprint_files(self, filepaths):
    """Return a sha1 hex digest over the given files.

    Both the buildroot-relative path and the full contents of each file
    feed the hash; assumes files are small enough to read into memory.
    """
    hasher = sha1()
    for path in filepaths:
        path = self._assert_in_buildroot(path)
        rel = os.path.relpath(path, get_buildroot())
        hasher.update(rel.encode('utf-8'))
        with open(path, 'rb') as src:
            hasher.update(src.read())
    return hasher.hexdigest()
Returns a fingerprint of the given filepaths and their contents. This assumes the files are small enough to be read into memory.
def draw_svg(self, svg_str):
    """Render *svg_str* into the cell; fall back to text if rsvg is missing."""
    try:
        import rsvg
    except ImportError:
        self.draw_text(svg_str)
        return
    svg = rsvg.Handle(data=svg_str)
    svg_width, svg_height = svg.get_dimension_data()[:2]
    transx, transy = self._get_translation(svg_width, svg_height)
    scale = min(self._get_scalexy(svg_width, svg_height))
    angle = float(self.code_array.cell_attributes[self.key]["angle"])
    ctx = self.context
    ctx.save()
    # Rotate (negative = counter-clockwise), then position and scale.
    ctx.rotate(-angle / 360 * 2 * math.pi)
    ctx.translate(transx, transy)
    ctx.scale(scale, scale)
    svg.render_cairo(ctx)
    ctx.restore()
Draws svg string to cell
def segments(self):
    """Yield the Line segments that comprise this (closed) Polygon."""
    # Fix: ``xrange`` is Python 2 only; ``range`` behaves identically here
    # and works on Python 3 (the file already uses py3-style code elsewhere).
    for n in range(len(self.vertices) - 1):
        yield Line(self.vertices[n], self.vertices[n + 1])
    # Closing segment from the last vertex back to the first.
    yield Line(self.vertices[-1], self.vertices[0])
Return the Line segments that comprise this Polygon.
def surface(x, y, z):
    """Build a surface plot.

    Parameters
    ----------
    x, y, z : array-like

    Returns
    -------
    Chart
    """
    trace = go.Surface(x=x, y=y, z=z)
    return Chart(data=[trace])
Surface plot. Parameters ---------- x : array-like, optional y : array-like, optional z : array-like, optional Returns ------- Chart
def add_criterion(self, name, priority, and_or, search_type, value):
    """Append a SearchCriteria built from the given fields to this smart group.

    Args:
        name: Criteria type name (e.g. "Application Title").
        priority: Number priority of the criterion.
        and_or: Either "and" or "or".
        search_type: Criteria search type (e.g. "is", "is not").
        value: Value to search for/against.
    """
    self.criteria.append(
        SearchCriteria(name, priority, and_or, search_type, value))
Add a search criteria object to a smart group. Args: name: String Criteria type name (e.g. "Application Title") priority: Int or Str number priority of criterion. and_or: Str, either "and" or "or". search_type: String Criteria search type. (e.g. "is", "is not", "member of", etc). Construct a SmartGroup with the criteria of interest in the web interface to determine what range of values are available. value: String value to search for/against.
def class_factory(name, base_class, class_dict):
    """Dynamically build a Custom Indicator class derived from *base_class*.

    Each key/value pair in *class_dict* becomes an instance attribute set
    in ``__init__`` after the base initializer runs.
    """
    def __init__(self, tcex):
        base_class.__init__(self, tcex)
        for attr, value in class_dict.items():
            setattr(self, attr, value)
    return type(str(name), (base_class,), {'__init__': __init__})
Internal method for dynamically building Custom Indicator classes.
def parse_all(self):
    """Trigger extraction of all lazily-evaluated log fields.

    Reading each attribute forces its computation; the values themselves
    are discarded here.
    """
    for attr in ('split_tokens', 'duration', 'datetime', 'thread',
                 'operation', 'namespace', 'pattern', 'nscanned',
                 'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted',
                 'ndeleted', 'nupdated', 'numYields', 'w', 'r'):
        getattr(self, attr)
Trigger extraction of all information. These values are usually evaluated lazily.
def execution_minutes_for_session(self, session_label):
    """Return all execution minutes for the given session.

    Parameters
    ----------
    session_label: pd.Timestamp (midnight UTC)
        Label of the session whose minutes are desired.

    Returns
    -------
    pd.DatetimeIndex
        All execution minutes for the session.
    """
    market_open = self.schedule.at[session_label, 'market_open']
    market_close = self.schedule.at[session_label, 'market_close']
    return self.minutes_in_range(
        start_minute=self.execution_time_from_open(market_open),
        end_minute=self.execution_time_from_close(market_close),
    )
Given a session label, return the execution minutes for that session. Parameters ---------- session_label: pd.Timestamp (midnight UTC) A session label whose session's minutes are desired. Returns ------- pd.DateTimeIndex All the execution minutes for the given session.
def clear_events(self, event_name):
    """Clear all queued events named *event_name*.

    Args:
        event_name: Name of the events to drop.
    """
    with self.lock:
        try:
            self.get_event_q(event_name).queue.clear()
        except queue.Empty:
            return
Clear all events of a particular name. Args: event_name: Name of the events to be popped.
def add_text_to_image(fname, txt, opFilename,
                      font_path="T://user//dev//src//python//_AS_LIB//timR24.pil"):
    """Copy image *fname* to *opFilename* with *txt* drawn in the top-left.

    Args:
        fname: Source image path.
        txt: Text to draw (black).
        opFilename: Output image path.
        font_path: PIL bitmap font file to use.  Defaults to the legacy
            hard-coded location for backward compatibility.
    """
    # NOTE(review): the default font path is machine-specific; callers
    # should pass ``font_path`` explicitly.
    ft = ImageFont.load(font_path)
    print("Adding text ", txt, " to ", fname, " pixels wide to file " , opFilename)
    im = Image.open(fname)
    draw = ImageDraw.Draw(im)
    draw.text((0, 0), txt, fill=(0, 0, 0), font=ft)
    del draw
    im.save(opFilename)
convert an image by adding text
def findOne(self, query=None, mode=FindOneMode.FIRST, **kwargs):
    """Perform a find but return at most one result.

    Returns None when there are no results.  With multiple results,
    *mode* selects which one: FIRST returns the first, LAST the last.
    """
    results = self.find(query, **kwargs)
    # Bug fix: ``len(results) is 0`` / ``is 1`` relied on CPython small-int
    # identity, which is an implementation detail; use equality instead.
    if len(results) == 0:
        return None
    if len(results) == 1 or mode == FindOneMode.FIRST:
        return results[0]
    if mode == FindOneMode.LAST:
        return results[-1]
Perform a find, with the same options present, but only return a maximum of one result. If find returns an empty array, then None is returned. If there are multiple results from find, the one returned depends on the mode parameter. If mode is FindOneMode.FIRST, then the first result is returned. If the mode is FindOneMode.LAST, then the last is returned. If the mode is FindOneMode.ERROR, then a SlickCommunicationError is raised.
def get_my_hostname(self, split_hostname_on_first_period=False):
    """Best-guess hostname registered with OpenStack for this host.

    Prefers the configured ``os_host``; optionally truncates at the
    first period.
    """
    hostname = self.init_config.get("os_host") or self.hostname
    if split_hostname_on_first_period:
        return hostname.split('.')[0]
    return hostname
Returns a best guess for the hostname registered with OpenStack for this host
def create_contact(self, attrs, members=None, folder_id=None, tags=None):
    """Create a contact from *attrs*, with optional folder, tags and members.

    :param attrs: dict of contact attributes ({key: value}); at least one
        is required.
    :param members: group members, passed through as-is.
    :param folder_id: ID of the folder in which to create the contact.
    :param tags: comma-separated list (or list) of tag names.
    :returns: the created zobjects.Contact.
    """
    contact = {}
    if folder_id:
        contact['l'] = str(folder_id)
    if tags:
        contact['tn'] = self._return_comma_list(tags)
    if members:
        contact['m'] = members
    contact['a'] = [{'n': key, '_content': val} for key, val in attrs.items()]
    resp = self.request_single('CreateContact', {'cn': contact})
    return zobjects.Contact.from_dict(resp)
Create a contact Does not include VCARD nor group membership yet XML example : <cn l="7"> ## ContactSpec <a n="lastName">MARTIN</a> <a n="firstName">Pierre</a> <a n="email">pmartin@example.com</a> </cn> Which would be in zimsoap : attrs = { 'lastname': 'MARTIN', 'firstname': 'Pierre', 'email': 'pmartin@example.com' } folder_id = 7 :param folder_id: a string with the ID of the folder where to create the contact. Default '7' :param tags: comma-separated list of tag names :param attrs: a dictionary of attributes to set ({key:value,...}). At least one attr is required :returns: the created zobjects.Contact
def parse_uinput_mapping(name, mapping):
    """Parse a dict of uinput mapping options and register the mapping.

    Keys are sorted into buttons (BTN_/KEY_), axes (ABS_), mouse (REL_)
    and mouse options (MOUSE_) by their uppercase prefix.
    """
    axes, buttons, mouse, mouse_options = {}, {}, {}, {}
    description = "ds4drv custom mapping ({0})".format(name)
    for key, attr in mapping.items():
        key = key.upper()
        if key.startswith(("BTN_", "KEY_")):
            buttons[key] = attr
        elif key.startswith("ABS_"):
            axes[key] = attr
        elif key.startswith("REL_"):
            mouse[key] = attr
        elif key.startswith("MOUSE_"):
            mouse_options[key] = attr
    create_mapping(name, description, axes=axes, buttons=buttons,
                   mouse=mouse, mouse_options=mouse_options)
Parses a dict of mapping options.
def check( state_engine, nameop, block_id, checked_ops ):
    """Verify an announcement came from a whitelisted blockchain ID.

    Return True (and process the announcement) when the sender matches
    one of the announce IDs; False otherwise.
    """
    sender = nameop['sender']
    sending_blockchain_id = None
    blockchain_namerec = None
    for candidate_id in state_engine.get_announce_ids():
        rec = state_engine.get_name( candidate_id )
        if rec is None:
            continue
        if str(sender) == str(rec['sender']):
            sending_blockchain_id = candidate_id
            blockchain_namerec = rec
            break
    else:
        # Loop finished without a break: no whitelisted sender matched.
        log.warning("Announcement not sent from our whitelist of blockchain IDs")
        return False
    nameop['announcer_id'] = sending_blockchain_id
    process_announcement( blockchain_namerec, nameop, state_engine.working_dir )
    return True
Log an announcement from the blockstack developers, but first verify that it is correct. Return True if the announcement came from the announce IDs whitelist Return False otherwise
def asdict(self):
    """Convert this resource to a plain dictionary.

    Nested Resource values — including those inside lists — are converted
    recursively via their own ``asdict``.
    """
    result = {}
    for key in self._fields:
        value = getattr(self, key)
        if isinstance(value, list):
            value = [item.asdict() if isinstance(item, Resource) else item
                     for item in value]
        elif isinstance(value, Resource):
            value = value.asdict()
        result[key] = value
    return result
Convert resource to dictionary
def audit_with_request(**kwargs):
    """Decorator factory: audit an operation that takes a request as input.

    The wrapped function receives the request directly; internally the
    request is boxed in a throwaway parent object so the ``audit``
    decorator can read it as ``parent_object.request``.
    """
    def wrap(fn):
        @audit(**kwargs)
        def operation(parent_object, *args, **kw):
            return fn(parent_object.request, *args, **kw)

        @functools.wraps(fn)
        def advice_with_request(the_request, *args, **kw):
            holder = type('ParentObject', (), {'request': the_request})()
            return operation(holder, *args, **kw)
        return advice_with_request
    return wrap
use this decorator to audit an operation with a request as input variable
def _validate(claims, validate_claims, expiry_seconds):
    """Validate expiry-related claims.

    Does nothing when *validate_claims* is False.  Otherwise validates the
    exp and nbf claims when present, and the iat claim when
    *expiry_seconds* is provided.
    """
    if not validate_claims:
        return
    now = time()
    if CLAIM_EXPIRATION_TIME in claims:
        _check_expiration_time(now, claims[CLAIM_EXPIRATION_TIME])
    if CLAIM_ISSUED_AT in claims and expiry_seconds is not None:
        _check_expiration_time(now, claims[CLAIM_ISSUED_AT] + expiry_seconds)
    if CLAIM_NOT_BEFORE in claims:
        _check_not_before(now, claims[CLAIM_NOT_BEFORE])
Validate expiry related claims. If validate_claims is False, do nothing. Otherwise, validate the exp and nbf claims if they are present, and validate the iat claim if expiry_seconds is provided.
def build_catalog_info(self, catalog_info):
    """Build a CatalogInfo object from a dict of catalog parameters."""
    catalog = SourceFactory.build_catalog(**catalog_info)
    catalog_info.update(
        catalog=catalog,
        catalog_table=catalog.table,
        roi_model=SourceFactory.make_fermipy_roi_model_from_catalogs([catalog]),
        srcmdl_name=self._name_factory.srcmdl_xml(
            sourcekey=catalog_info['catalog_name']),
    )
    return CatalogInfo(**catalog_info)
Build a CatalogInfo object
def update(cls, spec, updates, upsert=False):
    """Find the record matching *spec*, apply *updates*, and save.

    When nothing matches and *upsert* is True, a new record is created
    from *spec* first.  Returns the saved object, or None when nothing
    matched and upsert is False.
    """
    previous = cls.get(spec['key']) if 'key' in spec else None
    if previous:
        current = cls(**previous.__dict__)
    elif upsert:
        current = cls(**spec)
    else:
        return None
    current.__dict__.update(updates)
    current.save()
    return current
The spec is used to search for the data to update, updates contains the values to be updated, and upsert specifies whether to do an insert if the original data is not found.
def should_copy(column):
    """Determine whether a column should be copied.

    Only non-nullable Serial columns backed by a server default are
    skipped (they are auto-generated).
    """
    is_generated_serial = (isinstance(column.type, Serial)
                           and not column.nullable
                           and bool(column.server_default))
    return not is_generated_serial
Determine if a column should be copied.
def get_db_row(db, start, size):
    """Read part of a DB from the PLC.

    Args:
        db (int): The db to use.
        start (int): Start index within the db data.
        size (int): Number of bytes to read.
    """
    byte_type = snap7.snap7types.wordlen_to_ctypes[snap7.snap7types.S7WLByte]
    return client.db_read(db, start, byte_type, size)
Here you see an example of reading out a part of a DB Args: db (int): The db to use start (int): The index of where to start in db data size (int): The size of the db data to read
def spkapo(targ, et, ref, sobs, abcorr):
    """Position of a target relative to an observer, with optional
    light-time and stellar-aberration corrections (CSPICE spkapo_c).

    :param targ: Target body id.
    :param et: Observer epoch.
    :param ref: Inertial reference frame of the observer's state.
    :param sobs: Observer state wrt. the solar system barycenter (6 floats).
    :param abcorr: Aberration correction flag.
    :return: (target position, one-way light time).
    """
    targ_c = ctypes.c_int(targ)
    et_c = ctypes.c_double(et)
    ref_c = stypes.stringToCharP(ref)
    abcorr_c = stypes.stringToCharP(abcorr)
    sobs_c = stypes.toDoubleVector(sobs)
    ptarg = stypes.emptyDoubleVector(3)
    lt = ctypes.c_double()
    libspice.spkapo_c(targ_c, et_c, ref_c, sobs_c, abcorr_c,
                      ptarg, ctypes.byref(lt))
    return stypes.cVectorToPython(ptarg), lt.value
Return the position of a target body relative to an observer, optionally corrected for light time and stellar aberration. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkapo_c.html :param targ: Target body. :type targ: int :param et: Observer epoch. :type et: float :param ref: Inertial reference frame of observer's state. :type ref: str :param sobs: State of observer wrt. solar system barycenter. :type sobs: 6-Element Array of floats :param abcorr: Aberration correction flag. :type abcorr: str :return: Position of target, One way light time between observer and target. :rtype: tuple
def on_gtk_prefer_dark_theme_toggled(self, chk):
    """Persist the `gtk_prefer_dark_theme' dconf property and reapply the theme."""
    enabled = chk.get_active()
    self.settings.general.set_boolean('gtk-prefer-dark-theme', enabled)
    select_gtk_theme(self.settings)
Set the `gtk_prefer_dark_theme' property in dconf
def from_keys(cls, keys, loader_func, type_hint=None):
    """Factory for `LazyLoadedDict`: apply *loader_func* lazily to each key.

    :param keys: iterable of keys for the dictionary.
    :param loader_func: function applied to a key when its value is read.
    :param type_hint: expected type of lazily loaded values.
    :returns: a lazily loaded dictionary.
    """
    values = {}
    for key in keys:
        # Bind the key as a default argument to avoid late-binding issues.
        values[key] = LazyLoadedValue(lambda key=key: loader_func(key),
                                      type_hint=type_hint)
    return cls(values)
Factory method for `LazyLoadedDict` Accepts a ``loader_func`` that is to be applied to all ``keys``. :param keys: List of keys to create the dictionary with :type keys: iterable :param loader_func: Function to be applied to all keys :type loader_func: function :param type_hint: Expected type of lazy loaded values. Used by `LazyLoadedValue`. (Default value = None) :type type_hint: str :returns: A properly constructed lazy loaded dictionary :rtype: LazyLoadedDict
def _bits_ports_and_isrom_from_memory(mem):
    """Extract (total bits, port count, is_rom) from a memory for estimation."""
    bits = 2**mem.addrwidth * mem.bitwidth
    read_ports = len(mem.readport_nets)
    if hasattr(mem, 'writeport_nets'):
        write_ports = len(mem.writeport_nets)
        is_rom = False
    else:
        # Only ROMs legitimately lack write ports.
        if not isinstance(mem, RomBlock):
            raise PyrtlInternalError('Mem with no writeport_nets attribute'
                                     ' but not a ROM? Thats an error')
        write_ports = 0
        is_rom = True
    return bits, max(read_ports, write_ports), is_rom
Helper to extract mem bits and ports for estimation.
def _lane_detail_to_ss(fcid, ldetail): return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"], ldetail["bc_index"], ldetail["description"].encode("ascii", "ignore"), "N", "", "", ldetail["project_name"]]
Convert information about a lane into Illumina samplesheet output.
def _colorize(val, color):
    """Colorize *val* with termcolor or colorama, whichever is available.

    Returns *val* unchanged when neither library is importable.
    """
    if termcolor is not None:
        return termcolor.colored(val, color)
    if colorama is not None:
        return TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
    return val
Colorize a string using termcolor or colorama. If any of them are available.
def build_from_energy_dict(cls, ebin_name, input_dict):
    """Build a list of components for one energy range from a config dict.

    Each entry in ``psf_types`` yields one component whose parameters are
    the shared ``input_dict`` values overridden by the PSF-specific ones.
    """
    psf_types = input_dict.pop('psf_types')
    components = []
    for psf_type, val_dict in sorted(psf_types.items()):
        params = input_dict.copy()
        params.update(val_dict)
        params['evtype_name'] = psf_type
        params['ebin_name'] = ebin_name
        components.append(cls(**params))
    return components
Build a list of components from a dictionary for a single energy range
def admin_link(obj):
    """Return a safe HTML link to *obj*'s admin page.

    No permission check is done, so avoid exposing the result to
    unauthorised users.
    """
    if hasattr(obj, 'get_admin_link'):
        link = obj.get_admin_link()
    else:
        link = admin_link_fn(obj)
    return mark_safe(link)
Returns a link to the admin URL of an object. No permissions checking is involved, so use with caution to avoid exposing the link to unauthorised users. Example:: {{ foo_obj|admin_link }} renders as:: <a href='/admin/foo/123'>Foo</a> :param obj: A Django model instance. :return: A safe string expressing an HTML link to the admin page for an object.
def gzip_dir(path, compresslevel=6):
    """Gzip every file in a directory in place.

    Unlike shutil.make_archive this produces individual .gz files that
    remain readable with zless/zcat.  Files already ending in "gz" are
    skipped; timestamps/permissions are preserved and originals removed.

    Args:
        path (str): Directory to process.
        compresslevel (int): gzip compression level, 1-9.
    """
    for name in os.listdir(path):
        src = os.path.join(path, name)
        if name.lower().endswith("gz"):
            continue
        dst = '{}.gz'.format(src)
        with open(src, 'rb') as f_in, \
                GzipFile(dst, 'wb', compresslevel=compresslevel) as f_out:
            shutil.copyfileobj(f_in, f_out)
        shutil.copystat(src, dst)
        os.remove(src)
Gzips all files in a directory. Note that this is different from shutil.make_archive, which creates a tar archive. The aim of this method is to create gzipped files that can still be read using common Unix-style commands like zless or zcat. Args: path (str): Path to directory. compresslevel (int): Level of compression, 1-9. 9 is default for GzipFile, 6 is default for gzip.
def poly_to_pwl(self, n_points=4):
    """Convert the polynomial cost model to piece-wise linear.

    Evaluates the total cost at zero and then at ``n_points`` evenly
    spaced points between p_min and p_max, storing the (power, cost)
    breakpoints in ``self.p_cost`` and switching ``pcost_model`` to
    PW_LINEAR.
    """
    assert self.pcost_model == POLYNOMIAL
    p_min = self.p_min
    p_max = self.p_max
    p_cost = []
    if p_min > 0.0:
        # With a positive minimum output, reserve one breakpoint at zero
        # and spread the remaining ones between p_min and p_max.
        step = (p_max - p_min) / (n_points - 2)
        y0 = self.total_cost(0.0)
        p_cost.append((0.0, y0))
        x = p_min
        n_points -= 1
    else:
        step = (p_max - p_min) / (n_points - 1)
        x = 0.0
    for _ in range(n_points):
        y = self.total_cost(x)
        p_cost.append((x, y))
        x += step
    # Switch the cost model over to the new piece-wise linear data.
    self.pcost_model = PW_LINEAR
    self.p_cost = p_cost
Sets the piece-wise linear cost attribute, converting the polynomial cost variable by evaluating at zero and then at n_points evenly spaced points between p_min and p_max.
def create_event_subscription(self, instance, on_data, timeout=60):
    """Create a new subscription for receiving events of an instance.

    Returns immediately with a future; cancel the future to stop the
    subscription.

    :param str instance: A Yamcs instance name.
    :param on_data: Callback invoked for each Event.
    :param timeout: Seconds to wait for the subscription reply.
    :return: Future managing the background websocket subscription.
    """
    manager = WebSocketSubscriptionManager(self, resource='events')
    subscription = WebSocketSubscriptionFuture(manager)
    manager.open(functools.partial(_wrap_callback_parse_event, on_data),
                 instance)
    subscription.reply(timeout=timeout)
    return subscription
Create a new subscription for receiving events of an instance. This method returns a future, then returns immediately. Stop the subscription by canceling the future. :param str instance: A Yamcs instance name :param on_data: Function that gets called on each :class:`.Event`. :type on_data: Optional[Callable[.Event]) :param timeout: The amount of seconds to wait for the request to complete. :type timeout: Optional[float] :return: Future that can be used to manage the background websocket subscription. :rtype: .WebSocketSubscriptionFuture
def get_context(self, **kwargs):
    """Build context data for the template.

    Mixes the Django wizard's form context with django-xadmin's base
    context.
    """
    context = self.get_context_data(form=self.form_obj, **kwargs)
    context.update(super(FormAdminView, self).get_context())
    return context
Use this method to built context data for the template Mix django wizard context data with django-xadmin context
def validate(self):
    """Validate the whole document against its field validators.

    Returns:
        True when validation passes (or is disabled via ``mustValidate``).

    Raises:
        InvalidDocument: aggregating every per-field error found.
    """
    if not self.mustValidate:
        return True
    res = {}
    for field in self.validators.keys():
        try:
            # Nested validator dicts describe sub-documents: lazily create
            # the sub-store the first time the field is validated.
            if isinstance(self.validators[field], dict) and field not in self.store:
                self.store[field] = DocumentStore(self.collection, validators = self.validators[field], initDct = {}, subStore=True, validateInit=self.validateInit)
            self.validateField(field)
        except InvalidDocument as e:
            # Sub-document failures carry their own error dict.
            res.update(e.errors)
        except (ValidationError, SchemaViolation) as e:
            res[field] = str(e)
    if len(res) > 0:
        raise InvalidDocument(res)
    return True
Validate the whole document
def analysis(self):
    """Lazily read and cache the ANALYSIS segment of the FCS file."""
    if self._analysis is None:
        # First access: parse the segment from disk, caching the result.
        with open(self.path, 'rb') as handle:
            self.read_analysis(handle)
    return self._analysis
Get ANALYSIS segment of the FCS file.
def startLoading(self):
    """Mark this item as loading by installing an ajax-spinner label.

    Returns:
        False when already loading or not attached to a tree,
        True when the spinner was installed.
    """
    if self._loading:
        return False
    tree = self.treeWidget()
    if not tree:
        return False
    self._loading = True
    self.setText(0, '')
    spinner = QtGui.QLabel(self.treeWidget())
    spinner.setMovie(XLoaderWidget.getMovie())
    spinner.setAlignment(QtCore.Qt.AlignCenter)
    tree.setItemWidget(self, 0, spinner)
    try:
        tree.loadStarted.emit(self)
    except AttributeError:
        # Trees without a loadStarted signal have nothing to notify.
        pass
    return True
Updates this item to mark the item as loading. This will create a QLabel with the loading ajax spinner to indicate that progress is occurring.
def get_processing_block_ids(self):
    """Return the sorted list of processing block ids from the database."""
    pattern = '*:processing_block:*'
    block_ids = self._db.get_ids(pattern)
    # The id proper is the last colon-separated component of the key.
    return sorted(key.split(':')[-1] for key in block_ids)
Get list of processing block ids using the processing block id
def _load_version(cls, state, version):
    """Load a previously saved SoundClassifier instance from *state*."""
    from ._audio_feature_extractor import _get_feature_extractor
    from .._mxnet import _mxnet_utils
    state['_feature_extractor'] = _get_feature_extractor(state['feature_extractor_name'])
    num_classes = state['num_classes']
    num_inputs = state['_feature_extractor'].output_length
    # Older saved models predate configurable layer sizes; fall back to
    # the historical default of two 100-unit layers.
    if 'custom_layer_sizes' in state:
        custom_layer_sizes = list(map(int, state['custom_layer_sizes']))
    else:
        custom_layer_sizes = [100, 100]
    state['custom_layer_sizes'] = custom_layer_sizes
    net = SoundClassifier._build_custom_neural_network(num_inputs, num_classes, custom_layer_sizes)
    net_params = net.collect_params()
    ctx = _mxnet_utils.get_mxnet_context()
    # Restore the trained weights into the rebuilt network.
    _mxnet_utils.load_net_params_from_state(net_params, state['_custom_classifier'], ctx=ctx)
    state['_custom_classifier'] = net
    return SoundClassifier(state)
A function to load a previously saved SoundClassifier instance.
def detach_all(self):
    """Detach from all tracked classes and objects.

    Restores the original constructors and cleanses the tracking lists.
    """
    self.detach_all_classes()
    self.objects.clear()
    self.index.clear()
    # Empty in place so existing references observe the cleared list.
    del self._keepalive[:]
Detach from all tracked classes and objects. Restore the original constructors and cleanse the tracking lists.
def _get_action_urls(self):
    """Build the url patterns that route each admin action to its view.

    Produces one pattern for per-object change actions and one for
    changelist-level actions; both share the same url name, which is also
    recorded on ``self.tools_view_name``.
    """
    opts = self.model._meta
    base_url_name = '%s_%s' % (opts.app_label, opts.model_name)
    model_actions_url_name = '%s_actions' % base_url_name
    self.tools_view_name = 'admin:' + model_actions_url_name

    # Map every declared action name to its bound method.
    actions = {
        action_name: getattr(self, action_name)
        for action_name in chain(self.change_actions, self.changelist_actions)
    }

    change_view = ChangeActionView.as_view(
        model=self.model,
        actions=actions,
        back='admin:%s_change' % base_url_name,
        current_app=self.admin_site.name,
    )
    changelist_view = ChangeListActionView.as_view(
        model=self.model,
        actions=actions,
        back='admin:%s_changelist' % base_url_name,
        current_app=self.admin_site.name,
    )
    return [
        url(r'^(?P<pk>.+)/actions/(?P<tool>\w+)/$',
            self.admin_site.admin_view(change_view),
            name=model_actions_url_name),
        url(r'^actions/(?P<tool>\w+)/$',
            self.admin_site.admin_view(changelist_view),
            name=model_actions_url_name),
    ]
Get the url patterns that route each action to a view.
def get_config_section(self, name):
    """Return the (key, value) items of configuration section ``name``.

    Returns an empty list when the section does not exist.
    """
    if not self.config.has_section(name):
        return []
    return self.config.items(name)
Get the items of a configuration section, or an empty list if the section does not exist.
def find_all_files(self):
    """Return an iterator over this repo's files plus all subrepository files.

    Subrepository file names are prefixed with the subrepo's location using
    posix path joining.
    """
    nested = (
        posixpath.join(sub.location, name)
        for sub in self.subrepos()
        for name in sub.find_files()
    )
    return itertools.chain(self.find_files(), nested)
Find files including those in subrepositories.
def one_way_portal(self, other, **stats):
    """Connect a one-way portal from this node to ``other`` and return it.

    Any extra keyword arguments become stats on the new portal.
    """
    # symmetrical=False makes this a one-directional connection.
    return self.character.new_portal(self, other, symmetrical=False, **stats)
Connect a one-way portal from here to another node, and return it.
def _live_receivers(self, sender):
    """Filter sequence of receivers to get resolved, live receivers.

    This checks for weak references and resolves them, then returning only
    live receivers.
    """
    receivers = None
    # Only consult the cache when caching is enabled and no dead receivers
    # are pending cleanup (a stale entry could otherwise leak into results).
    if self.use_caching and not self._dead_receivers:
        receivers = self.sender_receivers_cache.get(sender)
        # NO_RECEIVERS is a sentinel meaning "cached as empty".
        if receivers is NO_RECEIVERS:
            return []
    if receivers is None:
        # Cache miss (or caching disabled): rebuild the list under the lock so
        # it stays consistent with concurrent connect/disconnect calls.
        with self.lock:
            self._clear_dead_receivers()
            senderkey = _make_id(sender)
            receivers = []
            for (receiverkey, r_senderkey), receiver in self.receivers:
                # Keep receivers registered for any sender, or for this sender.
                if r_senderkey == NONE_ID or r_senderkey == senderkey:
                    receivers.append(receiver)
            if self.use_caching:
                if not receivers:
                    # Cache the sentinel rather than [] so an empty result is
                    # distinguishable from a cache miss on the next lookup.
                    self.sender_receivers_cache[sender] = NO_RECEIVERS
                else:
                    # Note: the weakref versions are what gets cached.
                    self.sender_receivers_cache[sender] = receivers
    non_weak_receivers = []
    for receiver in receivers:
        if isinstance(receiver, weakref.ReferenceType):
            # Dereference the weak reference; drop it if the target is gone.
            receiver = receiver()
            if receiver is not None:
                non_weak_receivers.append(receiver)
        else:
            non_weak_receivers.append(receiver)
    return non_weak_receivers
Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returns only live receivers.
def smear(idx, factor):
    """Return every unique index within ``factor`` of the input indexes.

    E.g.: smear([5,7,100],2) = [3,4,5,6,7,8,9,98,99,100,101,102]

    The previous implementation only generated offsets ``i - factor/2`` for
    ``i`` in ``range(factor+1)``, i.e. roughly +/- factor/2 (and, under
    Python 3 true division, float offsets), which contradicts the documented
    example above.  This version uses integer offsets from -factor to +factor.

    Parameters
    -----------
    idx : numpy.array of ints
        The indexes to be smeared.
    factor : int
        The factor by which to smear out the input array.

    Returns
    --------
    new_idx : numpy.array of ints
        The smeared, sorted array of unique indexes.
    """
    base = numpy.asarray(idx)
    # Integer offsets -factor..factor inclusive keep the result an int array.
    shifted = [base + offset for offset in range(-factor, factor + 1)]
    return numpy.unique(numpy.concatenate(shifted))
This function will take as input an array of indexes and return every unique index within the specified factor of the inputs. E.g.: smear([5,7,100],2) = [3,4,5,6,7,8,9,98,99,100,101,102] Parameters ----------- idx : numpy.array of ints The indexes to be smeared. factor : int The factor by which to smear out the input array. Returns -------- new_idx : numpy.array of ints The smeared array of indexes.
def nla_put(msg, attrtype, datalen, data):
    """Add a unspecific attribute to Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L497

    Reserves room for an unspecific attribute and copies the provided data
    into the message as payload of the attribute. Returns an error if there
    is insufficient space for the attribute.

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    datalen -- length of data to be used as payload (integer).
    data -- data to be used as attribute payload (bytearray).

    Returns:
    0 on success or a negative error code.
    """
    nla = nla_reserve(msg, attrtype, datalen)
    if not nla:
        # Not enough space left in the message for header + payload.
        return -NLE_NOMEM
    if datalen <= 0:
        # Nothing to copy; the reserved (empty) attribute alone is a success.
        return 0
    # Copy the payload bytes into the reserved region of the message buffer.
    nla_data(nla)[:datalen] = data[:datalen]
    _LOGGER.debug('msg 0x%x: attr <0x%x> %d: Wrote %d bytes at offset +%d',
                  id(msg), id(nla), nla.nla_type, datalen,
                  # Attribute offset relative to the start of the message
                  # payload, derived from the underlying buffer slices.
                  nla.bytearray.slice.start - nlmsg_data(msg.nm_nlh).slice.start)
    return 0
Add an unspecific attribute to a Netlink message. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L497 Reserves room for an unspecific attribute and copies the provided data into the message as payload of the attribute. Returns an error if there is insufficient space for the attribute. Positional arguments: msg -- Netlink message (nl_msg class instance). attrtype -- attribute type (integer). datalen -- length of data to be used as payload (integer). data -- data to be used as attribute payload (bytearray). Returns: 0 on success or a negative error code.
def mod_watch(name, **kwargs):
    """The at watcher, called to invoke the watch command.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.

    name
        The name of the atjob
    """
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if kwargs['sfun'] != 'watch':
        return ret
    # Strip the requisite bookkeeping keys before delegating to present().
    for key in ('sfun', '__reqs__'):
        del kwargs[key]
    kwargs['name'] = name
    return present(**kwargs)
The at watcher, called to invoke the watch command. .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. name The name of the atjob
def estimate_frequency(self, start: int, end: int, sample_rate: float):
    """Estimate the frequency of the baseband signal using an FFT.

    :param start: Start of the area that shall be investigated
    :param end: End of the area that shall be investigated
    :param sample_rate: Sample rate of the signal
    :return: estimated frequency in Hertz (100 kHz fallback on error)
    """
    # Use the largest power-of-two window that fits the requested region.
    window = 2 ** int(math.log2(end - start))
    chunk = self.data[start:start + window]
    try:
        spectrum = np.fft.fft(chunk)
        bins = np.fft.fftfreq(len(spectrum))
        peak = bins[np.argmax(np.abs(spectrum))]
        return abs(peak * sample_rate)
    except ValueError:
        # Degenerate input (e.g. empty window): fall back to 100 kHz.
        return 100e3
Estimate the frequency of the baseband signal using FFT :param start: Start of the area that shall be investigated :param end: End of the area that shall be investigated :param sample_rate: Sample rate of the signal :return: estimated frequency in Hertz (falls back to 100 kHz on error)
def read(self):
    """Read the default CSP settings from the JSON file.

    :return: the parsed JSON document
    :raises ValueError: when the file is empty or contains invalid JSON.
        (The previous ``raise 'empty file'`` raised a *string*, which is
        itself a TypeError under Python 3 and never conveyed the message.)
    """
    with open(self.default_file) as json_file:
        try:
            return json.load(json_file)
        except ValueError as exc:
            # json.JSONDecodeError subclasses ValueError; chain the cause so
            # the original parse error remains visible.
            raise ValueError(
                'empty or invalid JSON file: %s' % self.default_file) from exc
Read the default CSP settings from a JSON file.
def get_development_container_name(self):
    """Return the development container name (``repo:[prefix-]branch-dev``)."""
    if self.__prefix:
        tag_parts = [self.__prefix, self.__branch]
    else:
        tag_parts = [self.__branch]
    return "{0}:{1}-dev".format(self.__repository, "-".join(tag_parts))
Returns the development container name
def changeable(self, request):
    """Run ``check_changeable`` and, unless it explicitly returns False,
    filter the queryset through ``apply_changeable``.

    Returns an empty queryset when ``check_changeable`` denies access.
    """
    if self.check_changeable(self.model, request) is False:
        return self.get_queryset().none()
    return self.apply_changeable(self.get_queryset(), request)
Runs both check_changeable and apply_changeable against the owned model and its instance set, returning an empty queryset when the check fails.
def is_digit(obj):
    """Check whether ``obj`` is a number (integral, real, or complex).

    ``numbers.Integral`` and ``numbers.Real`` are both subclasses of
    ``numbers.Complex``, so a single check is equivalent to the original
    three-way tuple test.
    """
    return isinstance(obj, numbers.Complex)
Check if an object is a Number.
def get(self, url, params=None, headers=None, auth=(), certificate_path=None):
    """Return the response from a GET request to the given URL.

    The previous signature used mutable default arguments (``params={}``,
    ``headers={}``), which are shared across calls; ``None`` defaults are
    equivalent here because ``requests`` treats ``None`` as "not supplied".

    Args:
        url (str): The URL for the WEB API that the request is being made to.
        params (dict): Dictionary containing the query string parameters.
        headers (dict): HTTP headers that may be needed for the request.
        auth (tuple): User ID and password for Basic Auth.
        certificate_path (str): Path to the ssl certificate.

    Returns:
        response (HttpResponse): Response object from the requests.get call.
    """
    # Falsy certificate path disables certificate verification, as before.
    verify = certificate_path if certificate_path else False
    return self.session.get(url, params=params, headers=headers,
                            verify=verify, auth=auth, timeout=self.timeout)
Returns the response payload from the request to the given URL. Args: url (str): The URL for the WEB API that the request is being made to. params (dict): Dictionary containing the query string parameters. headers (dict): HTTP Headers that may be needed for the request. auth (tuple): User ID and password for Basic Auth certificate_path (str): Path to the ssl certificate. Returns: response: (HttpResponse): Response object from requests.get api request
def softmax(w, t=1.0):
    """Calculate the softmax of a list of numbers w.

    The previous implementation converted inputs to ``decimal.Decimal`` and
    mixed them with numpy, producing object-dtype arrays (contradicting the
    float arrays shown in the examples) and overflowing for large inputs.
    This version works in float and subtracts the maximum before
    exponentiating, which is numerically stable and mathematically identical.

    Parameters
    ----------
    w : list of numbers
    t : float, optional
        Temperature; larger values flatten the distribution.

    Returns
    -------
    a numpy array of the same length as w of non-negative numbers summing to 1

    Examples
    --------
    >>> softmax([0.1, 0.2])
    array([ 0.47502081,  0.52497919])
    >>> softmax([-0.1, 0.2])
    array([ 0.42555748,  0.57444252])
    >>> softmax([0.9, -10])
    array([  9.99981542e-01,   1.84578933e-05])
    >>> softmax([0, 10])
    array([  4.53978687e-05,   9.99954602e-01])
    """
    scaled = numpy.asarray(w, dtype=float) / t
    # Shifting by the max does not change the result but avoids overflow.
    e = numpy.exp(scaled - numpy.max(scaled))
    return e / numpy.sum(e)
Calculate the softmax of a list of numbers w. Parameters ---------- w : list of numbers Returns ------- a list of the same length as w of non-negative numbers Examples -------- >>> softmax([0.1, 0.2]) array([ 0.47502081, 0.52497919]) >>> softmax([-0.1, 0.2]) array([ 0.42555748, 0.57444252]) >>> softmax([0.9, -10]) array([ 9.99981542e-01, 1.84578933e-05]) >>> softmax([0, 10]) array([ 4.53978687e-05, 9.99954602e-01])
def create_usuario(self):
    """Return a Usuario services facade bound to this client's credentials."""
    credentials = (self.networkapi_url, self.user, self.password,
                   self.user_ldap)
    return Usuario(*credentials)
Get an instance of usuario services facade.
def samples(self):
    """Access the samples, constructing the SampleList lazily on first use.

    :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
    :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
    """
    if self._samples is None:
        solution = self._solution
        self._samples = SampleList(
            self._version,
            assistant_sid=solution['assistant_sid'],
            task_sid=solution['sid'],
        )
    return self._samples
Access the samples :returns: twilio.rest.autopilot.v1.assistant.task.sample.SampleList :rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleList
def get_missing_required_annotations(self) -> List[str]:
    """Return the required annotations that are absent from this object."""
    present = self.annotations
    return [name for name in self.required_annotations if name not in present]
Return missing required annotations.
def service(self, name=None, pk=None, scope=None, **kwargs):
    """Retrieve a single KE-chain Service.

    Uses the same interface as the :func:`services` method but returns only a
    single pykechain :class:`models.Service` instance.

    :param name: (optional) name to limit the search for
    :param pk: (optional) primary key or id (UUID) of the service
    :param scope: (optional) id (UUID) of the scope to search in
    :param kwargs: (optional) additional search keyword arguments
    :return: a single :class:`models.Service` object
    :raises NotFoundError: when no `Service` object is found
    :raises MultipleFoundError: when more than one `Service` object is found
    """
    matches = self.services(name=name, pk=pk, scope=scope, **kwargs)
    if not matches:
        raise NotFoundError("No service fits criteria")
    if len(matches) > 1:
        raise MultipleFoundError("Multiple services fit criteria")
    return matches[0]
Retrieve single KE-chain Service. Uses the same interface as the :func:`services` method but returns only a single pykechain :class:`models.Service` instance. :param name: (optional) name to limit the search for :type name: basestring or None :param pk: (optional) primary key or id (UUID) of the service to search for :type pk: basestring or None :param scope: (optional) id (UUID) of the scope to search in :type scope: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: a single :class:`models.Service` object :raises NotFoundError: When no `Service` object is found :raises MultipleFoundError: When more than a single `Service` object is found
def options(self, parser, env=os.environ): "Add options to nosetests." parser.add_option("--%s-record" % self.name, action="store", metavar="FILE", dest="record_filename", help="Record actions to this file.") parser.add_option("--%s-playback" % self.name, action="store", metavar="FILE", dest="playback_filename", help="Playback actions from this file.")
Add options to nosetests.
def get_loss(self, y_pred, y_true, X=None, training=False):
    """Return the loss for this batch.

    Parameters
    ----------
    y_pred : torch tensor
        Predicted target values.
    y_true : torch tensor
        True target values; moved to this module's device before scoring.
    X : input data, optional
        Unused here; present for interface compatibility.
    training : bool (default=False)
        Unused here; present for interface compatibility.
    """
    target = to_tensor(y_true, device=self.device)
    return self.criterion_(y_pred, target)
Return the loss for this batch. Parameters ---------- y_pred : torch tensor Predicted target values y_true : torch tensor True target values. X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. training : bool (default=False) Whether train mode should be used or not.
def uncomplete(self):
    """Mark the task uncomplete.

    >>> from pytodoist import todoist
    >>> user = todoist.login('john.doe@gmail.com', 'password')
    >>> project = user.get_project('PyTodoist')
    >>> task = project.add_task('Install PyTodoist')
    >>> task.uncomplete()
    """
    owner = self.project.owner
    _perform_command(owner, 'item_uncomplete', {
        'project_id': self.project.id,
        'ids': [self.id],
    })
Mark the task uncomplete. >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> project = user.get_project('PyTodoist') >>> task = project.add_task('Install PyTodoist') >>> task.uncomplete()
def assert_succeeds(exception, msg_fmt="{msg}"): class _AssertSucceeds(object): def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): if exc_type and issubclass(exc_type, exception): msg = exception.__name__ + " was unexpectedly raised" fail( msg_fmt.format( msg=msg, exc_type=exception, exc_name=exception.__name__, exception=exc_val, ) ) return _AssertSucceeds()
Fail if a specific exception is raised within the context. This assertion should be used for cases, where successfully running a function signals a successful test, and raising the exception of a certain type signals a test failure. All other raised exceptions are passed on and will usually still result in a test error. This can be used to signal the intent of a block. >>> l = ["foo", "bar"] >>> with assert_succeeds(ValueError): ... i = l.index("foo") ... >>> with assert_succeeds(ValueError): ... raise ValueError() ... Traceback (most recent call last): ... AssertionError: ValueError was unexpectedly raised >>> with assert_succeeds(ValueError): ... raise TypeError("Wrong Error") ... Traceback (most recent call last): ... TypeError: Wrong Error The following msg_fmt arguments are supported: * msg - the default error message * exc_type - exception type * exc_name - exception type name * exception - exception that was raised
def add_vlan_firewall(self, vlan_id, ha_enabled=False):
    """Create a firewall for the specified vlan.

    :param int vlan_id: The ID of the vlan to create the firewall for
    :param bool ha_enabled: If True, an HA firewall will be created
    :returns: A dictionary containing the VLAN firewall order
    """
    package = self.get_dedicated_package(ha_enabled)
    order = {
        'complexType': 'SoftLayer_Container_Product_Order_Network_'
                       'Protection_Firewall_Dedicated',
        'quantity': 1,
        'packageId': 0,
        'vlanId': vlan_id,
        'prices': [{'id': package[0]['prices'][0]['id']}],
    }
    return self.client['Product_Order'].placeOrder(order)
Creates a firewall for the specified vlan. :param int vlan_id: The ID of the vlan to create the firewall for :param bool ha_enabled: If True, an HA firewall will be created :returns: A dictionary containing the VLAN firewall order
def _detect_line_ending(self):
    """Detect the most frequent line ending in the sample data.

    Stores the winner in the 'line_terminator' formatting parameter,
    defaulting to '\\n' when no candidate occurs at all.
    """
    best, best_count = '\n', 0
    for ending in UniversalCsvReader.line_endings:
        occurrences = self._sample.count(ending)
        # Strict > keeps the earlier candidate (or the default) on ties,
        # matching the original behavior.
        if occurrences > best_count:
            best, best_count = ending, occurrences
    self._formatting_parameters['line_terminator'] = best
Detects the line ending in the sample data.
def ping():
    """Ping the device on the other end of the connection.

    .. code-block: bash

        salt '*' onyx.cmd ping
    """
    if _worker_name() not in DETAILS:
        init()
    try:
        return DETAILS[_worker_name()].conn.isalive()
    except TerminalException as e:
        log.error(e)
        return False
Ping the device on the other end of the connection .. code-block:: bash salt '*' onyx.cmd ping
def handle_cancellation(session: CommandSession):
    """Return an argument filter that finishes the session when the input is
    a cancellation word, otherwise passes the value through unchanged."""
    def _check(value):
        # Only an explicit cancellation match (is True) finishes the session.
        if _is_cancellation(value) is True:
            expr = session.bot.config.SESSION_CANCEL_EXPRESSION
            session.finish(render_expression(expr))
        return value

    return _check
If the input is a string of cancellation word, finish the command session.
def __get_connection(self) -> redis.Redis:
    """Build a Redis connection from the configured host/port/db settings.

    Uses a unix-socket DSN when socket mode is enabled, a TCP DSN otherwise,
    and issues an AUTH command when credentials are configured.

    :return: Redis connection instance
    :rtype: redis.Redis
    """
    if self.__redis_use_socket:
        dsn = 'unix://{:s}?db={:d}'.format(self.__redis_host, self.__redis_db)
    else:
        dsn = 'redis://{:s}:{:d}/{:d}'.format(
            self.__redis_host, self.__redis_port, self.__redis_db)
    connection = redis.from_url(dsn)
    if BlackRed.Settings.REDIS_AUTH is not None:
        connection.execute_command(
            'AUTH {:s}'.format(BlackRed.Settings.REDIS_AUTH))
    return connection
Get a Redis connection :return: Redis connection instance :rtype: redis.Redis
def load_brain_metadata(proxy, include_fields):
    """Load JSON-serializable values from the catalog brain metadata.

    :param proxy: catalog brain/proxy exposing ``indexes()``, ``in``, and
        attribute access for each index
    :param include_fields: optional whitelist of index names; falsy means all
    :return: dict mapping index name -> value, keeping only present,
        non-Missing values that survive ``json.dumps``
    """
    ret = {}
    for index in proxy.indexes():
        if index not in proxy:
            continue
        if include_fields and index not in include_fields:
            continue
        val = getattr(proxy, index)
        if val == Missing.Value:
            continue
        try:
            # Keep only values that can round-trip through JSON.
            json.dumps(val)
        except (TypeError, ValueError):
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit; json.dumps only raises TypeError/ValueError.
            continue
        ret[index] = val
    return ret
Load values from the catalog metadata into a dictionary of index name to value.
def select_single_column(engine, column):
    """Select data from a single column.

    Example::

        >>> select_single_column(engine, table_user.c.id)
        [1, 2, 3]
        >>> select_single_column(engine, table_user.c.name)
        ["Alice", "Bob", "Cathy"]
    """
    stmt = select([column])
    values = [row[0] for row in engine.execute(stmt)]
    return column.name, values
Select data from single column. Example:: >>> select_single_column(engine, table_user.c.id) [1, 2, 3] >>> select_single_column(engine, table_user.c.name) ["Alice", "Bob", "Cathy"]
def gopro_set_response_send(self, cmd_id, status, force_mavlink1=False):
    """Send the response from a GOPRO_COMMAND set request.

    cmd_id : Command ID (uint8_t)
    status : Status (uint8_t)
    """
    message = self.gopro_set_response_encode(cmd_id, status)
    return self.send(message, force_mavlink1=force_mavlink1)
Response from a GOPRO_COMMAND set request cmd_id : Command ID (uint8_t) status : Status (uint8_t)
def truth(f):
    """Convenience decorator to convert truth functions into validators.

    >>> @truth
    ... def isdir(v):
    ...     return os.path.isdir(v)
    >>> validate = Schema(isdir)
    >>> validate('/')
    '/'
    >>> with raises(MultipleInvalid, 'not a valid value'):
    ...     validate('/notavaliddir')
    """
    @wraps(f)
    def check(v):
        # A falsy result from the truth function means the value is invalid.
        if not f(v):
            raise ValueError
        return v

    return check
Convenience decorator to convert truth functions into validators. >>> @truth ... def isdir(v): ... return os.path.isdir(v) >>> validate = Schema(isdir) >>> validate('/') '/' >>> with raises(MultipleInvalid, 'not a valid value'): ... validate('/notavaliddir')
def parenthesize(self, expr, level, *args, strict=False, **kwargs):
    """Render ``expr``, wrapping the result in parentheses when its
    precedence is below ``level`` (or equal to ``level`` if ``strict``).

    Extra ``args`` and ``kwargs`` are passed on to :meth:`doprint`.
    """
    prec = precedence(expr)
    rendered = self.doprint(expr, *args, **kwargs)
    if prec < level or (strict and prec == level):
        return self._parenth_left + rendered + self._parenth_right
    return rendered
Render `expr` and wrap the result in parentheses if the precedence of `expr` is below the given `level` (or at the given `level` if `strict` is True). Extra `args` and `kwargs` are passed to the internal `doprint` renderer.
def main():
    """Command-line interface: print a decorated 16-line Gotham poem."""
    border = "-~*~-" * 10
    print()
    print(border)
    print(lorem_gotham_title().center(50))
    print(border)
    print()
    poem = lorem_gotham()
    stanza_breaks = {4, 8, 12}
    for line_no in range(16):
        # Blank line between four-line stanzas.
        if line_no in stanza_breaks:
            print()
        print(next(poem))
    print()
I provide a command-line interface for this module
def write(self, **kwargs):
    """Serialize a raw or translated write request and send it to the VI,
    following the OpenXC message format.

    A request with both ``id`` and ``data`` is treated as raw (optionally
    with ``bus`` and ``frame_format``); otherwise ``name``/``value`` are
    sent as a translated write (optionally with ``event``).
    """
    if 'id' in kwargs and 'data' in kwargs:
        return self.write_raw(kwargs['id'], kwargs['data'],
                              bus=kwargs.get('bus'),
                              frame_format=kwargs.get('frame_format'))
    return self.write_translated(kwargs['name'], kwargs['value'],
                                 event=kwargs.get('event'))
Serialize a raw or translated write request and send it to the VI, following the OpenXC message format.
def autopage(self):
    """Iterate through results from all pages.

    Yields every item from the current page, then fetches the next page,
    stopping once a fetch produces an empty page.

    :return: all results
    :rtype: generator
    """
    page = self.items
    while page:
        yield from page
        self.items = page = self.fetch_next()
Iterate through results from all pages. :return: all results :rtype: generator