code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def update1(self, key: str, data: np.ndarray, size: int) -> None:
    """Update one entry in a specific record in the datastore.

    ``data[0]`` is used as the row index within the record's array —
    presumably the entry's own index is stored as its first element
    (TODO confirm against callers).

    :param key: name of the record in ``self.data``.
    :param data: row to store; must have 6 columns to fit the record.
    :param size: number of rows allocated when the record is created.
    """
    # Removed leftover debug print(data) from the original.
    if key in self.get_keys():
        self.data[key][data[0]] = data
    else:
        # First entry for this key: allocate the full record up front.
        newdata = np.zeros((size, 6))
        newdata[data[0]] = data
        self.data[key] = newdata
Update one entry in specific record in datastore
def open_file(filename):
    """Multi-platform way to make the OS open a file with its default application.

    :param filename: path of the file to open.
    """
    if sys.platform.startswith("darwin"):
        subprocess.call(("open", filename))
    elif sys.platform == "cygwin":
        subprocess.call(("cygstart", filename))
    elif os.name == "nt":
        # The original os.system("start %s" % filename) broke on paths
        # containing spaces and allowed shell injection; os.startfile is
        # the safe Windows-native equivalent.
        os.startfile(filename)
    elif os.name == "posix":
        subprocess.call(("xdg-open", filename))
Multi-platform way to make the OS open a file with its default application
def domain_whois_history(self, domain, limit=None):
    """Get whois history for a domain, optionally capped at `limit` records."""
    params = {}
    if limit is not None:
        params['limit'] = limit
    uri = self._uris["whois_domain_history"].format(domain)
    return self.get_parse(uri, params)
Gets whois history for a domain
def _create_vlanprofile(self, handle, vlan_id, ucsm_ip):
    """Create a VLAN profile to be associated with the Port Profile.

    :param handle: UCSM SDK connection handle.
    :param vlan_id: numeric VLAN id used to derive the profile name.
    :param ucsm_ip: UCS Manager address, used only for error reporting.
    :returns: True on success, False if the VLAN path is missing;
        on exception, whatever ``_handle_ucsm_exception`` returns.
        NOTE(review): if ``vp2`` were falsy this would fall through and
        return None implicitly — confirm that add_mo never returns None here.
    """
    vlan_name = self.make_vlan_name(vlan_id)
    vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                         vlan_name)
    try:
        # The parent VLAN container must already exist on the fabric.
        vp1 = handle.query_dn(const.VLAN_PATH)
        if not vp1:
            LOG.warning('UCS Manager network driver Vlan Profile '
                        'path at %s missing', const.VLAN_PATH)
            return False
        vp2 = self.ucsmsdk.fabricVlan(
            parent_mo_or_dn=vp1,
            name=vlan_name,
            compression_type=const.VLAN_COMPRESSION_TYPE,
            sharing=const.NONE,
            pub_nw_name="",
            id=str(vlan_id),
            mcast_policy_name="",
            default_net="no")
        handle.add_mo(vp2)
        handle.commit()
        if vp2:
            LOG.debug('UCS Manager network driver Created Vlan '
                      'Profile %s at %s', vlan_name, vlan_profile_dest)
            return True
    except Exception as e:
        # Delegate UCSM-specific error translation/cleanup.
        return self._handle_ucsm_exception(e, 'Vlan Profile',
                                           vlan_name, ucsm_ip)
Creates a VLAN profile to be associated with the Port Profile.
def twisted_absolute_path(path, request):
    """Hack to fix twisted not accepting absolute URIs.

    When the request URI carries a scheme, rewrite the request's
    prepath/postpath from the URI path and return the first segment.
    """
    parsed = urlparse.urlparse(request.uri)
    if parsed.scheme != '':
        segments = parsed.path.lstrip('/').split('/')
        request.prepath = segments[:1]
        request.postpath = segments[1:]
        path = request.prepath[0]
    return path, request
Hack to fix twisted not accepting absolute URIs
def extract_dms(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a DMS archive; returns the command list to run."""
    check_archive_ext(archive)
    cmdlist = [cmd, '-d', outdir]
    if verbosity > 1:
        cmdlist.append('-v')
    cmdlist += ['u', archive]
    return cmdlist
Extract a DMS archive.
def version():
    """Get the local package version.

    Reads ``__version__`` from the package's ``__version__.py``.
    """
    path = join("lib", _CONFIG["name"], "__version__.py")
    # exec() into an explicit namespace: in Python 3, exec cannot reliably
    # create new local variables in the calling function's scope, so the
    # original `return __version__` raised NameError (or silently picked
    # up a module-level name).
    namespace = {}
    with open(path) as stream:
        exec(stream.read(), namespace)
    return namespace["__version__"]
Get the local package version.
def _build_receipt_table(result, billing="hourly", test=False):
    """Build a receipt table of item prices plus the total recurring fee."""
    title = "OrderId: %s" % result.get('orderId', 'No order placed')
    table = formatting.Table(['Cost', 'Description'], title=title)
    table.align['Cost'] = 'r'
    table.align['Description'] = 'l'
    # Test orders carry prices at the top level; real orders nest them.
    prices = result['prices'] if test else result['orderDetails']['prices']
    total = 0.000
    for item in prices:
        if billing == "hourly":
            rate = float(item.get('hourlyRecurringFee', 0.000))
        else:
            rate = float(item.get('recurringFee', 0.000))
        total += rate
        table.add_row([rate, item['item']['description']])
    table.add_row(["%.3f" % total, "Total %s cost" % billing])
    return table
Retrieve the total recurring fee of the item prices.
def reset_ball(self, x, y):
    """Reset the ball to the given (x, y) location on the screen."""
    position = self.sprite.position
    position.x = x
    position.y = y
reset ball to set location on the screen
def load_gpi(self, gpi_path):
    """Load a GPI file from `config.gpi_authority_path`.

    Returns a dict keyed by entity id, or None when no authority path is
    configured. Note: the `gpi_path` argument is not used (the configured
    path wins), matching the original behavior.
    """
    if self.config.gpi_authority_path is None:
        return None
    parser = entityparser.GpiParser()
    with open(self.config.gpi_authority_path) as gpi_f:
        entities = parser.parse(file=gpi_f)
        return {
            entity["id"]: {
                "symbol": entity["label"],
                "name": entity["full_name"],
                "synonyms": entitywriter.stringify(entity["synonyms"]),
                "type": entity["type"],
            }
            for entity in entities
        }
Loads a GPI as a file from the `config.gpi_authority_path`
def set(key, value, timeout=-1, adapter=MemoryAdapter):
    """Set a cache entry by key; a timeout length must be provided.

    Returns the stored value on success, otherwise None.
    """
    stored = adapter(timeout=timeout).set(key, pickle.dumps(value))
    return value if stored else None
set cache by code, must set timeout length
def format_currency_field(__, prec, number, locale):
    """Format a currency field for the territory's default currency."""
    locale = Locale.parse(locale)
    currency = get_territory_currencies(locale.territory)[0]
    if prec is None:
        pattern, currency_digits = None, True
    else:
        # Force a fixed fractional precision instead of the currency default.
        prec = int(prec)
        pattern = modify_number_pattern(locale.currency_formats['standard'],
                                        frac_prec=(prec, prec))
        currency_digits = False
    return format_currency(number, currency, pattern, locale=locale,
                           currency_digits=currency_digits)
Formats a currency field.
def extraSelections(self, qpart, block, columnIndex):
    """List of QTextEdit.ExtraSelection's which highlight brackets.

    Checks the character at the cursor first, then the one before it.
    """
    blockText = block.text()
    if columnIndex < len(blockText) and \
            blockText[columnIndex] in self._ALL_BRACKETS and \
            qpart.isCode(block, columnIndex):
        return self._highlightBracket(blockText[columnIndex],
                                      qpart, block, columnIndex)
    if columnIndex > 0 and \
            blockText[columnIndex - 1] in self._ALL_BRACKETS and \
            qpart.isCode(block, columnIndex - 1):
        return self._highlightBracket(blockText[columnIndex - 1],
                                      qpart, block, columnIndex - 1)
    self.currentMatchedBrackets = None
    return []
List of QTextEdit.ExtraSelection's, which highlight brackets
def resolve(self, value):
    """Resolve the promise with the given value.

    Raises TypeError on self-resolution and RuntimeError if already settled.
    Resolving with another Promise chains to it instead.
    """
    if self is value:
        raise TypeError('Cannot resolve promise with itself.')
    if isinstance(value, Promise):
        value.done(self.resolve, self.reject)
        return
    if self._state != 'pending':
        raise RuntimeError('Promise is no longer pending.')
    self.value = value
    self._state = 'resolved'
    # Detach the callback list before firing so re-entrant calls see it gone.
    pending, self._callbacks = self._callbacks, None
    for cb in pending:
        cb(value)
Resolves the promise with the given value.
def _add_recent(self, doc, logged_id): "Keep a tab on the most recent message for each channel" spec = dict(channel=doc['channel']) doc['ref'] = logged_id doc.pop('_id') self._recent.replace_one(spec, doc, upsert=True)
Keep a tab on the most recent message for each channel
async def remove_listener(self, channel, callback):
    """Remove a listening callback on the specified channel.

    No-op if the connection is closed or the callback is not registered;
    issues UNLISTEN once the last callback for the channel is removed.
    """
    if self.is_closed():
        return
    callbacks = self._listeners.get(channel)
    if callbacks is None or callback not in callbacks:
        return
    callbacks.remove(callback)
    if not callbacks:
        del self._listeners[channel]
        await self.fetch('UNLISTEN {}'.format(utils._quote_ident(channel)))
Remove a listening callback on the specified channel.
def releases(self, owner, module):
    """Fetch the releases of a module, yielding result pages."""
    params = {
        self.PMODULE: '-'.join((owner, module)),
        self.PLIMIT: self.max_items,
        self.PSHOW_DELETED: 'true',
        self.PSORT_BY: self.VRELEASE_DATE,
    }
    yield from self._fetch(self.RRELEASES, params)
Fetch the releases of a module.
def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol,
            seq_len: int) -> mx.sym.Symbol:
    """Bidirectionally encode time-major data, concatenating both directions."""
    # Reverse the sequence (respecting true lengths) for the backward RNN.
    data_reverse = mx.sym.SequenceReverse(data=data,
                                          sequence_length=data_length,
                                          use_sequence_length=True)
    hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
    hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length,
                                                   seq_len)
    # Re-reverse the backward outputs so both directions align in time.
    hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse,
                                            sequence_length=data_length,
                                            use_sequence_length=True)
    return mx.sym.concat(hidden_forward, hidden_reverse, dim=2,
                         name="%s_rnn" % self.prefix)
Bidirectionally encodes time-major data.
def write_and_return(command, ack, serial_connection,
                     timeout=DEFAULT_WRITE_TIMEOUT):
    """Write a command to the device and return the response."""
    clear_buffer(serial_connection)
    with serial_with_temp_timeout(serial_connection, timeout) as device_connection:
        return _write_to_device_and_return(command, ack, device_connection)
Write a command and return the response
def gc(self):
    """Return the frequency of G and C bases in the current sequence.

    Only uppercase 'G'/'C' count, matching the original behavior.
    """
    # sum over a generator instead of building a throwaway list; also
    # avoids shadowing the function name with a local variable.
    gc_count = sum(1 for base in self.seq if base in 'GC')
    return float(gc_count) / len(self)
Find the frequency of G and C in the current sequence.
def _siftup_max(heap, pos):
    """Maxheap variant of _siftup: bubble the hole down, then sift the item up."""
    endpos = len(heap)
    startpos = pos
    newitem = heap[pos]
    child = 2 * pos + 1  # leftmost child
    while child < endpos:
        right = child + 1
        # Prefer the larger (or equal) child.
        if right < endpos and not heap[right] < heap[child]:
            child = right
        heap[pos] = heap[child]
        pos = child
        child = 2 * pos + 1
    heap[pos] = newitem
    _siftdown_max(heap, startpos, pos)
Maxheap variant of _siftup
def _get_schema():
    """Load and return the scheduling-block JSON schema for validation."""
    schema_path = os.path.join(os.path.dirname(__file__), 'schema',
                               'scheduling_block_schema.json')
    with open(schema_path, 'r') as schema_file:
        return json.load(schema_file)
Get the schema for validation
def graceful_exit(self, msg):
    """Log msg (and any previously caught error) and exit with status 1."""
    if self.caught_error:
        self.print2file(self.stderr, False, False, self.caught_error)
    self.log(msg)
    sys.exit(1)
This function tries to update the MSQL database before exiting.
def _get_main_and_json(directory): directory = os.path.normpath(os.path.abspath(directory)) checker_main = os.path.normpath(os.path.join(directory, os.path.pardir, "checker-workflow-wrapping-tool.cwl")) if checker_main and os.path.exists(checker_main): main_cwl = [checker_main] else: main_cwl = glob.glob(os.path.join(directory, "main-*.cwl")) main_cwl = [x for x in main_cwl if not x.find("-pack") >= 0] assert len(main_cwl) == 1, "Did not find main CWL in %s" % directory main_json = glob.glob(os.path.join(directory, "main-*-samples.json")) assert len(main_json) == 1, "Did not find main json in %s" % directory project_name = os.path.basename(directory).split("-workflow")[0] return main_cwl[0], main_json[0], project_name
Retrieve the main CWL and sample JSON files from a bcbio generated directory.
def getTopdir(topfile, start=None):
    """Walk upwards from start (default: cwd) until topfile is found.

    Exits the process if topfile is not found within 20 levels.
    """
    if not start:
        start = os.getcwd()
    here = start
    for _ in range(20):  # safety cap against pathological trees
        if os.path.exists(os.path.join(here, topfile)):
            return here
        parent = os.path.dirname(here)
        if parent == here:  # reached the filesystem root
            break
        here = parent
    output("Unable to find topfile '{}' anywhere "
           "from {} upwards".format(topfile, start))
    sys.exit(1)
walk upwards from the current directory until we find this topfile
def _compute_acq_withGradients(self, x):
    """Integrated acquisition (Probability of Improvement flavour) and its
    derivative, averaged over the sampled fmins."""
    means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
    fmins = self.model.get_fmin()
    f_total, df_total = None, None
    for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
        phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
        df = -(phi / s) * (dmdx + dsdx * u)
        if f_total is None:
            f_total, df_total = Phi, df
        else:
            f_total += Phi
            df_total += df
    n = len(means)
    return f_total / n, df_total / n
Integrated Expected Improvement and its derivative
def _ligotimegps(s, ns=0):
    """Build a LIGOTimeGPS, casting `s` and `ns` to int on TypeError."""
    from lal import LIGOTimeGPS
    try:
        gps = LIGOTimeGPS(s, ns)
    except TypeError:
        # Some input types (e.g. numpy ints) are rejected; retry as plain int.
        gps = LIGOTimeGPS(int(s), int(ns))
    return gps
Catch TypeError and cast `s` and `ns` to `int`
def truth(val, context):
    """Convert the truth value in `val` to a boolean.

    Numbers are converted with bool(); strings are matched (lowercased)
    against the TRUE/FALSE word sets; anything else raises FilterError.
    """
    try:
        0 + val  # EAFP numeric probe
    except TypeError:
        pass
    else:
        return bool(val)
    lower_val = val.lower()
    if lower_val in TRUE:
        return True
    if lower_val in FALSE:
        return False
    raise FilterError("Bad boolean value %r in %r (expected one of '%s', or '%s')" % (
        val, context, "' '".join(TRUE), "' '".join(FALSE)))
Convert truth value in "val" to a boolean.
def radiation_values(self, location, timestep=1):
    """Lists of direct normal, diffuse horizontal, and global horizontal
    radiation at each timestep."""
    sun_path = Sunpath.from_location(location)
    altitudes = []
    for t_date in self._get_datetimes(timestep):
        sun = sun_path.calculate_sun_from_date_time(t_date)
        altitudes.append(sun.altitude)
    dir_norm, diff_horiz = ashrae_clear_sky(
        altitudes, self._month, self._clearness)
    # Global horizontal = diffuse + direct projected onto the horizontal.
    glob_horiz = [dhr + dnr * math.sin(math.radians(alt))
                  for alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
    return dir_norm, diff_horiz, glob_horiz
Lists of direct normal, diffuse horizontal, and global horizontal radiation at each timestep.
def initiate_tasks(self):
    """Load all tasks via `TaskLoader` from the configured task paths."""
    task_paths = self.configuration[Configuration.ALGORITHM][
        Configuration.TASKS][Configuration.PATHS]
    self.tasks_classes = TaskLoader().load_tasks(paths=task_paths)
Loads all tasks using `TaskLoader` from respective configuration option
def _cutadapt_pe_cmd(fastq_files, out_files, quality_format, base_cmd, data):
    """Build the cutadapt command template for paired end mode.

    The {of1_tx}/{of2_tx}/{log_tx} placeholders are filled in later by the
    caller's transactional formatting.
    """
    fq1, fq2 = [objectstore.cl_input(x) for x in fastq_files]
    of1, of2 = out_files
    min_length = dd.get_min_read_length(data)
    base_cmd += " --minimum-length={min_length} ".format(min_length=min_length)
    first_cmd = base_cmd + " -o {of1_tx} -p {of2_tx} " + fq1 + " " + fq2
    return first_cmd + "| tee > {log_tx};"
run cutadapt in paired end mode
def find_dimension_by_name(self, dim_name):
    """Search for a dimension with the given name; None when absent."""
    return next((dim for dim in self.dimensions
                 if is_equal_strings_ignore_case(dim.name, dim_name)),
                None)
Search for a dimension with the given name.
def write_line(self, line, count=1):
    """Write the line followed by `count` newlines."""
    self.write(line)
    self.write_newlines(count)
writes the line and count newlines after the line
def use_sequestered_assessment_part_view(self):
    """Pass through to provider AssessmentPartLookupSession.use_sequestered_assessment_part_view."""
    self._containable_views['assessment_part'] = SEQUESTERED
    self._get_sub_package_provider_session('assessment_authoring',
                                           'assessment_part_lookup_session')
    for session in self._provider_sessions:
        for provider_session in self._provider_sessions[session].values():
            try:
                provider_session.use_sequestered_assessment_part_view()
            except AttributeError:
                # Not every provider session supports this view; skip those.
                pass
Pass through to provider AssessmentPartLookupSession.use_sequestered_assessment_part_view
def find_config(revision):
    """Locate and return the default config for the given revision.

    Returns the git object path when .cherry_picker.toml exists as a blob
    at that revision, otherwise None.
    """
    if not is_git_repo():
        return None
    cfg_path = f"{revision}:.cherry_picker.toml"
    try:
        output = subprocess.check_output(
            ("git", "cat-file", "-t", cfg_path), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return None
    return cfg_path if output.strip().decode("utf-8") == "blob" else None
Locate and return the default config for the current revision.
def list_local_plugins(plugin_type, plugins_path, plugin_details):
    """List local plugins with details, printing each one; return their names."""
    installed_plugins = []
    for plugin in next(os.walk(plugins_path))[1]:
        click.secho(plugin_details(plugin))
        installed_plugins.append(plugin)
    if not installed_plugins:
        click.secho("[*] You do not have any {0}s installed, "
                    "try installing one with `honeycomb {0} install`".format(plugin_type))
    return installed_plugins
List local plugins with details.
def acq(self, graph):
    """Estimate the acquisition value of a generated graph (UCB/LCB)."""
    mean, std = self.gpr.predict(np.array([graph.extract_descriptor()]))
    sign = 1 if self.optimizemode is OptimizeMode.Maximize else -1
    return mean + sign * self.beta * std
estimate the value of generated graph
def _get_best_prediction(self, record, train=True): if not self.trees: return best = (+1e999999, None) for tree in self.trees: best = min(best, (tree.mae.mean, tree)) _, best_tree = best prediction, tree_mae = best_tree.predict(record, train=train) return prediction.mean
Gets the prediction from the tree with the lowest mean absolute error.
def parse_bind(bind):
    """Parse a connection string and create SQL trace metadata.

    Credentials are stripped from the reported URL: the username is kept
    but the password is dropped.
    """
    engine = bind.engine if isinstance(bind, Connection) else bind
    m = re.match(r"Engine\((.*?)\)", str(engine))
    if m is None:
        return None
    u = urlparse(m.group(1))
    # Ensure this scheme is parsed with a netloc component.
    uses_netloc.append(u.scheme)
    if u.password is None:
        safe_url = u.geturl()
    else:
        host_info = u.netloc.rpartition('@')[-1]
        safe_url = u._replace(
            netloc='{}@{}'.format(u.username, host_info)).geturl()
    sql = {'database_type': u.scheme, 'url': safe_url}
    if u.username is not None:
        sql['user'] = "{}".format(u.username)
    return sql
Parses a connection string and creates SQL trace metadata
def update(self, **kwargs):
    """Override Django's update to emit post_bulk_operation when it completes."""
    updated_count = super(ManagerUtilsQuerySet, self).update(**kwargs)
    post_bulk_operation.send(sender=self.model, model=self.model)
    return updated_count
Overrides Django's update method to emit a post_bulk_operation signal when it completes.
def _direct_render(self, name, attrs):
    """Render the widget the old way - using field_template or output_format."""
    widget_id = attrs.get('id')
    if self.id_prefix:
        widget_id = u'%s_%s' % (self.id_prefix, widget_id)
    context = {
        'image': self.image_url(),
        'name': name,
        'key': self._key,
        'id': widget_id,
        'audio': self.audio_url(),
    }
    self.image_and_audio = render_to_string(
        settings.CAPTCHA_IMAGE_TEMPLATE, context)
    self.hidden_field = render_to_string(
        settings.CAPTCHA_HIDDEN_FIELD_TEMPLATE, context)
    self.text_field = render_to_string(
        settings.CAPTCHA_TEXT_FIELD_TEMPLATE, context)
    return self.format_output(None)
Render the widget the old way - using field_template or output_format.
def _clear_strobes(self): self['SEQ']['GLOBAL_SHIFT_EN'].setall(False) self['SEQ']['GLOBAL_CTR_LD'].setall(False) self['SEQ']['GLOBAL_DAC_LD'].setall(False) self['SEQ']['PIXEL_SHIFT_EN'].setall(False) self['SEQ']['INJECTION'].setall(False)
Resets the "enable" and "load" output streams to all 0.
def eglGetDisplay(display=EGL_DEFAULT_DISPLAY):
    """Connect to the EGL display server; raise RuntimeError on failure."""
    handle = _lib.eglGetDisplay(display)
    if not handle or handle == EGL_NO_DISPLAY:
        raise RuntimeError('Could not create display')
    return handle
Connect to the EGL display server.
def factorial(n):
    """Factorial function that works with really big numbers."""
    # Accept whole-valued floats by converting them to int first.
    if isinstance(n, float) and n.is_integer():
        n = int(n)
    if not isinstance(n, INT_TYPES):
        raise TypeError("Non-integer input (perhaps you need Euler Gamma "
                        "function or Gauss Pi function)")
    if n < 0:
        raise ValueError("Input shouldn't be negative")
    result = 1
    for m in range(2, n + 1):
        result *= m
    return result
Factorial function that works with really big numbers.
def print_device_aldb(self, addr):
    """Display the All-Link database for a device."""
    if Address(addr).id == self.plm.address.id:
        device = self.plm
    else:
        device = self.plm.devices[Address(addr).id]
    if not device:
        _LOGGING.info('Device not found.')
        return
    if device.aldb.status not in [ALDBStatus.LOADED, ALDBStatus.PARTIAL]:
        _LOGGING.info('ALDB not loaded. '
                      'Use `load_aldb %s` first.', device.address.id)
        return
    if device.aldb.status == ALDBStatus.PARTIAL:
        _LOGGING.info('ALDB partially loaded for device %s', addr)
    for mem_addr in device.aldb:
        record = device.aldb[mem_addr]
        _LOGGING.debug('mem_addr: %s', mem_addr)
        _LOGGING.info('ALDB record: %s', record)
Display the All-Link database for a device.
def Reverse(self, copy=False):
    """Reverse the order of the points, in place or into a clone."""
    n = self.GetN()
    target = self.Clone() if copy else self
    X, Y = self.GetX(), self.GetY()
    EXlow, EXhigh = self.GetEXlow(), self.GetEXhigh()
    EYlow, EYhigh = self.GetEYlow(), self.GetEYhigh()
    for i in range(n):
        j = n - 1 - i
        target.SetPoint(i, X[j], Y[j])
        target.SetPointError(i, EXlow[j], EXhigh[j], EYlow[j], EYhigh[j])
    return target
Reverse the order of the points
def _determine_username(self, ip):
    """SSH in as root and parse the 'please login as' hint to find the username."""
    ssh = subprocess.Popen(
        ["ssh",
         "-o", "UserKnownHostsFile=/dev/null",
         "-o", "StrictHostKeyChecking=no",
         "root@%s" % ip],
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    first_line = ssh.stdout.readline()
    ssh.kill()
    ssh.wait()
    if not first_line:
        return None
    match = re.search(
        r"Please login as the user \"(\w+)\" rather than "
        r"the user \"root\".",
        first_line.decode('utf-8'))
    return match.groups()[0] if match else None
SSH in as root and determine the username.
def parallel_variantcall_region(samples, run_parallel):
    """Perform variant calling and post-analysis on samples by region."""
    to_process, extras = _dup_samples_by_variantcaller(samples)
    split_fn = _split_by_ready_regions(".vcf.gz", "work_bam", get_variantcaller)
    processed = grouped_parallel_split_combine(
        to_process, split_fn, multi.group_batches, run_parallel,
        "variantcall_sample", "concat_variant_files", "vrn_file",
        ["region", "sam_ref", "config"])
    return extras + _collapse_by_bam_variantcaller(processed)
Perform variant calling and post-analysis on samples by region.
def delete_notification_rule(self, id, **kwargs):
    """Delete a notification rule for this user."""
    endpoint = '{0}/{1}/notification_rules/{2}'.format(
        self.endpoint,
        self['id'],
        id,
    )
    return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
Delete a notification rule for this user.
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts):
    """All tabular figures must have the same width across the RIBBI-family."""
    from fontbakery.constants import RIBBI_STYLE_NAMES
    from fontTools.ttLib import TTFont
    RIBBI_ttFonts = [TTFont(f) for f in fonts if style(f) in RIBBI_STYLE_NAMES]
    # Map width -> list of ".tnum" glyph names that have that width.
    tnum_widths = {}
    for ttFont in RIBBI_ttFonts:
        glyphs = ttFont.getGlyphSet()
        for glyph_id in glyphs.keys():
            if glyph_id.endswith(".tnum"):
                tnum_widths.setdefault(glyphs[glyph_id].width, []).append(glyph_id)
    if len(tnum_widths) > 1:
        # Keep only the outliers: drop the most common width (first one
        # encountered wins ties, as before).
        most_common_width = max(tnum_widths, key=lambda w: len(tnum_widths[w]))
        del tnum_widths[most_common_width]
        yield FAIL, (f"The most common tabular glyph width is {most_common_width}."
                     " But there are other tabular glyphs with different widths"
                     f" such as the following ones:\n\t{tnum_widths}.")
    else:
        yield PASS, "OK"
All tabular figures must have the same width across the RIBBI-family.
def _is_non_string_iterable(value): if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False
Whether a value is iterable.
def cycle_focus(self):
    """Cycle focus to the next window (wrapping around)."""
    all_windows = self.windows()
    current = all_windows.index(self.active_window)
    self.active_window = all_windows[(current + 1) % len(all_windows)]
Cycle through all windows.
def map_grounding():
    """Map grounding on a list of INDRA Statements posted in the request body."""
    if request.method == 'OPTIONS':
        return {}  # CORS preflight
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    return _return_stmts(ac.map_grounding(stmts))
Map grounding on a list of INDRA Statements.
def batch_predict(dataset, model_dir, output_csv, output_bq_table):
    """Batch predict running locally with a DirectRunner Beam pipeline."""
    import apache_beam as beam
    from google.datalab.utils import LambdaJob
    from . import _predictor
    if output_csv is None and output_bq_table is None:
        raise ValueError('output_csv and output_bq_table cannot both be None.')
    job_id = ('batch-predict-image-classification-' +
              datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
    options = {'project': _util.default_project()}
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    pipeline = beam.Pipeline('DirectRunner', options=opts)
    _predictor.configure_pipeline(pipeline, dataset, model_dir,
                                  output_csv, output_bq_table)
    return LambdaJob(lambda: pipeline.run().wait_until_finish(), job_id)
Batch predict running locally.
def to_log(x1, x2, x1err, x2err):
    """Take linear measurements and uncertainties and transform to log values.

    Errors become the (upper) log-space offsets: log10(x + err) - log10(x).
    """
    x1 = numpy.array(x1)
    x2 = numpy.array(x2)
    logx1 = numpy.log10(x1)
    logx2 = numpy.log10(x2)
    logx1err = numpy.log10(x1 + numpy.array(x1err)) - logx1
    logx2err = numpy.log10(x2 + numpy.array(x2err)) - logx2
    return logx1, logx2, logx1err, logx2err
Take linear measurements and uncertainties and transform to log values.
def parse_uinput_mapping(name, mapping):
    """Parse a dict of mapping options and register the resulting mapping."""
    axes, buttons, mouse, mouse_options = {}, {}, {}, {}
    description = "ds4drv custom mapping ({0})".format(name)
    for key, attr in mapping.items():
        key = key.upper()
        if key.startswith(("BTN_", "KEY_")):
            buttons[key] = attr
        elif key.startswith("ABS_"):
            axes[key] = attr
        elif key.startswith("REL_"):
            mouse[key] = attr
        elif key.startswith("MOUSE_"):
            mouse_options[key] = attr
    create_mapping(name, description, axes=axes, buttons=buttons,
                   mouse=mouse, mouse_options=mouse_options)
Parses a dict of mapping options.
def version():
    """View the current version of the CLI."""
    import pkg_resources
    current_version = pkg_resources.require(PROJECT_NAME)[0].version
    floyd_logger.info(current_version)
View the current version of the CLI.
def read_dir_tree(self, file_hash): json_d = self.read_index_object(file_hash, 'tree') node = {'files' : json_d['files'], 'dirs' : {}} for name, hsh in json_d['dirs'].iteritems(): node['dirs'][name] = self.read_dir_tree(hsh) return node
Recursively read the directory structure beginning at hash
def _create_arc(self, inst, destination, placeable=None):
    """Return a list of coordinates (move strategy) to arrive at the
    destination coordinate.

    The arc goes up to a safe z height, traverses in x/y, then descends.

    :param inst: the instrument (pipette) being moved.
    :param destination: (x, y, z) target coordinate.
    :param placeable: optional Well/WellSeries/Container at the destination,
        used to pick a tighter clearance when staying over the same labware.
    """
    this_container = None
    if isinstance(placeable, containers.Well):
        this_container = placeable.get_parent()
    elif isinstance(placeable, containers.WellSeries):
        this_container = placeable.get_parent()
    elif isinstance(placeable, containers.Container):
        this_container = placeable
    if this_container and self._prev_container == this_container:
        # Staying over the same labware: only clear that labware's height.
        arc_top = self.max_placeable_height_on_deck(this_container)
        arc_top += TIP_CLEARANCE_LABWARE
    elif self._use_safest_height:
        # Legacy safety mode: go to the pipette's maximum height.
        arc_top = inst._max_deck_height()
    else:
        arc_top = self.max_deck_height() + TIP_CLEARANCE_DECK
    self._prev_container = this_container
    # Never arc below the current pipette height or the destination height,
    # and never above what the pipette can physically reach.
    _, _, pip_z = pose_tracker.absolute(self.poses, inst)
    arc_top = max(arc_top, destination[2], pip_z)
    arc_top = min(arc_top, inst._max_deck_height())
    strategy = [
        {'z': arc_top},
        {'x': destination[0], 'y': destination[1]},
        {'z': destination[2]}
    ]
    return strategy
Returns a list of coordinates to arrive to the destination coordinate
def bartlett(timeseries, segmentlength, **kwargs):
    """Calculate a PSD using Bartlett's method (Welch with zero overlap)."""
    # Any caller-supplied noverlap is discarded: Bartlett's method is
    # defined as non-overlapping segments.
    kwargs.pop('noverlap', None)
    return welch(timeseries, segmentlength, noverlap=0, **kwargs)
Calculate a PSD using Bartlett's method
def _deployment_menu_entry(deployment):
    """Build a string to display in the 'select deployment' menu."""
    paths = ", ".join(_module_name_for_display(module)
                      for module in deployment['modules'])
    regions = ", ".join(deployment.get('regions', []))
    return "%s - %s (%s)" % (deployment.get('name'), paths, regions)
Build a string to display in the 'select deployment' menu.
def simple(child, shared, parent):
    """Return whether a child is "simple" in the sense of group_children:
    it has no refs and is shared by at most its own parent."""
    if child.get('refs', ()):
        return False
    owners = shared.get(child['address'])
    return not owners or owners == [parent['address']]
Return sub-set of children who are "simple" in the sense of group_children
def create_noise(length, level=4):
    """Create white noise for background.

    :param length: number of samples.
    :param level: spread of the noise; samples are centred around 128.
    :returns: a bytearray of `length` values in [adjust, adjust + level - 1].
    """
    adjust = 128 - int(level / 2)
    noise = bytearray(length)
    for i in range(length):
        # randint's upper bound is inclusive; the old randint(0, 256)
        # drew from 257 values, slightly biasing the modulo below.
        noise[i] = random.randint(0, 255) % level + adjust
    return noise
Create white noise for background
def _grid_col(self): tr = self._tr idx = tr.tc_lst.index(self) preceding_tcs = tr.tc_lst[:idx] return sum(tc.grid_span for tc in preceding_tcs)
The grid column at which this cell begins.
def reset(yes):
    """Remove all the datastores and the database of the current user."""
    ok = yes or confirm('Do you really want to destroy all your data? (y/n) ')
    if not ok:
        return
    dbpath = os.path.realpath(os.path.expanduser(config.dbserver.file))
    if os.path.isfile(dbpath) and os.access(dbpath, os.W_OK):
        if dbserver.get_status() == 'running':
            if config.dbserver.multi_user:
                sys.exit('The oq dbserver must be stopped '
                         'before proceeding')
            # Single-user mode: stop the dbserver ourselves.
            pid = logs.dbcmd('getpid')
            os.kill(pid, signal.SIGTERM)
            time.sleep(.5)
            assert dbserver.get_status() == 'not-running'
            print('dbserver stopped')
        try:
            os.remove(dbpath)
            print('Removed %s' % dbpath)
        except OSError as exc:
            print(exc, file=sys.stderr)
    purge_all(fast=True)
Remove all the datastores and the database of the current user
def create_trainer(self, username, team, start_date=None, has_cheated=None,
                   last_cheated=None, currently_cheats=None, statistics=True,
                   daily_goal=None, total_goal=None, prefered=True,
                   account=None, verified=False):
    """Add a trainer to the database.

    Required: username and team (faction). Optional fields are only sent
    when not None; start_date is serialised as an ISO date.
    """
    payload = {
        'username': username,
        'faction': team,
        'statistics': statistics,
        'prefered': prefered,
        'last_modified': maya.now().iso8601(),
        'owner': account,
        'verified': verified,
    }
    # Explicit optional fields instead of the old locals() introspection,
    # which silently depended on parameter names and local-variable order.
    optional = {
        'has_cheated': has_cheated,
        'last_cheated': last_cheated,
        'currently_cheats': currently_cheats,
        'daily_goal': daily_goal,
        'total_goal': total_goal,
    }
    for field, value in optional.items():
        if value is not None:
            payload[field] = value
    if start_date is not None:
        payload['start_date'] = start_date.date().isoformat()
    r = requests.post(api_url + 'trainers/', data=json.dumps(payload),
                      headers=self.headers)
    print(request_status(r))
    r.raise_for_status()
    return Trainer(r.json())
Add a trainer to the database
def kube_resourcequota(self, metric, scraper_config):
    """Report quota ('hard' -> limit) and current usage by resource type."""
    if metric.type not in METRIC_TYPES:
        self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
        return
    metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
    suffixes = {'used': 'used', 'hard': 'limit'}
    for sample in metric.samples:
        labels = sample[self.SAMPLE_LABELS]
        mtype = labels.get("type")
        resource = labels.get("resource")
        tags = [
            self._label_to_tag("namespace", labels, scraper_config),
            self._label_to_tag("resourcequota", labels, scraper_config),
        ] + scraper_config['custom_tags']
        self.gauge(metric_base_name.format(resource, suffixes[mtype]),
                   sample[self.SAMPLE_VALUE], tags)
Quota and current usage by resource type.
def _try_open(cls):
    """Try to open a USB handle, checking ADB then fastboot interfaces.

    Updates cls.serial_number from the opened handle; the handle itself
    is always closed again before returning. Returns True on success.
    """
    candidates = (
        (adb_device.CLASS, adb_device.SUBCLASS, adb_device.PROTOCOL),
        (fastboot_device.CLASS, fastboot_device.SUBCLASS,
         fastboot_device.PROTOCOL),
    )
    for usb_cls, subcls, protocol in candidates:
        handle = None
        try:
            handle = local_usb.LibUsbHandle.open(
                serial_number=cls.serial_number,
                interface_class=usb_cls,
                interface_subclass=subcls,
                interface_protocol=protocol)
            cls.serial_number = handle.serial_number
            return True
        except usb_exceptions.DeviceNotFoundError:
            pass
        except usb_exceptions.MultipleInterfacesFoundError:
            _LOG.warning('Multiple Android devices found, ignoring!')
        finally:
            # The probe handle is closed even on success (before return).
            if handle:
                handle.close()
    return False
Try to open a USB handle.
def save_program(self, title, bytes, line=32768):
    """Save the given bytes as a BASIC program with a standard header."""
    self.standard_program_header(title, len(bytes), line)
    block = [self.BLOCK_TYPE_DATA] + [int(b) & 0xFF for b in bytes]
    self.standard_block(block)
Saves the given bytes as a BASIC program.
def _is_partial_index(gbi_file): with open(gbi_file) as in_handle: for i, _ in enumerate(in_handle): if i > 2: return False return True
Check for truncated output since grabix doesn't write to a transactional directory.
def _schema_from_verb(verb, partial=False):
    """Return a schema instance for the given verb name."""
    from .verbs import Verbs
    schema_factory = getattr(Verbs, verb)
    return schema_factory(partial=partial)
Return an instance of schema for given verb.
def parse_config(file_path):
    """Load the plugin configuration file given as parameter.

    :returns: dict of option name -> value (None where reading failed).
    """
    config_parser = configparser.ConfigParser()
    config_parser.read(file_path)
    plugin_config = {}
    for option in config_parser.options(CONFIG_OPTION):
        try:
            plugin_config[option] = config_parser.get(CONFIG_OPTION, option)
            if plugin_config[option] == -1:
                print("skip: %s" % option)
        except Exception as e:
            print("exception on %s!" % option)
            # e.message was Python 2 only and raises AttributeError on
            # Python 3 exceptions; str(e) is the portable spelling.
            print(str(e))
            plugin_config[option] = None
    return plugin_config
Loads the configuration file given as parameter
def type_name(self) -> T.Optional[str]:
    """Return the type name associated with this docstring metadata, if any."""
    if len(self.args) > 1:
        return self.args[1]
    return None
Return type name associated with given docstring metadata.
def _weights_callback(self, msg): weights = np.array(msg.data) if len(self._weight_buffers) == 0: self._weight_buffers = [[] for i in range(len(weights))] for i, w in enumerate(weights): if len(self._weight_buffers[i]) == self._ntaps: self._weight_buffers[i].pop(0) self._weight_buffers[i].append(w)
Callback for recording weights from sensor.
def flatten_container(self, container):
    """Accept a marathon container and pull nested values into the top level,
    keyed by their dotted marathon names."""
    for names in ARG_MAP.values():
        marathon_name = names[TransformationTypes.MARATHON.value]['name']
        if not marathon_name or '.' not in marathon_name:
            continue
        parts = marathon_name.split('.')
        if parts[-2] == 'parameters':
            # docker parameters need type-aware lookup.
            common_type = names[TransformationTypes.MARATHON.value].get('type')
            result = self._lookup_parameter(container, parts[-1], common_type)
        else:
            result = lookup_nested_dict(container, *parts)
        if result:
            container[marathon_name] = result
    return container
Accepts a marathon container and pulls out the nested values into the top level
def docstring(self, servicer):
    """Generate a doc-string for the RPC method on the given servicer,
    appending :param/:type lines for the request fields."""
    doc = getattr(servicer, to_lower_camel_case(self.name)).__doc__ \
        or "TODO: no docstring in .proto file"
    if self.uses_request:
        doc += "\n"
        for field in get_fields(self.request_type):
            if field == self.field_name:
                continue
            type_info = get_field_description(
                self.request_type.DESCRIPTOR.fields_by_name[field])
            doc += " :param {}: {}\n".format(field, field)
            doc += " :type {0}: {1}\n".format(field, type_info)
    return doc
Generate a doc-string.
def dynamodb_autoscaling_policy(tables):
    """Policy allowing AutoScaling of a list of DynamoDB tables."""
    table_statement = Statement(
        Effect=Allow,
        Resource=dynamodb_arns(tables),
        Action=[
            dynamodb.DescribeTable,
            dynamodb.UpdateTable,
        ])
    # CloudWatch alarm permissions cannot be scoped to specific tables.
    alarm_statement = Statement(
        Effect=Allow,
        Resource=['*'],
        Action=[
            cloudwatch.PutMetricAlarm,
            cloudwatch.DescribeAlarms,
            cloudwatch.GetMetricStatistics,
            cloudwatch.SetAlarmState,
            cloudwatch.DeleteAlarms,
        ])
    return Policy(Statement=[table_statement, alarm_statement])
Policy to allow AutoScaling a list of DynamoDB tables.
def authenticate_credentials(self, token: bytes, request=None):
    """Authenticate the token, with optional request for context.

    Raises AuthenticationFailed for unknown tokens or inactive users.
    """
    user = AuthToken.get_user_for_token(token)
    if user is None:
        raise AuthenticationFailed(_('Invalid auth token.'))
    if not user.is_active:
        raise AuthenticationFailed(_('User inactive or deleted.'))
    return user, token
Authenticate the token with optional request for context.
def move_point_cat(point, ipoint, to_clust, from_clust, cl_attr_freq, membship, centroids):
    """Move point between clusters, categorical attributes.

    Updates the membership matrix, the per-cluster attribute frequency
    tables and, where the move changes the mode of an attribute, the
    cluster centroids. Returns the (mutated) ``cl_attr_freq``, ``membship``
    and ``centroids``.
    """
    membship[to_clust, ipoint] = 1
    membship[from_clust, ipoint] = 0
    # Update per-attribute frequencies in both affected clusters.
    for iattr, curattr in enumerate(point):
        to_attr_counts = cl_attr_freq[to_clust][iattr]
        from_attr_counts = cl_attr_freq[from_clust][iattr]
        # Increment the count of this attribute value in the "to" cluster.
        to_attr_counts[curattr] += 1
        current_attribute_value_freq = to_attr_counts[curattr]
        current_centroid_value = centroids[to_clust][iattr]
        current_centroid_freq = to_attr_counts[current_centroid_value]
        if current_centroid_freq < current_attribute_value_freq:
            # The moved value is now the mode of this attribute in the
            # "to" cluster; promote it to centroid.
            centroids[to_clust][iattr] = curattr
        # Decrement the count of this attribute value in the "from" cluster.
        from_attr_counts[curattr] -= 1
        old_centroid_value = centroids[from_clust][iattr]
        if old_centroid_value == curattr:
            # The departing value was the centroid of the "from" cluster;
            # recompute the mode from the remaining counts.
            centroids[from_clust][iattr] = get_max_value_key(from_attr_counts)
    return cl_attr_freq, membship, centroids
Move point between clusters, categorical attributes.
def allow_network_access_grading(self):
    """Return True if the grading container should have access to the network.

    The first hook result wins; when no hook provided a value, fall back to
    the task's own ``_network_grading`` setting.
    """
    hook_results = self._hook_manager.call_hook(
        'task_network_grading', course=self.get_course(), task=self,
        default=self._network_grading)
    return hook_results[0] if hook_results else self._network_grading
Return True if the grading container should have access to the network
def lookup(self, mac):
    """Return the OUI (vendor) name registered for the given MAC address.

    Keys on the first three colon-separated octets, upper-cased.
    """
    prefix_octets = mac.upper().split(":")[:3]
    return self[":".join(prefix_octets)]
Find the OUI (vendor) name matching a MAC address.
def init_app(self, app):
    """Configure a Flask application to use this ZODB extension.

    Registers the extension state on ``app.extensions`` and installs a
    request-teardown handler that releases the database connection.
    """
    assert 'zodb' not in app.extensions, \
        'app already initiated for zodb'
    state = _ZODBState(self, app)
    app.extensions['zodb'] = state
    # Release the DB connection after every request.
    app.teardown_request(self.close_db)
Configure a Flask application to use this ZODB extension.
def validate_hex(value):
    """Validate that ``value`` has hex format; return it unchanged.

    :raises vol.Invalid: when ``value`` cannot be decoded as hexadecimal.
    """
    try:
        binascii.unhexlify(value)
    except (ValueError, TypeError):
        # binascii.Error (odd length / non-hex digit) is a ValueError
        # subclass; TypeError covers non-string input. The previous broad
        # ``except Exception`` could mask unrelated bugs.
        raise vol.Invalid(
            '{} is not of hex format'.format(value))
    return value
Validate that value has hex format.
def current_grid_empty(self):
    """Check to see if grid is empty except for default values.

    A grid counts as empty when the dataframe has no index, or when its
    single row contains only strings that are known default values
    ('this study', 'g', 'i', case-insensitive).
    """
    empty = True
    if not any(self.magic_dataframe.df.index):
        # No rows at all -- redundant reassignment kept for clarity of the
        # three-way branch; empty is already True.
        empty = True
    elif len(self.grid.row_labels) > 1:
        # More than one row means real data was entered.
        empty = False
    else:
        # Single row: inspect its non-null values.
        non_null_vals = [val for val in self.magic_dataframe.df.values[0] if cb.not_null(val, False)]
        for val in non_null_vals:
            if not isinstance(val, str):
                # Any non-string value is considered real data.
                empty = False
                break
            if val.lower() not in ['this study', 'g', 'i']:
                # A string other than the known defaults is real data.
                empty = False
                break
    return empty
Check to see if grid is empty except for default values
def _assert_valid_permission(self, perm_str):
    """Raise a DataONE InvalidRequest exception if ``perm_str`` is not a
    valid permission."""
    if perm_str in ORDERED_PERM_LIST:
        return
    raise d1_common.types.exceptions.InvalidRequest(
        0,
        'Permission must be one of {}. perm_str="{}"'.format(
            ', '.join(ORDERED_PERM_LIST), perm_str
        ),
    )
Raise D1 exception if ``perm_str`` is not a valid permission.
def strip_wsgi(request):
    """Return a copy of the request META data with all WSGI entries blanked.

    Keys are preserved; only the values of ``wsgi*`` keys are set to None
    (the WSGI objects themselves are not serializable).
    """
    meta = copy(request.META)
    for key in meta:
        if key.startswith('wsgi'):
            meta[key] = None
    return meta
Strip WSGI data out of the request META data.
async def jsk_vc_youtube_dl(self, ctx: commands.Context, *, url: str):
    """Plays audio from youtube_dl-compatible sources in the caller's
    guild voice channel.

    Stops any currently-playing audio first, then starts playing the
    given URL and confirms in chat.
    """
    if not youtube_dl:
        return await ctx.send("youtube_dl is not installed.")
    # NOTE(review): assumes the bot is already connected to voice
    # (voice_client is not None) -- presumably guaranteed by a check
    # elsewhere; confirm against the command's decorators.
    voice = ctx.guild.voice_client
    if voice.is_playing():
        voice.stop()
    # Strip the angle brackets users wrap URLs in to suppress embeds.
    url = url.lstrip("<").rstrip(">")
    voice.play(discord.PCMVolumeTransformer(BasicYouTubeDLSource(url)))
    await ctx.send(f"Playing in {voice.channel.name}.")
Plays audio from youtube_dl-compatible sources.
def download_log_of_current_script():
    """Download the log of the current Python script from the EV3 brick.

    Silently does nothing when no file is open; any failure is shown to
    the user in an error dialog instead of propagating.
    """
    try:
        editor = get_workbench().get_current_editor()
        script_path = editor.get_filename(False)
        if script_path is None:
            # No saved file is open -- nothing to download.
            return
        download_log(script_path)
    except Exception:
        showerror("Error", traceback.format_exc(0) + '\n')
Download the log of the current Python script from the EV3 brick.
def plot_sphere(ax, center, radius, color='black', alpha=1., linspace_count=_LINSPACE_COUNT):
    """Plot a 3d sphere on ``ax``, given the center and the radius.

    The surface is parametrized by azimuth (u) and inclination (v), each
    sampled at ``linspace_count`` points.
    """
    azimuth = np.linspace(0, 2 * np.pi, linspace_count)
    inclination = np.linspace(0, np.pi, linspace_count)
    sin_incl = np.sin(inclination)
    xs = center[0] + radius * np.outer(np.cos(azimuth), sin_incl)
    ys = center[1] + radius * np.outer(np.sin(azimuth), sin_incl)
    zs = center[2] + radius * np.outer(np.ones_like(azimuth), np.cos(inclination))
    ax.plot_surface(xs, ys, zs, linewidth=0.0, color=color, alpha=alpha)
Plots a 3d sphere, given the center and the radius.
def override_env_variables():
    """Temporarily override user environment variables with "test".

    Generator to be driven as a context: on first resume, sets LOGNAME,
    USER, LNAME and USERNAME to "test"; after the yield, restores the
    previous values exactly. Fixes the original cleanup, which left "test"
    in place for variables that were unset (or falsy) beforehand.
    """
    env_vars = ("LOGNAME", "USER", "LNAME", "USERNAME")
    # Remember the exact prior state; None means "was not set".
    old = [os.environ.get(v) for v in env_vars]
    for v in env_vars:
        os.environ[v] = "test"
    yield
    for v, prev in zip(env_vars, old):
        if prev is not None:
            os.environ[v] = prev
        else:
            # Variable did not exist before -- remove it again.
            os.environ.pop(v, None)
Override user environmental variables with custom one.
def filter(self, run_counts, criteria):
    """Return run counts only for examples that are still correctly
    classified.

    :param run_counts: dict of per-key count arrays, each indexable by a
        boolean mask.
    :param criteria: dict containing a boolean 'correctness' array.
    :returns: deep copy of ``run_counts`` with every array masked down to
        the correctly-classified examples.
    """
    correctness = criteria['correctness']
    # ``np.bool`` was removed in NumPy 1.24; ``np.bool_`` is the boolean
    # array scalar type and is what a bool-dtype array reports.
    assert correctness.dtype == np.bool_
    filtered_counts = deep_copy(run_counts)
    for key in filtered_counts:
        filtered_counts[key] = filtered_counts[key][correctness]
    return filtered_counts
Return run counts only for examples that are still correctly classified
def remove_cron(self, client, event):
    """Remove a cron entry by event name.

    Deletes the first cron whose event matches; always returns True,
    whether or not a match was found.
    """
    for idx, cron_entry in enumerate(self.crons):
        if cron_entry.event != event:
            continue
        _log.info("De-registering cron '%s'.", event)
        del self.crons[idx]
        break
    return True
Remove a cron entry by event name.
def execute(self):
    """Execute a system command.

    When ``_decode_output`` is set, captures stdout as a list of stripped
    UTF-8 lines in ``_output``; otherwise runs via os.system without
    capturing. Marks ``_success`` and returns self for chaining.
    """
    if not self._decode_output:
        os.system(self.command)
        self._success = True
        return self
    # NOTE: shell=True with an arbitrary command string -- callers must not
    # pass untrusted input here.
    with Popen(self.command, shell=True, stdout=PIPE) as proc:
        self._output = [line.decode("utf-8").strip() for line in proc.stdout]
        self._success = True
    return self
Execute a system command.
def _completed_families(self, reference_name, rightmost_boundary):
    """Yield families whose right coordinate is strictly less than
    ``rightmost_boundary``, removing them from the in-progress state.

    Families are yielded grouped by right coordinate (processed in
    ascending order off the front of the in-progress list), ordered by
    their leftmost alignment start, with each family's members sorted by
    query name. ``pending_pair_count`` is decremented as families leave.
    """
    in_progress = self._right_coords_in_progress[reference_name]
    while len(in_progress):
        # in_progress is consumed front-first; presumably kept sorted
        # ascending by the code that appends to it -- confirm at call site.
        right_coord = in_progress[0]
        if right_coord < rightmost_boundary:
            in_progress.pop(0)
            # Detach all families anchored at this right coordinate.
            left_families = self._coordinate_family.pop((reference_name, right_coord), {})
            for family in sorted(left_families.values(), key=lambda x:x[0].left.reference_start):
                family.sort(key=lambda x: x.query_name)
                self.pending_pair_count -= len(family)
                yield family
        else:
            # First coordinate at/past the boundary: nothing further is
            # complete yet.
            break
Yield families whose right coordinate is strictly less than the rightmost boundary.
def handle_error(self, error, response):
    """Redirect the client back to its redirect_uri when the auth process
    failed, passing the error code as a query parameter."""
    query = urlencode({"error": error.error})
    location = "%s?%s" % (self.client.redirect_uri, query)
    response.status_code = 302
    response.body = ""
    response.add_header("Location", location)
    return response
Redirects the client in case an error in the auth process occurred.
async def _ask_queue_update(self):
    """Periodically send a ClientGetQueue message to the backend, unless a
    previous request is still outstanding.

    Runs forever (every ``_queue_update_timer`` seconds) until cancelled.
    """
    try:
        while True:
            await asyncio.sleep(self._queue_update_timer)
            # _queue_update_last_attempt == 0 appears to mean "no request
            # outstanding"; a value above the max means the previous
            # request is considered lost and is retried anyway.
            # NOTE(review): nothing in this block increments the counter
            # past 1 -- presumably the response handler resets it to 0 and
            # something else advances it; confirm elsewhere in the class.
            if self._queue_update_last_attempt == 0 or self._queue_update_last_attempt > self._queue_update_last_attempt_max:
                if self._queue_update_last_attempt:
                    self._logger.error("Asking for a job queue update despite previous update not yet received")
                else:
                    self._logger.debug("Asking for a job queue update")
                self._queue_update_last_attempt = 1
                await self._simple_send(ClientGetQueue())
            else:
                self._logger.error("Not asking for a job queue update as previous update not yet received")
    except asyncio.CancelledError:
        return
    except KeyboardInterrupt:
        return
Send a ClientGetQueue message to the backend, if one is not already sent
def read_static_uplink(self):
    """Read the static uplink for this host from the configured node and
    port lists, if both were given.

    Sets ``static_uplink`` and ``static_uplink_port`` when this host
    appears in ``node_list``; otherwise leaves state untouched.
    """
    if self.node_list is None or self.node_uplink_list is None:
        return
    nodes = self.node_list.split(',')
    ports = self.node_uplink_list.split(',')
    for node, port in zip(nodes, ports):
        if node.strip() != self.host_name:
            continue
        self.static_uplink = True
        self.static_uplink_port = port.strip()
        return
Read the static uplink from file, if given.
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
                   credentials_filename=None, api_key=None, client=None,
                   oauth2client_args=None, **kwds):
    """Attempt to get credentials, using an oauth dance as the last resort.

    Tries each registered credentials method in ``_CREDENTIALS_METHODS``
    order first; when none succeeds, falls back to a credentials file
    (``~/.apitools.token`` by default), which may trigger an interactive
    oauth flow.

    :param package_name: used to derive a default user agent.
    :param scopes: iterable of OAuth scopes; normalized before use.
    :param client_id: OAuth client id.
    :param client_secret: OAuth client secret.
    :param user_agent: user agent string; defaulted from package_name.
    :param credentials_filename: optional token cache file path.
    :param api_key: unused here -- presumably consumed by a credentials
        method via **kwds in other call paths; confirm before removing.
    :param client: unused here -- see note on api_key.
    :param oauth2client_args: extra args for the file-based flow.
    :raises exceptions.CredentialsError: when no method yields credentials.
    """
    scopes = util.NormalizeScopes(scopes)
    client_info = {
        'client_id': client_id,
        'client_secret': client_secret,
        'scope': ' '.join(sorted(scopes)),
        'user_agent': user_agent or '%s-generated/0.1' % package_name,
    }
    # First wins: the registered methods are tried in registration order.
    for method in _CREDENTIALS_METHODS:
        credentials = method(client_info, **kwds)
        if credentials is not None:
            return credentials
    credentials_filename = credentials_filename or os.path.expanduser(
        '~/.apitools.token')
    # Last resort: cached token file / interactive oauth dance.
    credentials = CredentialsFromFile(credentials_filename, client_info,
                                      oauth2client_args=oauth2client_args)
    if credentials is not None:
        return credentials
    raise exceptions.CredentialsError('Could not create valid credentials')
Attempt to get credentials, using an oauth dance as the last resort.
def connect(self):
    """Return a context that uses this pool as a connection source."""
    context = Context(self.module, self.create_mediator())
    # Propagate this pool's configuration onto the new context.
    context.logger = self.logger
    context.default_factory = self.default_factory
    return context
Returns a context that uses this pool as a connection source.
def Indirect(self, off):
    """Retrieve the relative offset stored at `off` and return the
    absolute position it points to."""
    N.enforce_number(off, N.UOffsetTFlags)
    relative = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
    return off + relative
Indirect retrieves the relative offset stored at `off` and returns the absolute position.