code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def _bits_to_geohash(value):
    """Convert a list of GeoHash bits to a GeoHash string.

    Each 5-bit chunk of ``value`` is interpreted as a big-endian integer and
    mapped through ``BASE32MAPR`` to one base-32 character.
    """
    ret = []
    # BUG FIX: ``xrange`` is Python-2-only (NameError on Python 3); ``range``
    # behaves identically here on both versions.
    for chunk in (value[i:i + 5] for i in range(0, len(value), 5)):
        # Fold the chunk's bits (MSB first) into a single integer index.
        total = sum(bit * 2 ** count for count, bit in enumerate(chunk[::-1]))
        ret.append(BASE32MAPR[total])
    return "".join(ret)
Convert a list of GeoHash bits to a GeoHash.
def encrypt(source, dest, keys):
    """Encrypt ``source`` to ``dest`` (ASCII-armored) for the given GPG keys.

    Builds a gnupg command line from the helper functions and runs it via
    ``stderr_output``.  Always returns True; failures surface from the runner.
    """
    cmd = flatten([gnupg_bin(), "--armor", "--output", dest, gnupg_verbose(), gnupg_home(), recipients_args(keys), "--encrypt", source])
    stderr_output(cmd)
    return True
Encrypts a file using the given keys
def generate(self, api):
    """Main code generator entry point.

    Writes one ``<namespace>.stone`` file per API namespace, emitting its
    data types (in dependency order) followed by its routes.
    """
    for namespace in api.namespaces.values():
        with self.output_to_relative_path('%s.stone' % namespace.name):
            self.emit('namespace %s' % namespace.name)
            for data_type in namespace.linearize_data_types():
                self.generate_data_type(data_type)
            for route in namespace.routes:
                self.generate_route(route)
Main code generator entry point.
def format(self):
    """Compute the maximum display width of every column across ``self.rows``.

    Stores the result on ``self.max_lengths`` and returns it.  Falsy cells
    count as width 0.
    """
    widths = []
    for row in self.rows:
        # Grow the width list so it covers every column of this row.
        while len(widths) < len(row):
            widths.append(0)
        for col, cell in enumerate(row):
            cell_width = len(str(cell)) if cell else 0
            widths[col] = max(widths[col], cell_width)
    self.max_lengths = widths
    return self.max_lengths
Format table to print out
def build_block(self):
    """Assemble the candidate block into its finalized form for broadcast.

    Serializes the header, attaches the precomputed header signature, and
    extends the block with the collected batches.
    """
    header_bytes = self.block_header.SerializeToString()
    block = Block(header=header_bytes, header_signature=self._header_signature)
    block.batches.extend(self.batches)
    return block
Assembles the candidate block into its finalized form for broadcast.
def download(args):
    """Download genomes from NCBI.

    Resolves FTP paths for the requested accessions; in test mode just prints
    and returns them, otherwise downloads in parallel with ``wgetGenome``.
    Returns the list of downloaded file paths.
    """
    accessions, infoFTP = set(args['g']), args['i']
    search, exclude = args['s'], args['e']
    FTPs = getFTPs(accessions, infoFTP, search, exclude, threads=args['t'],
                   convert=args['convert'])
    if args['test'] is True:
        for genome in FTPs:
            print('found:', ';'.join(genome[-1]), genome[0])
        return FTPs
    # BUG FIX: the original rebound ``pool`` to the imap iterator, losing the
    # Pool handle so the workers could never be closed/joined.
    worker_pool = Pool(args['t'])
    results = worker_pool.imap_unordered(wgetGenome, FTPs)
    files = [f for f in tqdm(results, total=len(accessions))]
    worker_pool.close()
    worker_pool.join()
    return files
download genomes from NCBI
def debug(self, request, message, extra_tags='', fail_silently=False):
    """Add a message with the ``DEBUG`` level."""
    add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags, fail_silently=fail_silently)
Add a message with the ``DEBUG`` level.
def close(self):
    """Close the plot and release its memory."""
    from matplotlib.pyplot import close
    # Reset axes to linear scale before clearing; presumably this avoids
    # issues clearing log-scaled axes -- TODO confirm against matplotlib docs.
    for ax in self.axes[::-1]:
        ax.set_xscale('linear')
        ax.set_yscale('linear')
        ax.cla()
    close(self)
Close the plot and release its memory.
def push(self, obj):
    """Push *obj* onto the thread-local stack, creating it on first use.

    Returns the stack list after the push.
    """
    stack = getattr(self._local, "stack", None)
    if stack is None:
        stack = []
        self._local.stack = stack
    stack.append(obj)
    return stack
Pushes a new item to the stack
def filter_repeated(self):
    """Determine whether a repeating check occurrence should be handled.

    Bails (stops processing) when there are not enough occurrences, or when
    this is not one of the every-``refresh/interval``-th repeats.
    """
    defaults = {
        'occurrences': 1,
        'interval': 30,
        'refresh': 1800
    }
    # Plugin-level settings override the built-in defaults.
    if isinstance(self.settings['sensu_plugin'], dict):
        defaults.update(self.settings['sensu_plugin'])
    occurrences = int(self.event['check'].get(
        'occurrences', defaults['occurrences']))
    interval = int(self.event['check'].get(
        'interval', defaults['interval']))
    refresh = int(self.event['check'].get(
        'refresh', defaults['refresh']))
    if self.event['occurrences'] < occurrences:
        self.bail('not enough occurrences')
    # Past the threshold on a 'create' action: always handle.
    if (self.event['occurrences'] > occurrences and
            self.event['action'] == 'create'):
        return
    # Handle only every Nth occurrence, N = refresh / interval.
    number = int(refresh / interval)
    if (number == 0 or
            (self.event['occurrences'] - occurrences) % number == 0):
        return
    self.bail('only handling every ' + str(number) + ' occurrences')
Determine whether a check is repeating.
def scale_image(image, new_width):
    """Resize *image* to *new_width*, preserving the aspect ratio.

    The horizontal dimension is doubled in the resize call -- presumably to
    compensate for non-square character cells in ASCII-art output (TODO
    confirm with callers).
    """
    original_width, original_height = image.size
    aspect_ratio = original_height / float(original_width)
    new_height = int(aspect_ratio * new_width)
    return image.resize((new_width * 2, new_height))
Resizes an image preserving the aspect ratio.
def run(bam_file, sample, out_dir):
    """Compute standard QC metrics for ChIP-seq.

    Returns a dict of metrics; empty when the sample has no main peaks file.
    """
    metrics = {}
    main_peaks = sample.get("peaks_files", {}).get("main")
    if main_peaks:
        metrics.update(_reads_in_peaks(bam_file, main_peaks, sample))
    return metrics
Standard QC metrics for chipseq
def saveFormatFileEnc(self, filename, encoding, format):
    """Dump this XML document to a file or URL with the given encoding and
    formatting flag; returns the libxml2 status code."""
    ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)
    return ret
Dump an XML document to a file or an URL.
def save_file(fullpath, entry):
    """Save a message file out, without mangling the headers.

    Writes headers, a blank separator line, then the payload to a temp file,
    and atomically moves it into place at *fullpath*.
    """
    with tempfile.NamedTemporaryFile('w', delete=False) as tmp:
        tmp_name = tmp.name
        for header, value in entry.items():
            print('{}: {}'.format(header, str(value)), file=tmp)
        print('', file=tmp)
        tmp.write(entry.get_payload())
    shutil.move(tmp_name, fullpath)
Save a message file out, without mangling the headers
def unfix_parameters(self):
    """Unfix every weight and bias parameter of the network."""
    for pair in zip(self.W_list, self.b_list):
        for param in pair:
            param.unfix()
Helper function that unfixes all parameters
def secure(self):
    """Restrict the cache/update sockets to owner-only (0600) access."""
    log.debug('ConCache securing sockets')
    for sock_path in (self.cache_sock, self.update_sock, self.upd_t_sock):
        if os.path.exists(sock_path):
            os.chmod(sock_path, 0o600)
secure the sockets for root-only access
def _get_snpeff_cmd(cmd_name, datadir, data, out_file):
    """Retrieve the snpEff base command line.

    Builds JVM options from configured resources (raising memory with core
    count), sets a local java tmpdir next to the output, and forces the
    bundled java onto PATH.
    """
    resources = config_utils.get_resources("snpeff", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"])
    jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust": {"direction": "increase", "maximum": "30000M", "magnitude": max(2, dd.get_cores(data))}}})
    memory = " ".join(jvm_opts)
    snpeff = config_utils.get_program("snpEff", data["config"])
    java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp"))
    export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath())
    # NOTE: format(**locals()) interpolates the local names above (export,
    # snpeff, memory, java_args, cmd_name, datadir); renaming any of them
    # would silently break this template.
    cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}"
    return cmd.format(**locals())
Retrieve snpEff base command line.
def delay_1(year):
    """First new-year delay computation for the given year.

    Derives the elapsed months and 'parts' for *year*, converts them to a day
    count, and advances one day when the raw day would start on a disallowed
    weekday.
    """
    elapsed_months = trunc((235 * year - 234) / 19)
    elapsed_parts = 12084 + 13753 * elapsed_months
    day = trunc(elapsed_months * 29 + elapsed_parts / 25920)
    # Postpone by one day for certain weekdays.
    day += 1 if (3 * (day + 1)) % 7 < 3 else 0
    return day
Test for delay of start of new year, to avoid it beginning on certain disallowed weekdays.
def draw(self):
    """Draw the text (and a blinking cursor when focused) into the window."""
    if not self.visible:
        return
    if self.focus:
        # Outline the widget to show it has keyboard focus.
        pygame.draw.rect(self.window, self.focusColor, self.focusedImageRect, 1)
    self.window.blit(self.textImage, self.loc)
    if self.focus:
        # Advance the blink timer; toggle visibility each cursorSwitchMs.
        self.cursorMsCounter = self.cursorMsCounter + self.clock.get_time()
        if self.cursorMsCounter >= self.cursorSwitchMs:
            self.cursorMsCounter = self.cursorMsCounter % self.cursorSwitchMs
            self.cursorVisible = not self.cursorVisible
        if self.cursorVisible:
            # Pixel offset of the cursor = rendered width of text before it.
            cursorOffset = self.font.size(self.text[:self.cursorPosition])[0]
            if self.cursorPosition > 0:
                cursorOffset = cursorOffset - 1
            if cursorOffset < self.width:
                self.cursorLoc[0] = self.loc[0] + cursorOffset
                self.window.blit(self.cursorSurface, self.cursorLoc)
    self.clock.tick()
Draws the Text in the window.
def convert(input_file_name, **kwargs):
    """Convert a CSV file to an HTML table.

    Honors ``delimiter``/``quotechar``/``no_header`` kwargs; synthesizes
    "Column N" headers when none are present.  Returns the rendered HTML with
    frozen JS assets.
    """
    delimiter = kwargs["delimiter"] or ","
    quotechar = kwargs["quotechar"] or "|"
    if six.PY2:
        delimiter = delimiter.encode("utf-8")
        quotechar = quotechar.encode("utf-8")
    with open(input_file_name, "rb") as input_file:
        reader = csv.reader(input_file, encoding="utf-8", delimiter=delimiter, quotechar=quotechar)
        csv_headers = []
        if not kwargs.get("no_header"):
            csv_headers = next(reader)
        # Skip completely empty rows.
        csv_rows = [row for row in reader if row]
        if not csv_headers and len(csv_rows) > 0:
            end = len(csv_rows[0]) + 1
            csv_headers = ["Column {}".format(n) for n in range(1, end)]
    html = render_template(csv_headers, csv_rows, **kwargs)
    return freeze_js(html)
Convert CSV file to HTML table
def determine_2(self, container_name, container_alias, meta, val):
    """Return the explicit alias when given, otherwise derive one from the
    container name (text after the last ':', with '/' mapped to '-')."""
    if container_alias is not NotSpecified:
        return container_alias
    tail = container_name[container_name.rfind(":") + 1:]
    return tail.replace('/', '-')
Default the alias to the name of the container
def read_nb_content(cells, mod_name):
    "Build a dictionary containing the position of the `cells`."
    # Maps each SHOW_DOC_RE match in a code cell's source to that cell's index.
    # NOTE(review): mod_name is accepted but unused here -- verify callers.
    doc_fns = {}
    for i, cell in enumerate(cells):
        if cell['cell_type'] == 'code':
            for match in SHOW_DOC_RE.findall(cell['source']):
                doc_fns[match] = i
    return doc_fns
Build a dictionary containing the position of the `cells`.
def _read_single_point_data(self):
    """Parse the final SCF energy from single-point calculation output.

    Stores ``self.data['final_energy']`` as a float, or None when the pattern
    is absent from ``self.text``.
    """
    temp_dict = read_pattern(
        self.text, {
            "final_energy":
                r"\s*SCF\s+energy in the final basis set\s+=\s*([\d\-\.]+)"
        })
    matches = temp_dict.get('final_energy')
    # Idiom fix: compare with None using identity, not ``== None``.
    if matches is None:
        self.data['final_energy'] = None
    else:
        # read_pattern returns a list of tuple matches; take the last one.
        self.data['final_energy'] = float(matches[-1][0])
Parses final free energy information from single-point calculations.
def paused_partitions(self):
    """Return the current set of paused TopicPartitions."""
    return {tp for tp in self.assignment if self.is_paused(tp)}
Return current set of paused TopicPartitions.
def csvtolist(inputstr):
    """Split a CSV-formatted string into a list of fields."""
    parsed = csv.reader([inputstr], skipinitialspace=True)
    fields = []
    for record in parsed:
        fields.extend(record)
    return fields
converts a csv string into a list
def benchmark(N, gates):
    """Create and run a random circuit with N qubits and roughly *gates* gates.

    Applies H to every qubit, then (gates-N)//3 random X/T/CNOT triples, and
    returns the final state's tensor.
    """
    qubits = list(range(0, N))
    ket = qf.zero_state(N)
    for n in range(0, N):
        ket = qf.H(n).run(ket)
    for _ in range(0, (gates - N) // 3):
        qubit0, qubit1 = random.sample(qubits, 2)
        ket = qf.X(qubit0).run(ket)
        ket = qf.T(qubit1).run(ket)
        ket = qf.CNOT(qubit0, qubit1).run(ket)
    return ket.vec.tensor
Create and run a circuit with N qubits and given number of gates
def load(self, path):
    """Load a manifest at *path*, or return a cached one if still current.

    Cache entries are keyed by normalized path and invalidated by mtime.
    """
    path = os.path.normpath(path)
    mtime = os.stat(path).st_mtime
    # Rebuild when unseen or when the file changed on disk.
    if path not in self or self[path].mtime != mtime:
        manifest = self.build(path)
        self[path] = self.manifest_mod(manifest, mtime)
    return self[path].manifest
Load a manifest at path or return a suitable manifest already loaded.
def to_snake_case(s):
    """Convert a camel-case identifier to snake_case.

    Inserts an underscore before each uppercase letter that follows a
    character which is neither an underscore nor another uppercase letter.
    """
    def _split(match):
        return match.group(1) + '_' + match.group(2).lower()
    return re.sub('([^_A-Z])([A-Z])', _split, s)
Converts camel-case identifiers to snake-case.
def cleanup(self):
    """Release resources used during shell execution.

    Cancels outstanding futures, shuts down the executor, and closes the SSH
    connection if a transport is active.
    """
    for future in self.futures:
        future.cancel()
    # Fix: ``wait`` is a boolean flag -- the original passed 10, which was
    # merely truthy, not a timeout.
    self.executor.shutdown(wait=True)
    # Idiom fix: identity comparison with None.
    if self.ssh.get_transport() is not None:
        self.ssh.close()
Release resources used during shell execution
def division_content(self, election_day, division, special=False):
    """Return serialized content for a division page.

    Fetches both the page-specific and page-type-level content blocks and
    combines them with the site-wide content.
    """
    from electionnight.models import PageType
    division_type = ContentType.objects.get_for_model(division)
    page_type = PageType.objects.get(
        model_type=division_type,
        election_day=election_day,
        division_level=division.level,
    )
    page_content = self.get(
        content_type__pk=division_type.pk,
        object_id=division.pk,
        election_day=election_day,
        special_election=special,
    )
    page_type_content = self.get(
        content_type=ContentType.objects.get_for_model(page_type),
        object_id=page_type.pk,
        election_day=election_day,
    )
    return {
        "site": self.site_content(election_day)["site"],
        "page_type": self.serialize_content_blocks(page_type_content),
        "page": self.serialize_content_blocks(page_content),
    }
Return serialized content for a division page.
def filter_objects_by_section(self, rels, section):
    """Build a queryset of all objects related (via *rels*) to any section in
    the subtree rooted at *section*."""
    subtree = section.get_descendants(include_self=True)
    kwargs_list = [{'%s__in' % rel.field.name: subtree} for rel in rels]
    # OR together one Q object per relation.
    q = Q(**kwargs_list[0])
    for kwargs in kwargs_list[1:]:
        q |= Q(**kwargs)
    return self.get_manager(get_item_model_class()).filter(q).distinct()
Build a queryset containing all objects in the section subtree.
def _checkout(self):
    """Mount the image filesystem (ostree storage) at ``self.mount_point``."""
    cmd = ["atomic", "mount", "--storage", "ostree", self.ref_image_name, self.mount_point]
    self._run_and_log(cmd, self.ostree_path, "Failed to mount selected image as an ostree repo.")
check out the image filesystem on self.mount_point
def _get_distance_term(self, C, rjb, mag): c_3 = self._get_anelastic_coeff(C) rval = np.sqrt(rjb ** 2. + C["h"] ** 2.) return (C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])) *\ np.log(rval / self.CONSTS["Rref"]) +\ c_3 * (rval - self.CONSTS["Rref"])
Returns the general distance scaling term - equation 2
def parallel_combine_variants(orig_files, out_file, ref_file, config, run_parallel):
    """Combine variants in parallel by chromosome, concatenating final outputs."""
    file_key = "vcf_files"

    def split_by_region(data):
        # Build one per-contig output path under "<base>-regions/".
        base, ext = utils.splitext_plus(os.path.basename(out_file))
        args = []
        for region in [x.name for x in ref.file_contigs(ref_file, config)]:
            region_out = os.path.join(os.path.dirname(out_file), "%s-regions" % base, "%s-%s%s" % (base, region, ext))
            utils.safe_makedir(os.path.dirname(region_out))
            args.append((region_out, ref_file, config, region))
        return out_file, args
    # Work on a copy so the caller's config is not mutated.
    config = copy.deepcopy(config)
    config["file_key"] = file_key
    prep_files = run_multicore(p_bgzip_and_index, [[x, config] for x in orig_files], config)
    items = [[{file_key: prep_files}]]
    parallel_split_combine(items, split_by_region, run_parallel, "merge_variant_files", "concat_variant_files", file_key, ["region", "sam_ref", "config"], split_outfile_i=0)
    return out_file
Combine variants in parallel by chromosome, concatenating final outputs.
def graph_response(graph, format):
    """Return a Flask response tuple serializing an RDF *graph* in the
    requested *format* (404 when the format is unknown)."""
    fmt = guess_format(format)
    if not fmt:
        abort(404)
    headers = {
        'Content-Type': RDF_MIME_TYPES[fmt]
    }
    kwargs = {}
    if fmt == 'json-ld':
        # JSON-LD serialization needs the shared context document.
        kwargs['context'] = context
    if isinstance(graph, RdfResource):
        graph = graph.graph
    return graph.serialize(format=fmt, **kwargs), 200, headers
Return a proper flask response for a RDF resource given an expected format.
def assertpath(path_, msg='', **kwargs):
    """Assert that a path exists (no-op when NO_ASSERTS is set).

    Raises AssertionError when the path is None, empty, or fails checkpath.
    """
    if NO_ASSERTS:
        return
    if path_ is None:
        # BUG FIX: the original used one %s placeholder with a two-element
        # tuple, which raised TypeError instead of the intended message.
        raise AssertionError('path=%r is None! %s' % (path_, msg))
    if path_ == '':
        raise AssertionError('path=%r is the empty string! %s' % (path_, msg))
    if not checkpath(path_, **kwargs):
        raise AssertionError('path=%r does not exist! %s' % (path_, msg))
Asserts that a path exists
def to_dict(self):
    """Serialize this column's identifying fields for local caching."""
    return dict(schema=self.schema, table=self.table,
                name=self.name, type=self.type)
Serialize representation of the column for local caching.
def apply(self, coordinates):
    """Apply this distortion in place to the Cartesian *coordinates* of the
    affected atoms."""
    for atom_index in self.affected_atoms:
        coordinates[atom_index] = self.transformation * coordinates[atom_index]
Apply this distortion to Cartesian coordinates
def hard_reset(self):
    """Reset the iterator to the start, discarding any rolled-over cache."""
    if self.seq is not None and self.shuffle:
        random.shuffle(self.seq)
    if self.imgrec is not None:
        self.imgrec.reset()
    self.cur = 0
    self._allow_read = True
    # Drop the prefetch cache so stale roll-over data is ignored.
    self._cache_data = self._cache_label = self._cache_idx = None
Resets the iterator and ignores roll-over data
def GenerateFile(self, input_filename=None, output_filename=None):
    """Generate a file from a template, interpolating config values.

    Defaults the input to ``<output>.in`` and strips a trailing ``.in`` from
    the output name.
    """
    if input_filename is None:
        input_filename = output_filename + ".in"
    if output_filename[-3:] == ".in":
        output_filename = output_filename[:-3]
    logging.debug("Generating file %s from %s", output_filename, input_filename)
    with io.open(input_filename, "r") as fd:
        data = fd.read()
    with io.open(output_filename, "w") as fd:
        fd.write(config.CONFIG.InterpolateValue(data, context=self.context))
Generates a file from a template, interpolating config values.
def to_task(self):
    """Return a task object representing this MessageProcessor job.

    Ensures a countdown default, stamps a unique per-batch task name, and
    delegates to the parent implementation.
    """
    task_args = self.get_task_args()
    name = task_args.get('name', MESSAGE_PROCESSOR_NAME)
    # Idiom fix: ``'x' not in d`` rather than ``not 'x' in d``.
    if 'countdown' not in task_args:
        task_args['countdown'] = self.frequency
    task_args['name'] = "%s-%s-%s-%s" % (
        name, self.tag, self.current_batch, self.time_throttle)
    self.update_options(task_args=task_args)
    return super(MessageProcessor, self).to_task()
Return a task object representing this MessageProcessor job.
def skull_strip(dset,suffix='_ns',prefix=None,unifize=True):
    """Use FSL bet2 to strip the skull from the given anatomy dataset.

    Optionally intensity-unifizes first via AFNI 3dUnifize (thresholding at
    5% of the dataset max).  Returns False if dataset info cannot be read.
    """
    if prefix==None:
        prefix = nl.suffix(dset,suffix)
    unifize_dset = nl.suffix(dset,'_u')
    cmd = bet2 if bet2 else 'bet2'
    if unifize:
        info = nl.dset_info(dset)
        if info==None:
            nl.notify('Error: could not read info for dset %s' % dset,level=nl.level.error)
            return False
        cmd = os.path.join(fsl_dir,cmd) if fsl_dir else cmd
        # Zero out background voxels below 5% of the max before unifizing.
        cutoff_value = nl.max(dset) * 0.05
        nl.run(['3dUnifize','-prefix',unifize_dset,nl.calc(dset,'step(a-%f)*a' % cutoff_value)],products=unifize_dset)
    else:
        unifize_dset = dset
    nl.run([cmd,unifize_dset,prefix,'-w',0.5],products=prefix)
use bet to strip skull from given anatomy
def generate_table(self, rows):
    """Generate a PrettyTable object from a list of rows.

    Each row is a pair ``(cells, is_header)``; short rows are padded with
    "-" up to ``self.max_row_width``.
    """
    table = PrettyTable(**self.kwargs)
    # BUG FIX: the original iterated self.rows, silently ignoring the *rows*
    # argument.
    for row in rows:
        if len(row[0]) < self.max_row_width:
            appends = self.max_row_width - len(row[0])
            # BUG FIX: range(1, appends) padded one column short.
            for _ in range(appends):
                row[0].append("-")
        if row[1] is True:
            self.make_fields_unique(row[0])
            table.field_names = row[0]
        else:
            table.add_row(row[0])
    return table
Generates from a list of rows a PrettyTable object.
def view(self): print("") description, count = "", 0 if self.repo == "sbo": description = SBoGrep(self.name).description() else: PACKAGES_TXT = Utils().read_file(self.lib) for line in PACKAGES_TXT.splitlines(): if line.startswith(self.name + ":"): description += line[len(self.name) + 2:] + "\n" count += 1 if count == 11: break if description: print("{0}{1}{2}".format(self.COLOR, description, self.meta.color["ENDC"])) else: self.msg.pkg_not_found("", self.name, "No matching", "\n") raise SystemExit(1) if description and self.repo == "sbo": print("")
Print package description by repository
def hash(self):
    """Return (and memoize) clang's hash of this cursor as an int."""
    try:
        return self._hash
    except AttributeError:
        self._hash = conf.lib.clang_hashCursor(self)
        return self._hash
Returns a hash of the cursor as an int.
def get(self, key):
    """Return the object stored under *key*, JSON-decoding the raw value and
    wrapping it in the configured object class."""
    raw = self._db[key]
    return self._object_class(json.loads(raw))
Get data associated with provided key.
def dumps(self):
    """Serialize as [lowercased class name, child dumps], avoiding the
    default's duplicate dump."""
    children = [child.dumps() for child in self.ts]
    return [type(self).__name__.lower(), children]
Override the default to avoid duplicate dump.
def calculate_hash_of_dir(directory, file_list=None):
    """Calculate an md5-based hash of a directory's file contents.

    Optionally restricted to paths in *file_list*.  Returns the hex digest,
    or -1 when the directory is missing or any file cannot be read.
    NOTE(review): files are visited in os.walk order, which is not sorted --
    the hash is only stable on a given filesystem layout.
    """
    md5_hash = md5()
    if not os.path.exists(directory):
        return -1
    try:
        for subdir, dirs, files in os.walk(directory):
            for _file in files:
                file_path = os.path.join(subdir, _file)
                if file_list is not None and file_path not in file_list:
                    continue
                try:
                    _file_object = open(file_path, 'rb')
                except Exception:
                    # BUG FIX: the original called _file_object.close() here,
                    # but the name is unbound when open() itself fails.
                    return -1
                with _file_object:
                    while 1:
                        buf = _file_object.read(4096)
                        if not buf:
                            break
                        md5_hash.update(md5(buf).hexdigest().encode())
    except Exception:
        return -1
    return md5_hash.hexdigest()
Calculate hash of directory.
def first_seen(self, first_seen):
    """Set the group's firstSeen timestamp, formatted as ISO-8601 with a
    literal 'Z' suffix."""
    self._group_data['firstSeen'] = self._utils.format_datetime(
        first_seen, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
Set Document first seen.
def cublasCtpsv(handle, uplo, trans, diag, n, AP, x, incx):
    """Solve a complex triangular-packed system with one right-hand side.

    Thin ctypes wrapper over cublasCtpsv_v2; raises via cublasCheckStatus on
    a non-success status.
    """
    status = _libcublas.cublasCtpsv_v2(handle, _CUBLAS_FILL_MODE[uplo], _CUBLAS_OP[trans], _CUBLAS_DIAG[diag], n, int(AP), int(x), incx)
    cublasCheckStatus(status)
Solve complex triangular-packed system with one right-hand side.
def open_python(self, message, namespace):
    """Open an interactive Python console with tab completion over
    *namespace*, showing *message* as the banner."""
    from code import InteractiveConsole
    import readline
    import rlcompleter
    readline.set_completer(rlcompleter.Completer(namespace).complete)
    readline.parse_and_bind('tab: complete')
    console = InteractiveConsole(namespace)
    console.interact(message)
Open interactive python console
def example(fn):
    """Decorator: wrap *fn* to print its name and output, registering the
    wrapper in the module-level ``examples`` list.

    Ctrl-C skips the current example instead of aborting the run.
    """
    @functools.wraps(fn)
    def wrapped():
        try:
            sys.stdout.write('Running: %s\n' % fn.__name__)
            fn()
            sys.stdout.write('\n')
        except KeyboardInterrupt:
            sys.stdout.write('\nSkipping example.\n\n')
            # Brief pause so a second Ctrl-C can abort entirely.
            time.sleep(0.2)
    examples.append(wrapped)
    return wrapped
Wrap the examples so they generate readable output
def unit_conversion(thing, units, length=False):
    """Convert base data between metric, imperial, or nautical units.

    Returns a (value, unit-label) tuple.  NOTE(review): returns the bare
    string 'n/a' (not a tuple) for 'n/a' input -- callers appear to rely on
    this inconsistency; verify before changing.
    """
    if 'n/a' == thing:
        return 'n/a'
    try:
        thing = round(thing * CONVERSION[units][0 + length], 2)
    except TypeError:
        # Non-numeric input: flag the value but still return the unit label.
        thing = 'fubar'
    return thing, CONVERSION[units][2 + length]
converts base data between metric, imperial, or nautical units
def run_tensorboard(run_id, tflog_id):
    """Launch TensorBoard for a given run ID and log ID of that run, then
    redirect the client to the spawned instance."""
    data = current_app.config["data"]
    run = data.get_run_dao().get(run_id)
    base_dir = Path(run["experiment"]["base_dir"])
    log_dir = Path(run["info"]["tensorflow"]["logdirs"][tflog_id])
    # Relative log dirs are resolved against the experiment's base dir.
    if log_dir.is_absolute():
        path_to_log_dir = log_dir
    else:
        path_to_log_dir = base_dir.joinpath(log_dir)
    port = int(tensorboard.run_tensorboard(str(path_to_log_dir)))
    url_root = request.url_root
    # Reuse the host the client reached us on for the redirect target.
    url_parts = re.search("://([^:/]+)", url_root)
    redirect_to_address = url_parts.group(1)
    return redirect("http://%s:%d" % (redirect_to_address, port))
Launch TensorBoard for a given run ID and log ID of that run.
def _create_placeholders(self):
    """Create the computational graph's int32 input/label placeholders, each
    shaped (batch_size, num_steps)."""
    self.input_data = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
    self.input_labels = tf.placeholder(
        tf.int32, [self.batch_size, self.num_steps])
Create the computational graph's placeholders.
def cfitsio_version(asfloat=False):
    """Return the linked cfitsio version, as an 'X.YYY' string by default or
    a float when *asfloat* is true."""
    version_string = '%0.3f' % _fitsio_wrap.cfitsio_version()
    return float(version_string) if asfloat else version_string
Return the cfitsio version as a string.
def findrec(s, data):
    """Find all records in *data* whose first field equals *s*, returning
    fields 1-4 of each as a list of lists."""
    return [[rec[1], rec[2], rec[3], rec[4]] for rec in data if s == rec[0]]
finds all the records belonging to s in data
def remove(id_):
    """Remove the callback registered under *id_* and cancel its schedule.

    Returns the cancelled thread, or None when no callback was registered.
    """
    with LOCK:
        thread = REGISTRY.pop(id_, None)
        if thread is not None:
            thread.cancel()
        return thread
Remove the callback and its schedule
def right(ctx, text, num_chars):
    """Return the last *num_chars* characters of *text* (empty string for
    zero; raises ValueError for a negative count)."""
    count = conversions.to_integer(num_chars, ctx)
    if count < 0:
        raise ValueError("Number of chars can't be negative")
    if count == 0:
        return ''
    return conversions.to_string(text, ctx)[-count:]
Returns the last characters in a text string
def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False, pin_device_id=0, data_buffer_lock=None):
    """Fetcher loop: pull (idx, batch) pairs from *data_queue* into the
    reorder dict *data_buffer* until a None index sentinel arrives."""
    while True:
        idx, batch = data_queue.get()
        if idx is None:
            # Sentinel: producer is done.
            break
        if pin_memory:
            batch = _as_in_context(batch, context.cpu_pinned(pin_device_id))
        else:
            batch = _as_in_context(batch, context.cpu())
        if data_buffer_lock is not None:
            with data_buffer_lock:
                data_buffer[idx] = batch
        else:
            data_buffer[idx] = batch
Fetcher loop for fetching data from queue and put in reorder dict.
def transform_aglistener_output(result):
    """Trim an Availability Group Listener result to the fields shown in CLI
    output; returns the raw result unchanged on unexpected shapes."""
    from collections import OrderedDict
    from msrestazure.tools import parse_resource_id
    try:
        # Prefer an explicit resource_group attribute; fall back to parsing it
        # out of the resource id.
        resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
        output = OrderedDict([('id', result.id), ('name', result.name), ('provisioningState', result.provisioning_state), ('port', result.port), ('resourceGroup', resource_group)])
        if result.load_balancer_configurations is not None:
            output['loadBalancerConfigurations'] = format_load_balancer_configuration_list(result.load_balancer_configurations)
        return output
    except AttributeError:
        return result
Transforms the result of Availability Group Listener to eliminate unnecessary parameters.
def hparams_to_batching_scheme(hparams, drop_long_sequences=False, shard_multiplier=1, length_multiplier=1):
    """Wrapper around batching_scheme, pulling batch/length settings from
    *hparams*."""
    return batching_scheme(
        batch_size=hparams.batch_size,
        min_length=hparams.min_length,
        max_length=hparams.max_length,
        min_length_bucket=hparams.min_length_bucket,
        length_bucket_step=hparams.length_bucket_step,
        drop_long_sequences=drop_long_sequences,
        shard_multiplier=shard_multiplier,
        length_multiplier=length_multiplier)
Wrapper around _batching_scheme with hparams.
def _ident(*elements): try: return len(set(elements)) == 1 except TypeError: for e1, e2 in zip(elements, elements[1:]): if e1 != e2: return False return True
Return True if all sequences are equal.
def ps():
    """Print a table of all running containers on a host, sorted by name."""
    containers = get_containers()
    containers = [pretty_container(c) for c in containers]
    print_table(containers, ['name', 'ip', 'ports', 'created', 'image'], sort='name')
Print a table of all running containers on a host
def _next_move_direction(self):
    """Pick a move at random, avoiding the reverse of the previous move.

    NOTE(review): indexes 1..len(self.moves) inclusive -- this assumes
    ``self.moves`` supports 1-based access (e.g. a dict or padded sequence);
    with a plain 0-based list the top value would raise IndexError.  Also,
    ``(move + 3) % nmoves`` is presumably the opposite direction for this
    move ordering -- verify both against the definition of self.moves.
    """
    nmoves = len(self.moves)
    move = np.random.randint(1, nmoves+1)
    while self.prev_move == (move + 3) % nmoves:
        move = np.random.randint(1, nmoves+1)
    self.prev_move = move
    return np.array(self.moves[move])
pick a move at random from the list of moves
def insert(self, resource, doc_or_docs, **kwargs):
    """Index new documents into Elasticsearch; a document carrying ``_id``
    must be new.  Returns the list of (possibly server-assigned) ids."""
    ids = []
    kwargs.update(self._es_args(resource))
    for doc in doc_or_docs:
        self._update_parent_args(resource, kwargs, doc)
        _id = doc.pop('_id', None)
        res = self.elastic(resource).index(body=doc, id=_id, **kwargs)
        # Record the id Elasticsearch assigned (or restore the supplied one).
        doc.setdefault('_id', res.get('_id', _id))
        ids.append(doc.get('_id'))
    self._refresh_resource_index(resource)
    return ids
Insert document, it must be new if there is ``_id`` in it.
def _get_domain_text_of_authoritative_zone(self):
    """Find the authoritative zone for ``self.domain`` in the provider's
    domain table, trimming leading subdomain labels until a match is found.

    Updates ``self.domain`` to the matched zone and returns the table text
    node; asserts when the domain is not present.
    """
    from bs4 import BeautifulSoup
    zones_response = self.session.get(self.URLS['domain_list'])
    self._log('Zone', zones_response)
    assert zones_response.status_code == 200, \
        'Could not retrieve domain list due to a network error.'
    html = BeautifulSoup(zones_response.content, 'html.parser')
    self._log('Zone', html)
    domain_table = html.find('table', {'id': 'cp_domain_table'})
    assert domain_table is not None, 'Could not find domain table'
    domain = self.domain or ''
    domain_text = None
    subdomains = domain.split('.')
    while True:
        domain = '.'.join(subdomains)
        LOGGER.debug('Check if %s has own zone', domain)
        domain_text = domain_table.find(string=domain)
        # Stop on a match, or once only a bare second-level domain remains.
        if domain_text is not None or len(subdomains) < 3:
            break
        subdomains.pop(0)
    self.domain = domain
    assert domain_text is not None, \
        'The domain does not exist on Easyname.'
    return domain_text
Get the authoritative name zone.
def _getID(self):
    """Return the primary-key values as a list, ordered by ``_sqlPrimary``.

    References to other Forgetter objects are saved first (if new) and
    replaced by their single-column IDs.
    """
    id_values = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # Referenced object must be persisted before we can use its ID.
            if value._new:
                value.save()
            try:
                (value,) = value._getID()
            except ValueError:
                # BUG FIX: the original raised a plain string (always a
                # TypeError on Python 3) from a bare ``except:``.
                raise TypeError(
                    "Unsupported: part %s of %s primary key is a reference "
                    "to %s with a multiple-value primary key" %
                    (key, self.__class__, value.__class__))
        id_values.append(value)
    return id_values
Get the ID values as a tuple annotated by sqlPrimary
def pop_trigger(data):
    """Pop the trigger name and its trigger args out of *data* (mutating it).

    Returns ``(trigger_name, trigger_args)``; raises for unknown triggers.
    """
    trigger_name = data.pop('trigger')
    arg_names_by_trigger = {
        'date': ('run_date', 'timezone'),
        'interval': ('weeks', 'days', 'hours', 'minutes', 'seconds',
                     'start_date', 'end_date', 'timezone'),
        'cron': ('year', 'month', 'day', 'week', 'day_of_week', 'hour',
                 'minute', 'second', 'start_date', 'end_date', 'timezone'),
    }
    if trigger_name not in arg_names_by_trigger:
        raise Exception('Trigger %s is not supported.' % trigger_name)
    trigger_args = {name: data.pop(name)
                    for name in arg_names_by_trigger[trigger_name]
                    if name in data}
    return trigger_name, trigger_args
Pops trigger and trigger args from a given dict.
def timex_ends(self):
    """The list of end positions of ``timexes`` layer elements."""
    if not self.is_tagged(TIMEXES):
        # Lazily tag timexes on first access.
        self.tag_timexes()
    return self.ends(TIMEXES)
The list of end positions of ``timexes`` layer elements.
def add_definitions(definitions, operations):
    """Register named schemas discovered in *operations* into the
    *definitions* mapping (keeping any existing entries)."""
    for definition_schema in iter_definitions(definitions, operations):
        if definition_schema is None:
            continue
        # String entries are references, not inline schemas.
        if isinstance(definition_schema, str):
            continue
        for name, schema in iter_schemas(definition_schema):
            definitions.setdefault(name, swagger.Schema(schema))
Add definitions to swagger.
def reminders_list(self, **kwargs) -> SlackResponse:
    """List all reminders created by or for a given user (Slack
    ``reminders.list``; requires a user (xoxp) token)."""
    self._validate_xoxp_token()
    return self.api_call("reminders.list", http_verb="GET", params=kwargs)
Lists all reminders created by or for a given user.
def make_valid_xml_name(key, attr):
    """Test an XML element name and fix it if invalid.

    Returns a ``(key, attr)`` pair; invalid names are progressively repaired
    (digit prefix, space replacement) or moved into a ``name`` attribute.
    """
    LOG.info('Inside make_valid_xml_name(). Testing key "%s" with attr "%s"' % (
        unicode_me(key), unicode_me(attr))
    )
    key = escape_xml(key)
    attr = escape_xml(attr)
    if key_is_valid_xml(key):
        return key, attr
    # Purely numeric names get an 'n' prefix.
    if key.isdigit():
        return 'n%s' % (key), attr
    # Spaces replaced by underscores may make the name valid.
    if key_is_valid_xml(key.replace(' ', '_')):
        return key.replace(' ', '_'), attr
    # Last resort: move the original name into an attribute.
    attr['name'] = key
    key = 'key'
    return key, attr
Tests an XML name and fixes it if invalid
def shortDescription(self):
    """Return the classDescription's short description, falling back to its
    full description, or '' when no classDescription exists.  Overrides the
    Event base class property."""
    class_desc = getattr(self, 'classDescription', None)
    if not class_desc:
        return ''
    short = getattr(class_desc, 'shortDescription', '')
    return short or getattr(class_desc, 'description', '')
Overrides property from Event base class.
def write_to_file(self, f):
    """Write the configuration to a file-like object in git-config style.

    Sections are stored as 1- or 2-tuples: ``(section,)`` or
    ``(section, subsection)``.
    """
    # Fix: ``iteritems`` is Python-2-only; ``items`` works on both 2 and 3.
    for section, values in self._values.items():
        try:
            section_name, subsection_name = section
        except ValueError:
            (section_name, ) = section
            subsection_name = None
        if subsection_name is None:
            f.write("[%s]\n" % section_name)
        else:
            f.write("[%s \"%s\"]\n" % (section_name, subsection_name))
        for key, value in values.items():
            f.write("%s = %s\n" % (key, _escape_value(value)))
Write configuration to a file-like object.
def switch_or_run(cmd, venv_name=None):
    """Switch the shell into the named virtualenv, or run *cmd* inside it.

    When switching, writes shell source lines (activation, env vars, optional
    autojump cd) and exits with the eval sentinel code.
    """
    if cmd:
        return _run(venv_name, cmd)
    inenv = InenvManager()
    if not os.getenv(INENV_ENV_VAR):
        # Shell helper not installed; tell the user how to enable switching.
        activator_warn(inenv)
        return
    else:
        venv = inenv.get_prepped_venv(venv_name)
        inenv.clear_extra_source_file()
        inenv.write_extra_source_file("source {}".format(venv.activate_shell_file))
        inenv.write_extra_source_file(override_envars_and_deactivate(inenv.get_envvars(venv_name)))
        if autojump_enabled():
            directory = inenv.guess_contents_dir(venv_name)
            inenv.write_extra_source_file('cd {}'.format(directory))
            click.secho("Jumping to {}".format(directory), fg='green')
        sys.exit(EVAL_EXIT_CODE)
Switch or run in this env
def from_statement(cls, statement):
    """Create a selection function from a parsed statement, dispatching on
    the function name in ``statement[0]``."""
    func_name = statement[0]
    if func_name in ["TS", "TIMESTAMP", "UTCTIMESTAMP", "UTCTS"]:
        return TimestampFunction.from_statement(statement)
    if func_name in ["NOW", "UTCNOW"]:
        return NowFunction.from_statement(statement)
    raise SyntaxError("Unknown function %r" % statement[0])
Create a selection function from a statement
def Start(self):
    """Upload the rules to the foreman and, thus, start the hunt.

    No-op when the hunt is already in the STARTED state.
    """
    if self.hunt_obj.Get(self.hunt_obj.Schema.STATE) == "STARTED":
        return
    self.context.duration = self.runner_args.expiry_time
    self.context.next_client_due = rdfvalue.RDFDatetime.Now()
    self._CreateAuditEvent("HUNT_STARTED")
    self.hunt_obj.Set(self.hunt_obj.Schema.STATE("STARTED"))
    self.hunt_obj.Flush()
    if self.runner_args.add_foreman_rules:
        self._AddForemanRule()
This uploads the rules to the foreman and, thus, starts the hunt.
def _is_modification_or_activity(feature):
    """Classify a BioPAX feature as 'activity' or 'modification'.

    Returns None when the feature is not a ModificationFeature or has no
    modification type.
    """
    if not (isinstance(feature, _bp('ModificationFeature')) or \
            isinstance(feature, _bpimpl('ModificationFeature'))):
        return None
    mf_type = feature.getModificationType()
    if mf_type is None:
        return None
    mf_type_terms = mf_type.getTerm().toArray()
    # Activity-state vocabulary terms mark the feature as an activity change.
    for term in mf_type_terms:
        if term in ('residue modification, active',
                    'residue modification, inactive',
                    'active', 'inactive'):
            return 'activity'
    return 'modification'
Return True if the feature is a modification
def _debug(self, out, print_prefix=True): if self.debug: if print_prefix: pre = self.__class__.__name__ if hasattr(self, 'debug_prefix'): pre = getattr(self, 'debug_prefix') sys.stderr.write("%s: " % pre) sys.stderr.write(out)
Print out to stderr, if debugging is enabled.
def lookup(self):
    """Lookup an existing social account (and token), if any.

    On a provider/uid match, adopts the stored account and user, refreshes
    its extra_data, and reconciles the OAuth token.  Silently does nothing
    when no matching account exists.
    """
    assert not self.is_existing
    try:
        a = SocialAccount.objects.get(provider=self.account.provider, uid=self.account.uid)
        # Adopt the persisted account and keep its extra_data fresh.
        a.extra_data = self.account.extra_data
        self.account = a
        self.user = self.account.user
        a.save()
        if app_settings.STORE_TOKENS and self.token:
            # The in-memory token must not be persisted yet.
            assert not self.token.pk
            try:
                t = SocialToken.objects.get(account=self.account, app=self.token.app)
                t.token = self.token.token
                if self.token.token_secret:
                    t.token_secret = self.token.token_secret
                t.expires_at = self.token.expires_at
                t.save()
                self.token = t
            except SocialToken.DoesNotExist:
                self.token.account = a
                self.token.save()
    except SocialAccount.DoesNotExist:
        pass
Lookup existing account, if any.
def _get_data_collections(dnr_values, dhr_values, metadata, timestep, is_leap_year):
    """Return two hourly data collections: Direct Normal and Diffuse
    Horizontal irradiance (W/m2), sharing one analysis period."""
    analysis_period = AnalysisPeriod(timestep=timestep, is_leap_year=is_leap_year)
    dnr_header = Header(data_type=DirectNormalIrradiance(), unit='W/m2', analysis_period=analysis_period, metadata=metadata)
    direct_norm_rad = HourlyContinuousCollection(dnr_header, dnr_values)
    dhr_header = Header(data_type=DiffuseHorizontalIrradiance(), unit='W/m2', analysis_period=analysis_period, metadata=metadata)
    diffuse_horiz_rad = HourlyContinuousCollection(dhr_header, dhr_values)
    return direct_norm_rad, diffuse_horiz_rad
Return two data collections: Direct Normal and Diffuse Horizontal irradiance
def dumps():
    """Return a string representation of the FILTERS registry, keyed by
    resolved component names."""
    d = {}
    for k, v in FILTERS.items():
        d[dr.get_name(k)] = list(v)
    return _dumps(d)
Returns a string representation of the FILTERS dictionary.
def process_documentline(line, nanopubs_metadata):
    """Process a ``SET DOCUMENT <key> = <value>`` line from a BEL script.

    Stores the key/value pair into *nanopubs_metadata* (mutating it) and
    returns the dict.  Surrounding double quotes on the value are stripped.
    Raises AttributeError when the line does not match.
    """
    # Idiom fix: regex patterns should be raw strings so '\s' and '\w' are
    # not interpreted as (deprecated) string escapes.
    matches = re.match(r'SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line)
    key = matches.group(1)
    val = matches.group(2)
    nanopubs_metadata[key] = val
    return nanopubs_metadata
Process SET DOCUMENT line in BEL script
def total_core(self):
    """Collect the names of core genes present in the combined core FASTA,
    appending unseen gene names to ``self.coregenomes``."""
    corefile = os.path.join(self.reffilepath, self.analysistype, 'Escherichia', 'core_combined.fasta')
    for record in SeqIO.parse(corefile, 'fasta'):
        # Gene name is the portion of the record id before the first '-'.
        gene_name = record.id.split('-')[0]
        if gene_name not in self.coregenomes:
            self.coregenomes.append(gene_name)
Determine the total number of core genes present
def fai_from_bam(ref_file, bam_file, out_file, data):
    """Create a fai index containing only the contigs present in the BAM
    file; skipped when *out_file* is already newer than the BAM."""
    contigs = set([x.contig for x in idxstats(bam_file, data)])
    if not utils.file_uptodate(out_file, bam_file):
        with open(ref.fasta_idx(ref_file, data["config"])) as in_handle:
            with file_transaction(data, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    # Copy only non-blank fai lines whose contig is in the BAM.
                    for line in (l for l in in_handle if l.strip()):
                        if line.split()[0] in contigs:
                            out_handle.write(line)
    return out_file
Create a fai index with only contigs in the input BAM file.
def clear_document(self):
    """Clear ALL items from the document, resetting it to a clean state."""
    stores = (self._components, self._sequences, self._namespaces,
              self._models, self._modules, self._collections,
              self._annotations, self._functional_component_store,
              self._collection_store)
    for store in stores:
        store.clear()
Clears ALL items from document, reseting it to clean
def process_cpp(self, path, suffix):
    """Run cpplint on a single C++ file and record its error counts.

    Counts are stored in ``self.cpp_header_map`` for headers ('h' suffix)
    or ``self.cpp_src_map`` for sources, keyed by the file path string.
    """
    path_str = str(path)
    _cpplint_state.ResetErrorCounts()
    cpplint.ProcessFile(path_str, _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()
    error_counts = _cpplint_state.errors_by_category.copy()
    target = self.cpp_header_map if suffix == 'h' else self.cpp_src_map
    target[path_str] = error_counts
Process a cpp file.
def rollforward(self, date):
    """Roll *date* forward to the nearest start of year.

    Dates already on the offset are returned unchanged.
    """
    return date if self.onOffset(date) else date + YearBegin(month=self.month)
Roll date forward to nearest start of year
def cyvcf_add_filter(rec, name):
    """Add a FILTER value to a cyvcf2 record, avoiding duplicates.

    Existing filters are parsed from the semicolon-delimited FILTER
    string; the record's FILTER is always reassigned as a list.
    """
    existing = rec.FILTER.split(";") if rec.FILTER else []
    if name not in existing:
        existing.append(name)
    rec.FILTER = existing
    return rec
Add a FILTER value to a cyvcf2 record
def lstm_init_states(batch_size):
    """Return names and zero-filled NDArrays for the LSTM initial states."""
    hp = Hyperparams()
    shapes = lstm.init_states(batch_size=batch_size,
                              num_lstm_layer=hp.num_lstm_layer,
                              num_hidden=hp.num_hidden)
    names, arrays = [], []
    for state_name, state_shape in shapes:
        names.append(state_name)
        arrays.append(mx.nd.zeros(state_shape))
    return names, arrays
Returns a tuple of names and zero arrays for LSTM init states
def write_json_plan(self, proposed_layout, proposed_plan_file):
    """Dump the proposed layout as JSON to the given file for future usage."""
    serialized = json.dumps(proposed_layout)
    with open(proposed_plan_file, 'w') as plan_handle:
        plan_handle.write(serialized)
Dump proposed json plan to given output file for future usage.
def transformer_parsing_big():
    """HParams for parsing on WSJ semi-supervised."""
    hparams = transformer_big()
    # Overrides on top of the transformer_big baseline.
    overrides = {
        'max_length': 512,
        'shared_source_target_embedding': False,
        'learning_rate_warmup_steps': 4000,
        'layer_prepostprocess_dropout': 0.1,
        'batch_size': 2048,
        'learning_rate': 0.05,
    }
    for attr, value in overrides.items():
        setattr(hparams, attr, value)
    return hparams
HParams for parsing on WSJ semi-supervised.
def tag(self, *tag, **kwtags):
    """Tag a Property instance with a metadata dictionary.

    Accepts either a single positional dict or keyword arguments; both
    are merged into ``self._meta``. Returns self for chaining.

    Raises:
        TypeError: If positional arguments are anything other than one dict.
    """
    if tag:
        if len(tag) != 1 or not isinstance(tag[0], dict):
            raise TypeError('Tags must be provided as key-word arguments or '
                            'a dictionary')
        self._meta.update(tag[0])
    self._meta.update(kwtags)
    return self
Tag a Property instance with metadata dictionary
def cli(env, identifier, allocation, port, routing_type, routing_method):
    """Add a new load balancer service group."""
    manager = SoftLayer.LoadBalancerManager(env.client)
    _, loadbal_id = loadbal.parse_id(identifier)
    manager.add_service_group(loadbal_id,
                              allocation=allocation,
                              port=port,
                              routing_type=routing_type,
                              routing_method=routing_method)
    env.fout('Load balancer service group is being added!')
Adds a new load balancer service group.
def read_chunked(self):
    """Yield the container's data one partition at a time.

    Metadata is loaded first so ``npartitions`` is populated before
    iteration begins.
    """
    self._load_metadata()
    yield from map(self._get_partition, range(self.npartitions))
Return iterator over container fragments of data source
def init(self):
    """Init the connection to the rabbitmq server.

    Returns the opened channel, or None when export is disabled or the
    connection fails (failure is logged as critical).
    """
    if not self.export_enable:
        return None
    try:
        # Build the AMQP URL from the configured credentials/host.
        url = ('amqp://' + self.user + ':' + self.password + '@' +
               self.host + ':' + self.port + '/')
        connection = pika.BlockingConnection(pika.URLParameters(url))
        return connection.channel()
    except Exception as e:
        logger.critical("Connection to rabbitMQ failed : %s " % e)
        return None
Init the connection to the rabbitmq server.
def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id):
    """Delete the dummy interface to the router, then its network.

    Returns the failure result of the interface removal if it fails;
    otherwise returns the result of deleting the network's subnets.
    """
    removed = self.os_helper.delete_intf_router(None, None, rtr_id,
                                                {subnet_id})
    if not removed:
        return removed
    return self.os_helper.delete_network_all_subnets(net_id)
Delete the dummy interface to the router.
def sponsor_or_site(self, value):
    """Set the Originator value, validating against allowed responses.

    Raises:
        AttributeError: If value is not one of the valid responses.
    """
    if value in Comment.VALID_SPONSOR_OR_SITE_RESPONSES:
        self._sponsor_or_site = value
        return
    raise AttributeError("%s sponsor_or_site value of %s is not valid"
                         % (self.__class__.__name__, value))
Set Originator with validation of input
def cli(env, volume_id, reason, immediate):
    """Cancel existing snapshot space for a given volume."""
    manager = SoftLayer.FileStorageManager(env.client)
    confirmed = env.skip_confirmations or formatting.no_going_back(volume_id)
    if not confirmed:
        raise exceptions.CLIAbort('Aborted')
    cancelled = manager.cancel_snapshot_space(volume_id, reason, immediate)
    if not cancelled:
        click.echo('Unable to cancel snapshot space for file volume %s'
                   % volume_id)
    elif immediate:
        click.echo('File volume with id %s has been marked'
                   ' for immediate snapshot cancellation' % volume_id)
    else:
        click.echo('File volume with id %s has been marked'
                   ' for snapshot cancellation' % volume_id)
Cancel existing snapshot space for a given volume.