code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def print_env_info(key, out=sys.stderr):
    """If the given environment key is defined, print it to *out*."""
    value = os.getenv(key)
    if value is None:
        return
    print(key, "=", repr(value), file=out)
If given environment key is defined, print it out.
def _urls(self):
    """URL patterns for the tool linked to the ``_view`` method."""
    meta = self.model._meta
    info = (meta.app_label, meta.model_name, self.name)
    pattern = url(
        r'^%s/$' % self.name,
        self._view,
        name='%s_%s_%s' % info,
    )
    return [pattern]
URL patterns for tool linked to _view method.
def recvall(sock, size):
    """Receive exactly *size* bytes from a socket connection.

    Raises RuntimeError if the peer closes before *size* bytes arrive.
    """
    received = b""
    while len(received) < size:
        chunk = sock.recv(size - len(received))
        received += chunk
        if not chunk:
            # Empty recv means the connection was closed early.
            raise RuntimeError("did not receive data from socket "
                               "(size {}, got only {!r})".format(size, received))
    return received
Receive data of given size from a socket connection
def ReadRemoteFile(remote_file_path, hostname, ssh_key):
    """Reads a remote file into a string; raises IOError on failure."""
    command = 'sudo cat %s' % remote_file_path
    exit_code, output = RunCommandOnHost(command, hostname, ssh_key)
    if exit_code:
        raise IOError('Can not read remote path: %s' % (remote_file_path))
    return output
Reads a remote file into a string.
def auction(self, symbol='btcusd'):
    """Send a request for the latest auction info, return the response."""
    endpoint = '/v1/auction/' + symbol
    return requests.get(self.base_url + endpoint)
Send a request for latest auction info, return the response.
def _get_one(self, id):
    """Get one user from the db and turn it into a dict.

    Raises errors.NotFound when no user matches *id*.
    """
    try:
        return self._to_dict(models.User.objects.get(pk=id))
    except models.User.DoesNotExist:
        raise errors.NotFound()
Get one user from db and turn into dict
def wrap(node, tag):
    """Wrap the given tag around a node and return the new wrapper element."""
    wrapper = node.ownerDocument.createElement(tag)
    parent = node.parentNode
    if parent:
        # Take the node's place in the tree first, then adopt it.
        parent.replaceChild(wrapper, node)
    wrapper.appendChild(node)
    return wrapper
Wrap the given tag around a node.
def register(cls):
    """Register variable handling in YAML (idempotent)."""
    if cls.IS_LOADED:
        return
    cls.IS_LOADED = True
    yaml.add_constructor('!param', Parameter.parameter_constructor,
                         Loader=yaml.SafeLoader)
    yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor,
                         Loader=yaml.SafeLoader)
Register variable handling in YAML
def precursor_sequence(loci, reference):
    """Get the precursor sequence from the genome.

    ``loci`` is indexed as (_, chrom, start, end, strand); the produced BED
    region is strand-aware (``s=True``).
    """
    region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4])
    precursor = pybedtools.BedTool(str(region), from_string=True).sequence(
        fi=reference, s=True)
    # Use a context manager so the FASTA handle is closed
    # (the original leaked the handle from a bare open()).
    with open(precursor.seqfn) as handle:
        return handle.read().split("\n")[1]
Get sequence from genome
def search(self, pattern, start=None, limit=None, include_category=None):
    """Searches for domains that match a given pattern.

    ``start`` may be a timedelta (look-back window, default 30 days) or an
    absolute datetime; it is sent as epoch milliseconds.
    """
    params = dict()
    if start is None:
        start = datetime.timedelta(days=30)
    if isinstance(start, datetime.timedelta):
        moment = datetime.datetime.utcnow() - start
        params['start'] = int(time.mktime(moment.timetuple()) * 1000)
    elif isinstance(start, datetime.datetime):
        params['start'] = int(time.mktime(start.timetuple()) * 1000)
    else:
        raise Investigate.SEARCH_ERR
    if limit is not None and isinstance(limit, int):
        params['limit'] = limit
    if include_category is not None and isinstance(include_category, bool):
        params['includeCategory'] = str(include_category).lower()
    uri = self._uris['search'].format(quote_plus(pattern))
    return self.get_parse(uri, params)
Searches for domains that match a given pattern
def create_job_template(self, template):
    """Creates a job template via the API and returns the results payload."""
    endpoint = self._build_url('jobTemplates')
    headers = {'Content-Type': 'application/json'}
    payload = json.dumps(template)
    data = self._query_api('POST', endpoint, None, headers, payload)
    return data['results']
Creates a job template
def aggregate(self, **filters):
    """Conduct an aggregate query."""
    # NOTE(review): the URL template is filled from locals(); it presumably
    # references {self} and/or {filters} — confirm against URL.aggregate.
    url = URL.aggregate.format(**locals())
    return self.get_pages(url, **filters)
Conduct an aggregate query
def execute(self, argv=None):
    """Executes a command based on the given arguments."""
    if self.completion:
        self.autocomplete()
    namespace = self.get_parser().parse_args(argv)
    # Only dispatch when the parsed sub-command bound a handler.
    if hasattr(namespace, 'func'):
        self.run_command(namespace)
Executes command based on given arguments.
def _pfp__restore_snapshot(self, recurse=True):
    """Restore the snapshotted value without triggering any events."""
    super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
    if not recurse:
        return
    for child in self._pfp__children:
        child._pfp__restore_snapshot(recurse=recurse)
Restore the snapshotted value without triggering any events
def _inject_target(self, target_adaptor):
    """Inject a target, respecting all sources of dependencies."""
    target_cls = self._target_types[target_adaptor.type_alias]
    declared_deps = target_adaptor.dependencies
    # Lazily parse the addresses implied by the target type itself.
    implicit_deps = (
        Address.parse(spec,
                      relative_to=target_adaptor.address.spec_path,
                      subproject_roots=self._address_mapper.subproject_roots)
        for spec in target_cls.compute_dependency_specs(kwargs=target_adaptor.kwargs())
    )
    for dep in declared_deps:
        self._dependent_address_map[dep].add(target_adaptor.address)
    for dep in implicit_deps:
        self._implicit_dependent_address_map[dep].add(target_adaptor.address)
Inject a target, respecting all sources of dependencies.
def filter_by_IDs(self, ids, ID=None):
    """Keep only Measurements with the given IDs."""
    return self.filter_by_attr('ID', lambda value: value in ids, ID)
Keep only Measurements with given IDs.
def big_endian(self):
    """Return the big-endian attribute of the BFD file being processed."""
    if not self._ptr:
        raise BfdException("BFD not initialized")
    return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.IS_BIG_ENDIAN)
Return the big endian attribute of the BFD file being processed.
def to_struct(cls, name=None):
    """Convert the TreeModel into a compiled C struct via ROOT.

    Returns None when there are no Column attributes or compilation fails.
    """
    if name is None:
        name = cls.__name__
    basic_attrs = {attr_name: value
                   for attr_name, value in cls.get_attrs()
                   if isinstance(value, Column)}
    if not basic_attrs:
        return None
    members = ''.join('{0} {1};'.format(value.type.typename, attr_name)
                      for attr_name, value in basic_attrs.items())
    src = 'struct {0} {{'.format(name) + members + '};'
    if ROOT.gROOT.ProcessLine(src) != 0:
        return None
    return getattr(ROOT, name, None)
Convert the TreeModel into a compiled C struct
def msg2usernames(msg, processor=None, legacy=False, **config):
    """Return a set of FAS usernames associated with a message.

    Delegates entirely to the supplied *processor*.
    """
    return processor.usernames(msg, **config)
Return a set of FAS usernames associated with a message.
def destroy(self):
    """Unsubscribe the callback from the observable and drop all references."""
    # Release first, then null out so the object cannot be reused.
    self._observable.release(self._key, self._callback)
    self._observable = None
    self._key = None
    self._callback = None
Unsubscribes callback from observable
def comparison_operator_query(comparison_operator):
    """Generate a comparison-operator checking function.

    Returns a factory: given an expression (or a callable producing one), it
    yields a function that collects store keys whose indexed value satisfies
    ``comparison_operator(value, expression)``.
    """
    def _comparison_operator_query(expression):
        def _apply_comparison_operator(index, expression=expression):
            expected = expression() if callable(expression) else expression
            matches = []
            for value, store_keys in index.get_index().items():
                if comparison_operator(value, expected):
                    matches.extend(store_keys)
            return matches
        return _apply_comparison_operator
    return _comparison_operator_query
Generate comparison operator checking function.
def solve_sort(expr, vars):
    """Sort values on the LHS by the value they yield when passed to the RHS."""
    lhs_values = repeated.getvalues(__solve_for_repeated(expr.lhs, vars)[0])
    sort_expression = expr.rhs

    def sort_key(value):
        return solve(sort_expression, __nest_scope(expr.lhs, vars, value)).value

    ordered_values = ordered.ordered(lhs_values, key_func=sort_key)
    return Result(repeated.meld(*ordered_values), ())
Sort values on the LHS by the value they yield when passed to RHS.
def _enable_notifications_failed(self, dbus_error): if ((dbus_error.get_dbus_name() == 'org.bluez.Error.Failed') and ((dbus_error.get_dbus_message() == "Already notifying") or (dbus_error.get_dbus_message() == "No notify session started"))): return error = _error_from_dbus_error(dbus_error) self.service.device.characteristic_enable_notifications_failed(characteristic=self, error=error)
Called when notification enabling has failed.
def add_handler(cls, level, fmt, colorful, **kwargs):
    """Add a configured handler to the global logger and return it."""
    global g_logger
    if isinstance(level, str):
        # Accept level names; unknown names fall back to DEBUG.
        level = getattr(logging, level.upper(), logging.DEBUG)
    handler = cls(**kwargs)
    handler.setLevel(level)
    datefmt = '%Y-%m-%d %H:%M:%S'
    if colorful:
        formatter = ColoredFormatter(fmt, datefmt=datefmt)
    else:
        formatter = logging.Formatter(fmt, datefmt=datefmt)
    handler.setFormatter(formatter)
    g_logger.addHandler(handler)
    return handler
Add a configured handler to the global logger.
def _ssid_inventory(self, inventory, ssid): matching_hosts = {} for host in inventory: if inventory[host]['comment'] == ssid: matching_hosts[host] = inventory[host] return matching_hosts
Filters an inventory to only return servers matching ssid
def parse_buffer_to_ppm(data):
    """Parse concatenated PPM file bytes into a list of Pillow Images."""
    images = []
    offset = 0
    total = len(data)
    while offset < total:
        # PPM header: magic code, "<width> <height>", max colour value.
        code, size, rgb = tuple(data[offset:offset + 40].split(b'\n')[0:3])
        width, height = tuple(size.split(b' '))
        # Header bytes (+3 newlines) plus 3 bytes per pixel.
        file_size = (len(code) + len(size) + len(rgb) + 3
                     + int(width) * int(height) * 3)
        images.append(Image.open(BytesIO(data[offset:offset + file_size])))
        offset += file_size
    return images
Parse PPM file bytes to Pillow Image
def run(self, cmd):
    """Profile a single executable statement in the main namespace."""
    import __main__
    namespace = __main__.__dict__
    return self.runctx(cmd, namespace, namespace)
Profile a single executable statement in the main namespace.
def _calc_q_h0(n, x, h, nt, n_jobs=1, verbose=0, random_state=None):
    """Calculate q under the null hypothesis of whiteness."""
    rng = check_random_state(random_state)
    par, func = parallel_loop(_calc_q_statistic, n_jobs, verbose)
    # Each draw permutes the rows of x.T (i.e. the columns of x).
    q = par(func(rng.permutation(x.T).T, h, nt) for _ in range(n))
    return np.array(q)
Calculate q under the null hypothesis of whiteness.
def install_monitor(self, mon):
    """Installs the monitor on all executors."""
    assert self.binded
    self._monitor = mon
    for executor in self._buckets.values():
        executor.install_monitor(mon)
Installs monitor on all executors
def simulate(self):
    """Simulates a stream of types (a random-length prefix of self)."""
    count = random.choice(range(10))
    return [item.simulate() for item in itertools.islice(self, count)]
Simulates a stream of types.
def heads(directory=None, verbose=False, resolve_dependencies=False):
    """Show current available heads in the script directory."""
    if alembic_version < (0, 7, 0):
        raise RuntimeError('Alembic 0.7.0 or greater is required')
    config = current_app.extensions['migrate'].migrate.get_config(directory)
    command.heads(config, verbose=verbose,
                  resolve_dependencies=resolve_dependencies)
Show current available heads in the script directory
def filter_req_paths(paths, func):
    """Return a list of requirement lines from *paths* accepted by *func*.

    Blank lines ('\\n') are dropped; duplicates across files collapse.
    """
    if not isinstance(paths, list):
        raise ValueError("Paths must be a list of paths.")
    libs = set()
    for path in paths:
        with path.open(mode='r') as reqs:
            libs.update(line for line in reqs if func(line))
    return list(libs - {'\n'})
Return list of filtered libs.
def eventFilter(self, widget, event):
    """Used to handle key events on the QListView of the combobox.

    Delete removes the highlighted entry when it is a removable (external)
    path, then re-selects a sensible neighbour.
    """
    if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Delete:
        row = self.view().currentIndex().row()
        if row >= EXTERNAL_PATHS:
            self.removeItem(row)
            self.showPopup()
            next_row = min(self.count() - 1, row)
            if next_row < EXTERNAL_PATHS:
                next_row = 0
            self.view().setCurrentIndex(self.model().index(next_row, 0))
            self.setCurrentIndex(next_row)
        return True
    return QComboBox.eventFilter(self, widget, event)
Used to handle key events on the QListView of the combobox.
def LReLU_function(signal, derivative=False, leakage=0.01):
    """Leaky Rectified Linear Unit activation (or its derivative)."""
    if derivative:
        # 1.0 where the signal is positive, `leakage` elsewhere.
        return np.clip(signal > 0, leakage, 1.0)
    output = np.copy(signal)
    output[output < 0] *= leakage
    return output
Leaky Rectified Linear Unit
def shutdown_notebook(request, username):
    """Stop any running notebook for a user."""
    manager = get_notebook_manager(request)
    if manager.is_running(username):
        manager.stop_notebook(username)
Stop any running notebook for a user.
def save(self):
    """Saves the updated model to the current entity db and returns self."""
    self.session.add(self)
    self.session.flush()
    return self
Saves the updated model to the current entity db.
def code_block(self, node, entering):
    """Output Pygments-highlighted code if enabled, else default html5 output."""
    if not self.use_pygments:
        super().code_block(node, entering)
        return
    self.cr()
    info_words = node.info.split() if node.info else []
    if info_words and info_words[0]:
        try:
            lexer = get_lexer_by_name(info_words[0])
        except ValueError:
            # Unknown language name: fall back to plain text.
            lexer = TextLexer()
    else:
        lexer = TextLexer()
    formatter = HtmlFormatter(**self.pygments_options)
    self.lit(highlight(node.literal, lexer, formatter))
    self.cr()
Output Pygments if required else use default html5 output
def _set_instance_prop(self, attr_name, config_prop, value): setattr(self, attr_name, value) if not config_prop.exclude_from_varz: self.varz[attr_name] = value
Set instance property to a value and add it varz if needed
def multi_split(s, split):
    """Splits *s* on multiple given separators, dropping empty parts."""
    for separator in split:
        s = s.replace(separator, "|")
    return [part for part in s.split("|") if part]
Splits on multiple given separators.
def finalize_target(self):
    """Finalize the target using the core finalize method."""
    shutit_global.shutit_global_object.yield_to_draw()
    self.pause_point(
        '\nFinalizing the target module (' + self.shutit_main_dir + '/shutit_setup.py)',
        print_input=False, level=3)
    # NOTE(review): assumes a module matching build['conn_module'] exists;
    # if none matches, a NameError follows — confirm upstream guarantees this.
    for mod in self.conn_modules:
        if mod.module_id == self.build['conn_module']:
            conn_module = mod
            break
    conn_module.finalize(self)
Finalize the target using the core finalize method.
def index(self, value):
    """Gets the index in the list for a value.

    Modified data wins over the original snapshot when present.
    """
    if self.__modified_data__ is None:
        return self.__original_data__.index(value)
    return self.__modified_data__.index(value)
Gets the index in the list for a value
def collapse_all(self):
    """Collapse all messages in the thread, then restore focus."""
    for tree in self.messagetrees():
        tree.collapse(tree.root)
    self.focus_selected_message()
collapse all messages in thread
def remove(self, key):
    """Remove the value found at *key* from the queue.

    The entry is marked dead in place (its last slot set to None) rather
    than physically removed — presumably lazy deletion; confirm against the
    queue's pop logic.
    """
    entry = self.item_finder.pop(key)
    entry[-1] = None
    self.removed_count += 1
remove the value found at key from the queue
def tenant_path(cls, project, tenant):
    """Return a fully-qualified tenant string."""
    template = "projects/{project}/tenants/{tenant}"
    return google.api_core.path_template.expand(
        template, project=project, tenant=tenant)
Return a fully-qualified tenant string.
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
    """Parse a leaf-list value or list keys from the input buffer.

    For a LeafListNode the whole segment is one (unquoted) value; for a
    list, the segment is a comma-separated tuple that must supply exactly
    one value per schema key.
    """
    # Consume up to the next "/" separator, or everything that is left.
    try:
        keys = self.up_to("/")
    except EndOfInput:
        keys = self.remaining()
    if not keys:
        raise UnexpectedInput(self, "entry value or keys")
    if isinstance(sn, LeafListNode):
        return EntryValue(unquote(keys))
    ks = keys.split(",")
    # sn.keys is absent on non-list schema nodes — surface that as a
    # schema-type error rather than an AttributeError.
    try:
        if len(ks) != len(sn.keys):
            raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
    except AttributeError:
        raise BadSchemaNodeType(sn.qual_name, "list")
    sel = {}
    for j in range(len(ks)):
        knod = sn.get_data_child(*sn.keys[j])
        val = unquote(ks[j])
        # Namespace is recorded only when it differs from the list's own.
        sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
    return EntryKeys(sel)
Parse leaf-list value or list keys.
def hasCamera(self, nDeviceIndex):
    """For convenience, same as tracked property request Prop_HasCamera_Bool.

    Returns (result, pHasCamera) where pHasCamera is the ctypes out-param.
    """
    fn = self.function_table.hasCamera
    has_camera = openvr_bool()
    result = fn(nDeviceIndex, byref(has_camera))
    return result, has_camera
For convenience, same as tracked property request Prop_HasCamera_Bool
def known_pipettes() -> Sequence[str]:
    """List pipette IDs for which we have known overrides."""
    overrides_dir = CONFIG['pipette_config_overrides_dir']
    return [entry.stem
            for entry in overrides_dir.iterdir()
            if entry.is_file() and '.json' in entry.suffixes]
List pipette IDs for which we have known overrides
def device_state_attributes(self):
    """Return the state attributes of the device.

    NOTE: the original assigned 'active_time' twice; the redundant second
    assignment was removed (the resulting dict is identical).
    """
    plug = self.smartplug
    return {
        'active_time': plug.active_time,
        'voltage': plug.voltage,
        'weekly_energy_total': plug.weekly_energy_total,
        'monthly_energy_total': plug.monthly_energy_total,
        'yearly_energy_total': plug.yearly_energy_total,
    }
Return the state attributes of the device.
def _run_env(self): env = dict(os.environ) env.update( getattr(self, 'env', {}), PYTHONUSERBASE=self.env_path, PIP_USER="1", ) self._disable_venv(env) return env
Augment the current environment providing the PYTHONUSERBASE.
def profile_v3_to_proofs(profile, fqdn, refresh=False, address=None):
    """Convert profile format v3 to proofs.

    Best-effort: returns a (possibly empty) list of proof dicts; malformed
    profiles/accounts are skipped rather than raising.
    """
    proofs = []
    # Probe that the profile is dict-like. The original used a bare
    # ``except:``, which also swallowed KeyboardInterrupt/SystemExit.
    try:
        profile.items()
    except Exception:
        return proofs
    if 'account' not in profile:
        return proofs
    for account in profile['account']:
        if 'service' in account and account['service'].lower() not in SITES:
            continue
        if account.get('proofType') == "http":
            try:
                proof = {"service": account['service'],
                         "proof_url": account['proofUrl'],
                         "identifier": account['identifier'],
                         "valid": False}
                if is_valid_proof(account['service'], account['identifier'],
                                  fqdn, account['proofUrl'], address=address):
                    proof["valid"] = True
                proofs.append(proof)
            except Exception:
                # Deliberate best-effort: skip malformed accounts.
                continue
    return proofs
Convert profile format v3 to proofs
def write_events(self, outname):
    """Write the found events to a sngl inspiral table (HDF only)."""
    self.make_output_dir(outname)
    if '.hdf' not in outname:
        raise ValueError('Cannot write to this format')
    self.write_to_hdf(outname)
Write the found events to a sngl inspiral table
def lighting(im, b, c):
    """Adjust image balance (*b*) and contrast (*c*), clipped to [0, 1].

    Identity arguments (b=0, c=1) return the input unchanged (and untouched
    in dtype), as a fast path.
    """
    if b == 0 and c == 1:
        return im
    mu = np.average(im)
    adjusted = (im - mu) * c + mu + b
    return np.clip(adjusted, 0., 1.).astype(np.float32)
Adjust image balance and contrast
def color_square(self, x, y, unit_coords=False):
    """Toggle the colour of a board square (fill black <-> clear)."""
    if unit_coords:
        col, row = x, y
    else:
        # Pixel coordinates: map to grid cell.
        col = x // self.col_width
        row = y // self.row_height
    if self.tiles[row][col]:
        # Square already filled: clear it.
        self.c.delete(self.tiles[row][col])
        self.tiles[row][col] = None
        self.cells.remove(row * self.cols + col)
    else:
        self.tiles[row][col] = self.c.create_rectangle(
            col * self.col_width, row * self.row_height,
            (col + 1) * self.col_width, (row + 1) * self.row_height,
            fill="black")
        self.cells.append(row * self.cols + col)
Handles actually coloring the squares
def _bd_(self):
    """Property that allows dot lookups of otherwise hidden attributes.

    Lazily builds and caches a BetterDictLookUp. The original checked
    ``getattr(self, '__bd__')`` but assigned ``self.__bd`` (which is also
    name-mangled inside a class), so the check never matched and a fresh
    lookup object was built on every access; a single consistent,
    unmangled cache attribute fixes that.
    """
    cached = getattr(self, '_bd_cache', None)
    if cached is None:
        cached = BetterDictLookUp(self)
        self._bd_cache = cached
    return cached
Property that allows dot lookups of otherwise hidden attributes.
def dump_simulation(simulation, directory):
    """Write simulation data to *directory*, so that it can be restored later.

    Raises ValueError when the target directory already has content.
    """
    parent_directory = os.path.abspath(os.path.join(directory, os.pardir))
    if not os.path.isdir(parent_directory):
        os.mkdir(parent_directory)
    if not os.path.isdir(directory):
        os.mkdir(directory)
    if os.listdir(directory):
        raise ValueError("Directory '{}' is not empty".format(directory))
    entities_dump_dir = os.path.join(directory, "__entities__")
    os.mkdir(entities_dump_dir)
    for entity in simulation.populations.values():
        _dump_entity(entity, entities_dump_dir)
        for holder in entity._holders.values():
            _dump_holder(holder, directory)
Write simulation data to directory, so that it can be restored later.
def split(self, encoded):
    """Split *encoded* into (message, signature) using self.sig_size."""
    boundary = len(encoded) - self.sig_size
    return encoded[:boundary], encoded[-self.sig_size:]
Split into signature and message
def _generate_initial_score(self): self.current_energy = self.eval_fn(self.polypeptide, *self.eval_args) self.best_energy = copy.deepcopy(self.current_energy) self.best_model = copy.deepcopy(self.polypeptide) return
Runs the evaluation function for the initial pose.
def features(self, expand=False):
    """Return the list of feature-value pairs in the conjunction."""
    collected = []
    for term in self._terms:
        if isinstance(term, AVM):
            collected.extend(term.features(expand=expand))
    return collected
Return the list of feature-value pairs in the conjunction.
def construct_FAO_ontology():
    """Construct FAO variable ontology for use with Eidos."""
    df = pd.read_csv("south_sudan_data_fao.csv")
    grouped = df.groupby("Element")
    events = []
    for element in grouped.groups.keys():
        items = list(set(grouped.get_group(element)["Item"].tolist()))
        events.append({element: [{item: [process_variable_name(element, item)]}
                                 for item in items]})
    ontology = [{"events": events}]
    yaml = YAML()
    yaml.default_flow_style = False
    with open("fao_variable_ontology.yml", "w") as f:
        yaml.dump(ontology, f)
Construct FAO variable ontology for use with Eidos.
def are_forms_valid(self, forms):
    """Check if all forms defined in `form_classes` are valid."""
    # all() short-circuits on the first invalid form, like the original loop.
    return all(form.is_valid() for form in six.itervalues(forms))
Check if all forms defined in `form_classes` are valid.
def create_albaran_automatic(pk, list_lines):
    """Automatically create the delivery note (albaran) for pending lines."""
    line_bd = SalesLineAlbaran.objects.filter(
        line_order__pk__in=list_lines).values_list('line_order__pk')
    if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
        if line_bd.count() != 0:
            # Drop the lines that already have a delivery note.
            for existing in line_bd[0]:
                list_lines.pop(list_lines.index(existing))
        GenLineProduct.create_albaran_from_order(pk, list_lines)
Automatically create the delivery note (albaran) for the given order lines.
def ind2sub(ind, dimensions):
    """Calculate 1-based subscripts for a linear (row-major) index into a
    regularly spaced matrix of the given dimensions.

    Raises RuntimeError when *ind* exceeds the array size. The original used
    true division (``/``), which on Python 3 yields floats and wrong
    subscripts; floor division (``//``) restores the intended integer math.
    """
    if ind >= np.prod(dimensions):
        raise RuntimeError("ind2sub: index exceeds array size")
    # Build the stride multipliers from the innermost dimension outwards.
    cum_dims = list(dimensions)
    cum_dims.reverse()
    m = 1
    mult = []
    for d in cum_dims:
        m = m * d
        mult.append(m)
    mult.pop()
    mult.reverse()
    mult.append(1)
    indices = []
    for d in mult:
        indices.append((ind // d) + 1)
        ind = ind - (ind // d) * d
    return indices
Calculates subscripts for indices into regularly spaced matrixes.
def _compute_sigma_b(self, C, mag, vs30measured): sigma_0 = self._compute_sigma_0(C, mag, vs30measured) sigma_amp = self.CONSTS['sigma_amp'] return np.sqrt(sigma_0 ** 2 - sigma_amp ** 2)
Equation 23, page 81.
def weekday(cls, year, month, day):
    """Returns the weekday of the date (0 = aaitabar)."""
    return NepDate.from_bs_date(year, month, day).weekday()
Returns the weekday of the date. 0 = aaitabar
def _validate_slice(self, start, end): if start is None: start = 0 elif start < 0: start += self.len if end is None: end = self.len elif end < 0: end += self.len if not 0 <= end <= self.len: raise ValueError("end is not a valid position in the bitstring.") if not 0 <= start <= self.len: raise ValueError("start is not a valid position in the bitstring.") if end < start: raise ValueError("end must not be less than start.") return start, end
Validate start and end and return them as positive bit positions.
def _build_type_validator(value_type): def type_validator(data): if isinstance(data, value_type): return data raise NotValid('%r is not of type %r' % (data, value_type)) return type_validator
Build a validator that only checks the type of a value.
def transform_y(self, tfms: TfmList = None, **kwargs):
    "Set `tfms` to be applied to the targets only."
    _check_kwargs(self.y, tfms, **kwargs)
    self.tfm_y = True
    if tfms is None:
        # Inherit the x transforms that opt into y, plus merged args.
        self.tfms_y = [t for t in listify(self.tfms) if t.use_on_y]
        self.tfmargs_y = {**self.tfmargs, **kwargs}
    else:
        self.tfms_y = [t for t in tfms if t.use_on_y]
        self.tfmargs_y = kwargs
    return self
Set `tfms` to be applied to the targets only.
def _write_to_datastore(self):
    """Writes all submissions to datastore."""
    # Pair each root entity key with its corresponding submission dict.
    roots_and_submissions = zip([ATTACKS_ENTITY_KEY, TARGET_ATTACKS_ENTITY_KEY, DEFENSES_ENTITY_KEY], [self._attacks, self._targeted_attacks, self._defenses])
    client = self._datastore_client
    with client.no_transact_batch() as batch:
        for root_key, submissions in roots_and_submissions:
            # Ensure the root entity exists before its children.
            batch.put(client.entity(client.key(*root_key)))
            for k, v in iteritems(submissions):
                entity = client.entity(client.key(
                    *(root_key + [KIND_SUBMISSION, k])))
                entity['submission_path'] = v.path
                entity.update(participant_from_submission_path(v.path))
                batch.put(entity)
Writes all submissions to datastore.
def _line_start_indexes(self): if self._cache.line_indexes is None: line_lengths = map(len, self.lines) indexes = [0] append = indexes.append pos = 0 for line_length in line_lengths: pos += line_length + 1 append(pos) if len(indexes) > 1: indexes.pop() self._cache.line_indexes = indexes return self._cache.line_indexes
Array pointing to the start indexes of all the lines.
def autocomplete(self):
    """Execute the solr query for autocomplete and post-process the hits."""
    params = self.set_lay_params()
    logging.info("PARAMS=" + str(params))
    results = self.solr.search(**params)
    logging.info("Docs found: {}".format(results.hits))
    return self._process_layperson_results(results)
Execute solr query for autocomplete
def clear_recovery_range(working_dir):
    """Clear out our recovery hint (no-op when the hint file is absent)."""
    recovery_path = os.path.join(working_dir, '.recovery')
    if os.path.exists(recovery_path):
        os.unlink(recovery_path)
Clear out our recovery hint
def validate_token_age(callback_token):
    """Returns True if a given token is within the age expiration limit."""
    try:
        token = CallbackToken.objects.get(key=callback_token, is_active=True)
        age_seconds = (timezone.now() - token.created_at).total_seconds()
        if age_seconds <= api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME:
            return True
        # Expired: deactivate so it cannot be retried.
        token.is_active = False
        token.save()
        return False
    except CallbackToken.DoesNotExist:
        return False
Returns True if a given token is within the age expiration limit.
def make_server(application, conf_dir=None):
    """Configure the server and return the server instance.

    Serves over HTTPS when options.use_ssl is set, plain HTTP otherwise.
    """
    if conf_dir:
        load_config(conf_dir)
    configure_syslog()
    log_config()
    if options.use_ssl:
        ssl_options = ssl_server_options()
        server = tornado.httpserver.HTTPServer(
            application, ssl_options=ssl_options)
        general_logger.info(
            'start tornado https server at https://%s:%s'
            ' with ssl_options: %s',
            options.ip, options.port, ssl_options)
    else:
        server = tornado.httpserver.HTTPServer(application)
        # Lazy %-formatting, consistent with the https branch above
        # (the original eagerly formatted with str.format).
        general_logger.info('start tornado http server at http://%s:%s',
                            options.ip, options.port)
    server.bind(options.port, options.ip)
    return server
Configure the server return the server instance
def load(self):
    """Load this step's result from its dump directory.

    An HDF5 file may hold a single frame ('/df'), a numbered list, or a
    named mapping; otherwise falls back to the joblib pickle dump.
    """
    hdf_filename = os.path.join(self._dump_dirname, 'result.h5')
    if os.path.isfile(hdf_filename):
        # Context manager closes the store (the original leaked the handle).
        with pd.HDFStore(hdf_filename, mode='r') as store:
            keys = store.keys()
            if keys == ['/df']:
                self.result = store['df']
            elif set(keys) == {'/%s' % i for i in range(len(keys))}:
                self.result = [store[str(k)] for k in range(len(keys))]
            else:
                self.result = {k[1:]: store[k] for k in keys}
    else:
        self.result = joblib.load(
            os.path.join(self._output_dirname, 'dump', 'result.pkl'))
Load this step's result from its dump directory
def fail(self, message, status=500, **kw):
    """Set a JSON error object and a status on the response."""
    self.request.response.setStatus(status)
    payload = {"success": False, "errors": message, "status": status}
    payload.update(kw)
    return payload
Set a JSON error object and a status to the response
def prettyprint(d):
    """Print a dict tree in JSON-like format with sorted keys."""
    rendered = json.dumps(d, sort_keys=True, indent=4, separators=(",", ": "))
    print(rendered)
Print a dict tree in JSON-like format; keys are sorted.
def render_relation(self, r, **args):
    """Render an object property; None renders as '.'."""
    if r is None:
        return "."
    symbol_map = self.config.relsymbolmap
    # Fall back to the raw relation when no symbol is configured.
    return symbol_map.get(r, r)
Render an object property
def readRGB(self):
    """Read an RGB colour and pack it as an int with alpha forced to 0xff."""
    self.reset_bits_pending()
    red = self.readUI8()
    green = self.readUI8()
    blue = self.readUI8()
    return (0xff << 24) | (red << 16) | (green << 8) | blue
Read a RGB color
def _update_params(self, constants): for k, v in constants.items(): self.params[k]['value'] *= v influence = self._calculate_influence(self.params['infl']['value']) return influence * self.params['lr']['value']
Update params and return new influence.
def new_action(self, method='GET', **kwargs):
    """Create a new Action linked to this endpoint with the given args.

    Raises TypeError for methods this endpoint does not support.
    """
    if method not in self.methods:
        raise TypeError('{} not in valid method(s): {}.'.format(method, self.methods))
    return Action(self, method, **kwargs)
Create a new Action linked to this endpoint with the given args.
def go_next_thumbnail(self):
    """Select the thumbnail next to the currently selected one (wrapping)."""
    if self.current_thumbnail is None:
        return
    position = self._thumbnails.index(self.current_thumbnail) + 1
    if position >= len(self._thumbnails):
        position = 0
    self.set_current_index(position)
    self.scroll_to_item(position)
Select thumbnail next to the currently selected one.
def accept_re(regexp, buf, pos):
    """Accept a regular expression at the current buffer position.

    Returns (group-1 text, position after the whole match), or (None, pos)
    when there is no match.
    """
    match = regexp.match(buf, pos)
    if match is None:
        return None, pos
    return buf[match.start(1):match.end(1)], match.end(0)
Accept a regular expression at the current buffer position.
def tgread_bool(self):
    """Reads a Telegram boolean value.

    0x997275b5 / 0xbc799737 are presumably Telegram's boolTrue/boolFalse
    constructor IDs — confirm against the TL schema.
    """
    value = self.read_int(signed=False)
    if value == 0x997275b5:
        return True
    if value == 0xbc799737:
        return False
    raise RuntimeError('Invalid boolean code {}'.format(hex(value)))
Reads a Telegram boolean value.
def _preprocess_Y(self, Y, k=None):
    """Convert Y to a t-length list of probabilistic labels if necessary."""
    if not isinstance(Y, list):
        if self.t != 1:
            raise ValueError(
                "For t > 1, Y must be a list of n-dim or [n, K_t] tensors")
        Y = [Y]
    if len(Y) != self.t:
        raise ValueError(
            f"Expected Y to be a t-length list (t={self.t}), not {len(Y)}")
    return [EndModel._preprocess_Y(self, Y_t, self.K[t])
            for t, Y_t in enumerate(Y)]
Convert Y to t-length list of probabilistic labels if necessary
def pretty_print(source, dest):
    """Pretty print the XML file *source* into *dest*.

    Preserves the source document's encoding and standalone declaration.
    """
    parser = etree.XMLParser(remove_blank_text=True)
    if not isinstance(source, str):
        source = str(source)
    tree = etree.parse(source, parser)
    docinfo = tree.docinfo
    with open(dest, 'wb') as fp:
        fp.write(etree.tostring(tree, pretty_print=True,
                                encoding=docinfo.encoding,
                                standalone=docinfo.standalone))
Pretty print the XML file
def simulate(args):
    """Main function that manages simulation of small RNAs from a FASTA file."""
    if not args.fasta:
        return
    name = None
    seq = ""
    reads = dict()
    with open(args.fasta) as in_handle:
        for line in in_handle:
            if line.startswith(">"):
                # Flush the previous record before starting a new one.
                if name:
                    reads.update(_generate_reads(seq, name))
                    seq = ""
                name = line[1:-1]
            else:
                seq += line.strip()
    # Flush the final record.
    reads.update(_generate_reads(seq, name))
    _write_reads(reads, args.out)
Main function that manages simulation of small RNAs.
def parse(self, argument):
    """Parses the argument as a comma-separated list of strings.

    Lists pass through; empty/None becomes []; parse failures raise ValueError.
    """
    if isinstance(argument, list):
        return argument
    if not argument:
        return []
    try:
        return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
    except csv.Error as e:
        raise ValueError('Unable to parse the value %r as a %s: %s'
                         % (argument, self.flag_type(), e))
Parses argument as comma-separated list of strings.
def start(ctx, debug, version, config):
    """Commands for devops operations."""
    ctx.obj = {}
    ctx.DEBUG = debug
    if os.path.isfile(config):
        with open(config) as fp:
            agile = json.load(fp)
    else:
        agile = {}
    ctx.obj['agile'] = agile
    if version:
        click.echo(__version__)
        ctx.exit(0)
    if not ctx.invoked_subcommand:
        click.echo(ctx.get_help())
Commands for devops operations
def _cldf2wld(dataset): header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID'] D = {0: ['lid'] + [h.lower() for h in header]} for idx, row in enumerate(dataset.objects['FormTable']): row = deepcopy(row) row['Segments'] = ' '.join(row['Segments']) D[idx + 1] = [row['ID']] + [row[h] for h in header] return D
Make a lingpy-compatible dictionary out of cldf main data.
def playstate_str(state):
    """Convert internal API playstate to string."""
    names = {
        const.PLAY_STATE_NO_MEDIA: 'No media',
        const.PLAY_STATE_IDLE: 'Idle',
        const.PLAY_STATE_LOADING: 'Loading',
        const.PLAY_STATE_PAUSED: 'Paused',
        const.PLAY_STATE_PLAYING: 'Playing',
        const.PLAY_STATE_FAST_FORWARD: 'Fast forward',
        const.PLAY_STATE_FAST_BACKWARD: 'Fast backward',
    }
    return names.get(state, 'Unsupported')
Convert internal API playstate to string.
def remove_prohibited_element(tag_name, document_element):
    """Drop every element with *tag_name* (not allowed by the Evernote DTD)."""
    for element in document_element.getElementsByTagName(tag_name):
        element.parentNode.removeChild(element)
To fit the Evernote DTD need, drop this tag name
def append_volume(runtime, source, target, writable=False):
    """Add a --volume binding argument to the runtime list."""
    mode = "rw" if writable else "ro"
    runtime.append(u"--volume={}:{}:{}".format(
        docker_windows_path_adjust(source), target, mode))
Add binding arguments to the runtime list.
def continueLine(self):
    """Insert a line continuation symbol at the end of the line."""
    if self.isLong and self.is_regular:
        # Long fixed-width line: pad to 72 columns then carry the excess.
        head = self.line_conv[:72].rstrip() + " &"
        self.line_conv = head.ljust(72) + self.excess_line
    else:
        self.line_conv = self.line_conv.rstrip() + " &\n"
Insert line continuation symbol at end of line.
def data_filler_detailed_registration(self, number_of_rows, pipe):
    """Create keys with detailed registration information via a redis pipe."""
    try:
        for i in range(number_of_rows):
            pipe.hmset('detailed_registration:%s' % i, {
                'id': rnd_id_generator(self),
                'email': self.faker.safe_email(),
                'password': self.faker.md5(raw_output=False),
                'lastname': self.faker.last_name(),
                'name': self.faker.first_name(),
                'address': self.faker.address(),
                'phone': self.faker.phone_number(),
            })
        pipe.execute()
        logger.warning('detailed_registration Commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
creates keys with detailed regis. information
def bam_needs_processing(data):
    """Check if a work input needs processing for parallelization.

    Truthy only when a BAM is present AND some downstream step
    (algorithm flag or CWL keys) requires it.
    """
    return ((data.get("work_bam") or data.get("align_bam"))
            and (any(tz.get_in(["config", "algorithm", flag], data)
                     for flag in ["variantcaller", "mark_duplicates",
                                  "recalibrate", "realign", "svcaller",
                                  "jointcaller", "variant_regions"])
                 or any(key in data for key in ["cwl_keys", "output_cwl_keys"])))
Check if a work input needs processing for parallelization.
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while doing pattern matching.

    Falsy actions fall back to the module defaults. Returns self (fluent).
    """
    self.debugActions = (startAction or _defaultStartDebugAction,
                         successAction or _defaultSuccessDebugAction,
                         exceptionAction or _defaultExceptionDebugAction)
    self.debug = True
    return self
Enable display of debugging messages while doing pattern matching.
def reindent(tokens, indent=' '): old_levels = [] old_level = 0 new_level = 0 for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens: if typ == tokenize.INDENT: old_levels.append(old_level) old_level = len(tok) new_level += 1 tok = indent * new_level elif typ == tokenize.DEDENT: old_level = old_levels.pop() new_level -= 1 start_col = max(0, start_col - old_level + new_level) if start_row == end_row: end_col = start_col + len(tok) yield typ, tok, (start_row, start_col), (end_row, end_col), line
Replace existing indentation in a token steam, with `indent`.
def _set_aws_environment(task: Task = None):
    """Sets up the AWS environment from NCLUSTER environment variables.

    Resolution order for the region: $AWS_DEFAULT_REGION, then derived from
    $NCLUSTER_ZONE, then the boto3 session default, then the hard default.
    Mutates os.environ['AWS_DEFAULT_REGION'] as a side effect.
    """
    current_zone = os.environ.get('NCLUSTER_ZONE', '')
    current_region = os.environ.get('AWS_DEFAULT_REGION', '')

    def log(*args):
        # Prefer the task's logger when a task was supplied.
        if task:
            task.log(*args)
        else:
            util.log(*args)

    # Sanity check: the zone must belong to the region when both are set.
    if current_region and current_zone:
        assert current_zone.startswith(
            current_region), f'Current zone "{current_zone}" ($NCLUSTER_ZONE) is not ' \
                             f'in current region "{current_region} ($AWS_DEFAULT_REGION)'
        assert u.get_session().region_name == current_region
    # Zone implies region: zone names are the region plus a letter suffix.
    if current_zone and not current_region:
        current_region = current_zone[:-1]
        os.environ['AWS_DEFAULT_REGION'] = current_region
    if not current_region:
        current_region = u.get_session().region_name
    if not current_region:
        log(f"No default region available, using {NCLUSTER_DEFAULT_REGION}")
        current_region = NCLUSTER_DEFAULT_REGION
        os.environ['AWS_DEFAULT_REGION'] = current_region
    log(f"Using account {u.get_account_number()}, region {current_region}, "
        f"zone {current_zone}")
Sets up AWS environment from NCLUSTER environment variables
def exception_log_and_respond(exception, logger, message, status_code):
    """Log an error (with traceback) and send a jsonified response."""
    logger.error(message, exc_info=True)
    details = dict(exception_type=type(exception).__name__,
                   exception_message=str(exception))
    return make_response(message, status_code, details)
Log an error and send jsonified respond.
def exception(self):
    """Return an instance of the corresponding exception.

    self.data is "<code> <message>"; the code selects the exception class.
    """
    code, _, message = self.data.partition(' ')
    exc_class = self.find(code)
    return exc_class(message)
Return an instance of the corresponding exception