code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get(self, key):
    """Deeply fetch a value from the registrar using "." notation.

    Args
    ----
    key (string): A key with the "." notation, e.g. ``"a.b.c"``.

    Returns
    -------
    reg: The nested dict or primitive found at the key path, or None
        if any segment of the path is missing.
    """
    try:
        value = self.registrar
        for part in key.split('.'):
            value = value[part]
        return value
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
def set_timestamp(self, timestamp=None):
    """Set the timestamp of the linguistic processor.

    @type timestamp: string
    @param timestamp: timestamp value; pass None to use the current time.
    """
    if timestamp is None:
        import time
        # Default to an ISO-8601-style local timestamp.
        timestamp = time.strftime('%Y-%m-%dT%H:%M:%S%Z')
    self.node.set('timestamp', timestamp)
def upload(self, local_path):
    """Upload a file to the camera's permanent storage.

    :param local_path: Path to file to copy
    :type local_path: str/unicode
    """
    camerafile_p = ffi.new("CameraFile**")
    with open(local_path, 'rb') as fp:
        # Wrap the open file descriptor in a libgphoto2 CameraFile, then
        # push it into the current folder on the camera.
        lib.gp_file_new_from_fd(camerafile_p, fp.fileno())
        lib.gp_camera_folder_put_file(
            self._cam._cam,
            self.path.encode() + b"/",
            os.path.basename(local_path).encode(),
            backend.FILE_TYPES['normal'],
            camerafile_p[0],
            self._cam.ctx)
def mcscanx(args):
    """%prog mcscanx athaliana.athaliana.last athaliana.bed

    Wrap around MCScanX.
    """
    p = OptionParser(mcscanx.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    blastfile = args[0]
    bedfiles = args[1:]
    prefix = "_".join(op.basename(x)[:2] for x in bedfiles)
    symlink(blastfile, prefix + ".blast")

    allbedfile = prefix + ".gff"
    fw = open(allbedfile, "w")
    for i, bedfile in enumerate(bedfiles):
        # Each bed file gets a single-letter chromosome prefix: A, B, C, ...
        make_gff(bedfile, chr(ord('A') + i), fw)
    fw.close()
def parse_line(self, line):
    """Parse one line of JSON, yield it, and write it back re-serialized.

    A leading comma (array-element separator) is preserved on output.
    """
    sep = ""
    if line.startswith(","):
        sep, line = ",", line[1:]
    obj = json.loads(line)
    yield obj
    self.io.write_line(sep + json.dumps(obj))
def singularize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
    """Return the singular of a given word.

    The inflection is based on probability rather than gender and role.
    """
    # `custom` is only read, never mutated, so the mutable default is safe.
    w = word.lower().capitalize()
    if word in custom:
        return custom[word]
    if word in singular:
        return singular[word]
    if pos == NOUN:
        # Known inflection suffix -> replacement pairs take priority.
        for ending, replacement in singular_inflections:
            if w.endswith(ending):
                return w[:-len(ending)] + replacement
        # Otherwise strip the first matching plural suffix.
        for suffix in ("nen", "en", "n", "e", "er", "s"):
            if w.endswith(suffix):
                w = w[:-len(suffix)]
                break
        if w.endswith(("rr", "rv", "nz")):
            return w + "e"
        return w
    return w
def record(self):
    """Generate a string representing the Rock Ridge Sparse File record.

    Parameters:
        None.
    Returns:
        String containing the Rock Ridge record.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('SF record not yet initialized!')

    length = 12
    if self.virtual_file_size_high is not None:
        length = 21

    ret = b'SF' + struct.pack('=BB', length, SU_ENTRY_VERSION)
    if self.virtual_file_size_high is not None and self.table_depth is not None:
        # Long form: both 32-bit halves of the size, each stored in both
        # byte orders, plus the table depth.
        # NOTE(review): `length` is bumped to 21 whenever the high half is
        # set, even if table_depth is None and the short form is emitted —
        # presumably both fields are always set together; confirm upstream.
        ret += struct.pack('=LLLLB',
                           self.virtual_file_size_high,
                           utils.swab_32bit(self.virtual_file_size_high),
                           self.virtual_file_size_low,
                           utils.swab_32bit(self.virtual_file_size_low),
                           self.table_depth)
    else:
        # Short form: 32-bit size only, both byte orders.
        ret += struct.pack('=LL',
                           self.virtual_file_size_low,
                           utils.swab_32bit(self.virtual_file_size_low))
    return ret
def _load_history_from_file(path, size=-1): if size == 0: return [] if os.path.exists(path): with codecs.open(path, 'r', encoding='utf-8') as histfile: lines = [line.rstrip('\n') for line in histfile] if size > 0: lines = lines[-size:] return lines else: return []
Load a history list from a file and split it into lines. :param path: the path to the file that should be loaded :type path: str :param size: the number of lines to load (0 means no lines, < 0 means all lines) :type size: int :returns: a list of history items (the lines of the file) :rtype: list(str)
def _is_num_param(names, values, to_float=False):
    """Return numbers from inputs or raise VdtParamError.

    Lets ``None`` pass through. Pass in keyword argument ``to_float=True``
    to use float for the conversion rather than int.

    >>> _is_num_param(('', ''), (0, 1.0))
    [0, 1]
    >>> _is_num_param(('', ''), (0, 1.0), to_float=True)
    [0.0, 1.0]
    """
    # Replaces the legacy `to_float and float or int` idiom.
    fun = float if to_float else int
    out_params = []
    for name, val in zip(names, values):
        if val is None:
            out_params.append(val)
        elif isinstance(val, number_or_string_types):
            try:
                out_params.append(fun(val))
            except ValueError:
                raise VdtParamError(name, val)
        else:
            raise VdtParamError(name, val)
    return out_params
def cmd_gimbal_mode(self, args):
    """Control gimbal mode (GPS, MAVLink or RC targeting)."""
    if len(args) != 1:
        print("usage: gimbal mode <GPS|MAVLink>")
        return
    mode_name = args[0].upper()
    if mode_name == 'GPS':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT
    elif mode_name == 'MAVLINK':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING
    elif mode_name == 'RC':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING
    else:
        print("Unsupported mode %s" % args[0])
        # Bug fix: previously fell through and raised NameError on `mode`.
        return
    self.master.mav.mount_configure_send(self.target_system,
                                         self.target_component,
                                         mode, 1, 1, 1)
def match(self, location):
    """Check if the given location "matches".

    :param location: The :class:`Location` object to try to match.
    :returns: :data:`True` if the two locations are on the same system and
              the :attr:`directory` can be matched as a filename pattern or
              a literal match on the normalized pathname.
    """
    if self.ssh_alias != location.ssh_alias:
        # Different systems never match.
        return False
    if self.have_wildcards:
        return fnmatch.fnmatch(location.directory, self.directory)
    # Literal match on normalized pathnames (no rebinding of `self`).
    ours = os.path.normpath(self.directory)
    theirs = os.path.normpath(location.directory)
    return ours == theirs
def memory_read_bytes(self, start_position: int, size: int) -> bytes:
    """Read and return ``size`` bytes from memory starting at ``start_position``."""
    # Thin delegation to the underlying memory object.
    return self._memory.read_bytes(start_position, size)
async def build_get_revoc_reg_request(submitter_did: Optional[str],
                                      revoc_reg_def_id: str,
                                      timestamp: int) -> str:
    """Builds a GET_REVOC_REG request.

    Request to get the accumulated state of the Revocation Registry by ID.
    The state is defined by the given timestamp.

    :param submitter_did: (Optional) DID of the read request sender (if not
        provided then default Libindy DID will be used).
    :param revoc_reg_def_id: ID of the corresponding Revocation Registry
        Definition in ledger.
    :param timestamp: Requested time represented as a total number of
        seconds from Unix Epoch.
    :return: Request result as json.
    """
    logger = logging.getLogger(__name__)
    logger.debug("build_get_revoc_reg_request: >>> submitter_did: %r, revoc_reg_def_id: %r, timestamp: %r",
                 submitter_did, revoc_reg_def_id, timestamp)

    # Lazily create and cache the FFI callback on the function object.
    if not hasattr(build_get_revoc_reg_request, "cb"):
        logger.debug("build_get_revoc_reg_request: Creating callback")
        build_get_revoc_reg_request.cb = create_cb(
            CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
    c_revoc_reg_def_id = c_char_p(revoc_reg_def_id.encode('utf-8'))
    c_timestamp = c_int64(timestamp)

    request_json = await do_call('indy_build_get_revoc_reg_request',
                                 c_submitter_did,
                                 c_revoc_reg_def_id,
                                 c_timestamp,
                                 build_get_revoc_reg_request.cb)

    res = request_json.decode()
    logger.debug("build_get_revoc_reg_request: <<< res: %r", res)
    return res
def assign_operation_ids(spec, operids):
    """Assign caller-provided operationId values into a Swagger/OpenAPI spec.

    :param spec: spec dict with a ``'paths'`` mapping; modified in place.
    :param operids: nested mapping ``{path: {method: operation_id}}``.
    """
    # Native dict iteration replaces six.iteritems — this file already
    # uses Python-3-only features (async/await, annotations).
    for path_name, path_data in spec['paths'].items():
        for method, method_data in path_data.items():
            oper_id = operids.get(path_name, {}).get(method)
            if oper_id:
                method_data['operationId'] = oper_id
def kernel():
    """Handle linux kernel update via rpi-update; a reboot is required after."""
    print('================================')
    print(' WARNING: upgrading the kernel')
    print('================================')
    # Give the operator a moment to abort.
    time.sleep(5)
    print('-[kernel]----------')
    cmd('rpi-update', True)
    print(' >> You MUST reboot to load the new kernel <<')
def disassociate_route_table(association_id, region=None, key=None, keyid=None, profile=None):
    """Disassociate a route table.

    association_id
        The Route Table Association ID to disassociate

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.disassociate_route_table 'rtbassoc-d8ccddba'
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if conn.disassociate_route_table(association_id):
            log.info('Route table with association id %s has been disassociated.',
                     association_id)
            return {'disassociated': True}
        log.warning('Route table with association id %s has not been disassociated.',
                    association_id)
        return {'disassociated': False}
    except BotoServerError as e:
        return {'disassociated': False, 'error': __utils__['boto.get_error'](e)}
def wrapper(cls, func, height=None, catch_interrupt=False, arguments=None,
            unicode_aware=None):
    """Construct a new Screen for any platform.

    This will initialize the Screen, call the specified function and then
    tidy up the system as required when the function exits.

    :param func: The function to call once the Screen has been created.
    :param height: The buffer height for this Screen (only for test purposes).
    :param catch_interrupt: Whether to catch and prevent keyboard interrupts.
        Defaults to False to maintain backwards compatibility.
    :param arguments: Optional arguments list to pass to func (after the
        Screen object).
    :param unicode_aware: Whether the application can use unicode or not.
        If None, try to detect from the environment if UTF-8 is enabled.
    """
    screen = Screen.open(height,
                         catch_interrupt=catch_interrupt,
                         unicode_aware=unicode_aware)
    restore = True
    try:
        try:
            if arguments:
                return func(screen, *arguments)
            return func(screen)
        except ResizeScreenError:
            # Leave the terminal as-is so the caller can re-open a
            # resized Screen.
            restore = False
            raise
    finally:
        screen.close(restore)
def map_(input_layer, fn):
    """Maps the given function across this sequence.

    To map an entire template across the sequence, use the `as_fn` method
    on the template.

    Args:
        input_layer: The input tensor.
        fn: A function of 1 argument that is applied to each item in the
            sequence.

    Returns:
        A new sequence Pretty Tensor.

    Raises:
        ValueError: If the input_layer does not hold a sequence.
    """
    if not input_layer.is_sequence():
        raise ValueError('Can only map a sequence.')
    result = []
    for item in input_layer:
        result.append(fn(item))
    return result
def _get_auth_challenge(self, exc):
    """Return an HttpResponse carrying a Basic auth challenge for the client."""
    resp = HttpResponse(content=exc.content, status=exc.get_code_num())
    resp['WWW-Authenticate'] = 'Basic realm="%s"' % REALM
    return resp
def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), context=None, **kwargs):
    """Builds a `tf.train.Saver` for the scope or module, with normalized names.

    The names of the variables are normalized to remove the scope prefix.
    This allows the same variables to be restored into another similar scope
    or module using a complementary `tf.train.Saver` object.

    Args:
        scope: Scope or module. Variables within will be saved or restored.
        collections: Sequence of collections of variables to restrict
            `tf.train.Saver` to. By default this is
            `tf.GraphKeys.GLOBAL_VARIABLES` which includes moving averages
            variables as well as trainable variables.
        context: Scope or module, identical to or parent of `scope`. If
            given, this will be used as the stripped prefix.
        **kwargs: Extra keyword arguments to pass to tf.train.Saver.

    Returns:
        A `tf.train.Saver` object for Variables in the scope or module.
    """
    # Merge the normalized name -> variable maps across all collections.
    variable_map = {}
    for collection in collections:
        variable_map.update(get_normalized_variable_map(scope, collection, context))
    return tf.train.Saver(var_list=variable_map, **kwargs)
def disenriched(self, thresh=0.05, idx=True):
    """Disenriched features.

    {threshdoc}
    """
    # Disenrichment is simply downregulation under this model.
    return self.downregulated(thresh=thresh, idx=idx)
def get_installed_packages(site_packages, site_packages_64):
    """Returns a dict of installed packages that Zappa cares about.

    Packages are included if listed under either site-packages directory,
    or physically located in one of them.
    """
    import pkg_resources

    names = []
    for directory in (site_packages, site_packages_64):
        if os.path.isdir(directory):
            names += os.listdir(directory)
    names = [n.lower() for n in names]

    site_dirs = [site_packages.lower(), site_packages_64.lower()]
    return {
        pkg.project_name.lower(): pkg.version
        for pkg in pkg_resources.WorkingSet()
        if pkg.project_name.lower() in names
        or pkg.location.lower() in site_dirs
    }
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
    """Pretty prints object name.

    @obj: the object whose name you want to pretty print
    @maxlen: #int maximum length of an object name to print
    @color: your choice of :mod:colors or |None|
    -> #str pretty object name

    ..
        from vital.debug import Look
        print(Look.pretty_objname(dict))
        # -> 'dict\x1b[1;36m<builtins>\x1b[1;m'
    ..
    """
    parent = lambda_sub("", get_parent_name(obj) or "")
    name = get_obj_name(obj)
    if color:
        name += colorize("<{}>".format(parent), color, close=False)
    else:
        name += "<{}>".format(parent)
    # Truncate over-long names, keeping a closing marker.
    if len(name) >= maxlen:
        name = name[:maxlen - 1] + "…>"
    if color:
        name += colors.RESET
    return name
def get_path_matching(name):
    """Get path matching a name.

    Looks first for ``~/name``; failing that, searches the components of
    the current working directory for one equal to *name*.

    Parameters
    ----------
    name : string
        Name to search for.

    Returns
    -------
    string
        Full filepath (or None when nothing matches).
    """
    candidate = os.path.join(os.path.expanduser("~"), name)
    if os.path.isdir(candidate):
        return candidate
    drive, tail = os.path.splitdrive(os.getcwd())
    parts = tail.split(os.sep)
    parts.insert(0, os.sep)
    if name in parts:
        return os.path.join(drive, *parts[: parts.index(name) + 1])
    return None
def _get_domain(conn, *vms, **kwargs):
    """Return a domain object for the named VM or for all VMs.

    :param conn: libvirt connection object
    :param vms: list of domain names to look for
    :param iterable: True to return an array in all cases
    """
    all_vms = []
    # Collect running domains (by ID) and defined-but-inactive domains.
    if kwargs.get('active', True):
        for id_ in conn.listDomainsID():
            all_vms.append(conn.lookupByID(id_).name())
    if kwargs.get('inactive', True):
        for id_ in conn.listDefinedDomains():
            all_vms.append(id_)
    if not all_vms:
        raise CommandExecutionError('No virtual machines found.')

    if vms:
        for name in vms:
            if name not in all_vms:
                raise CommandExecutionError('The VM "{name}" is not present'.format(name=name))
        lookup_vms = list(vms)
    else:
        lookup_vms = list(all_vms)

    ret = [conn.lookupByName(name) for name in lookup_vms]
    # Single non-iterable result unwraps to the bare domain object.
    if len(ret) == 1 and not kwargs.get('iterable') and ret[0]:
        return ret[0]
    return ret
def create_model_package_from_algorithm(self, name, description, algorithm_arn, model_data):
    """Create a SageMaker Model Package from the results of training with
    an Algorithm Package.

    Args:
        name (str): ModelPackage name
        description (str): Model Package description
        algorithm_arn (str): arn or name of the algorithm used for training.
        model_data (str): s3 URI to the model artifacts produced by training
    """
    request = {
        'ModelPackageName': name,
        'ModelPackageDescription': description,
        'SourceAlgorithmSpecification': {
            'SourceAlgorithms': [
                {'AlgorithmName': algorithm_arn, 'ModelDataUrl': model_data}
            ]
        }
    }
    try:
        LOGGER.info('Creating model package with name: {}'.format(name))
        self.sagemaker_client.create_model_package(**request)
    except ClientError as e:
        error_code = e.response['Error']['Code']
        message = e.response['Error']['Message']
        # An already-existing package is not an error — reuse it.
        if (error_code == 'ValidationException'
                and 'ModelPackage already exists' in message):
            LOGGER.warning('Using already existing model package: {}'.format(name))
        else:
            raise
def get_host(self, hostname):
    """Return a Host dict with config options, or None if none exists."""
    if hostname in self.get_hosts():
        return self.load_ssh_conf().lookup(hostname)
    logger.warn('Tried to find host with name {0}, but host not found'.format(hostname))
    return None
def get_contract_by_hash(self, contract_hash):
    """Get the mapped contract_address by its hash.

    Raises AddressNotFoundError when no mapping exists (caller may then
    try indexing).
    """
    address = self.db.reader._get_address_by_hash(contract_hash)
    if address is None:
        raise AddressNotFoundError
    return address
def _write_field(self, value): class_name = str(value.__class__) if class_name not in self.handlers: raise ValueError('No handler has been registered for class: {0!s}'.format(class_name)) handler = self.handlers[class_name] handler(value, self._file)
Write a single field to the destination file. :param T value: The value of the field.
def _do_merge(orig_files, out_file, config, region):
    """Do the actual work of merging with bcftools merge."""
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            _check_samples_nodups(orig_files)
            prep_files = run_multicore(p_bgzip_and_index,
                                       [[x, config] for x in orig_files],
                                       config)
            # Write the list of prepared inputs to a file for `cat` below.
            input_vcf_file = "%s-files.txt" % utils.splitext_plus(out_file)[0]
            with open(input_vcf_file, "w") as out_handle:
                for fname in prep_files:
                    out_handle.write(fname + "\n")
            # NOTE: these local names are interpolated via locals() below,
            # so they must keep these exact names.
            bcftools = config_utils.get_program("bcftools", config)
            output_type = "z" if out_file.endswith(".gz") else "v"
            region_str = "-r {}".format(region) if region else ""
            cmd = "{bcftools} merge -O {output_type} {region_str} `cat {input_vcf_file}` > {tx_out_file}"
            do.run(cmd.format(**locals()), "Merge variants")
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
def _run_module_code(code, init_globals=None, mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in a new namespace with sys.argv[0] modified."""
    with _ModifiedArgv0(mod_fname):
        with _TempModule(mod_name) as temp_module:
            mod_globals = temp_module.module.__dict__
            _run_code(code, mod_globals, init_globals,
                      mod_name, mod_fname, mod_loader, pkg_name)
    # Copy the globals: the temporary module may be cleaned up on exit.
    return mod_globals.copy()
def _run_node_distribution_command(self, command, workunit): process = command.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr')) return process.wait()
Runs a NodeDistribution.Command for _execute_command and returns its return code. Passes any additional kwargs to command.run (which passes them, modified, to subprocess.Popen). Override this in a Task subclass to do something more complicated than just calling command.run() and returning the result of wait(). :param NodeDistribution.Command command: The command to run. :param WorkUnit workunit: The WorkUnit the command is running under. :returns: returncode :rtype: int
def deleteICM(uuid: str):
    """Deletes an ICM (metadata row with the given id); responds 204."""
    record = ICMMetadata.query.filter_by(id=uuid).first()
    db.session.delete(record)
    db.session.commit()
    return ("", 204)
def marketPrice(self) -> float:
    """Return the first available one of

    * last price if within current bid/ask;
    * average of bid and ask (midpoint);
    * close price.
    """
    if self.hasBidAsk() and self.bid <= self.last <= self.ask:
        price = self.last
    else:
        price = self.midpoint()
    if isNan(price):
        # Fall back to the close when neither last nor midpoint is usable.
        price = self.close
    return price
def get_tags():
    """Get tags, as JSON under /api/ paths or rendered HTML otherwise."""
    bukudb = getattr(flask.g, 'bukudb', get_bukudb())
    result = {'tags': bukudb.get_tag_all()[0]}
    if request.path.startswith('/api/'):
        return jsonify(result)
    return render_template('bukuserver/tags.html', result=result)
def _read_nowait(self, n: int) -> bytes: chunks = [] while self._buffer: chunk = self._read_nowait_chunk(n) chunks.append(chunk) if n != -1: n -= len(chunk) if n == 0: break return b''.join(chunks) if chunks else b''
Read not more than n bytes, or whole buffer is n == -1
def firstAttr(self, *attrs):
    """Return the first attribute in attrs that is not None."""
    for name in attrs:
        candidate = self.__dict__.get(name)
        if candidate is not None:
            return candidate
    # Implicitly returns None when all attributes are missing or None.
def fixchars(self, text):
    """Find and replace problematic characters (per Config.CHARFIXES)."""
    table = str.maketrans(''.join(Config.CHARFIXES.keys()),
                          ''.join(Config.CHARFIXES.values()))
    fixed = text.translate(table)
    if fixed != text:
        # Record that a substitution actually happened.
        self.modified = True
    return fixed
def get_logger(name):
    """Return an INFO-level logger with a file handler writing to ``log_path``.

    A handler is only attached when the logger has none yet, so repeated
    calls with the same name no longer stack duplicate handlers (which
    caused every log line to be written multiple times).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    if not logger.handlers:
        file_handler = logging.FileHandler(log_path)
        file_handler.setLevel(logging.INFO)
        formatter = logging.Formatter(
            '%(asctime)s %(name)12s %(levelname)8s %(lineno)s %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def on_right_click_listctrl(self, event):
    """Right click on the listctrl toggles the measurement good/bad flag."""
    row = event.GetIndex()
    # Toggle the flag for the clicked measurement.
    if self.Data[self.s]['measurement_flag'][row] == 'g':
        self.mark_meas_bad(row)
    else:
        self.mark_meas_good(row)
    # Persist in the format matching the active MagIC data model.
    if self.data_model == 3.0:
        self.con.tables['measurements'].write_magic_file(dir_path=self.WD)
    else:
        pmag.magic_write(os.path.join(self.WD, "magic_measurements.txt"),
                         self.mag_meas_data, "magic_measurements")
    self.recalculate_current_specimen_interpreatations()
    if self.ie_open:
        self.ie.update_current_fit_data()
    self.calculate_high_levels_data()
    self.update_selection()
def delete_service(resource_root, name, cluster_name="default"):
    """Delete a service by name.

    @param resource_root: The root Resource object.
    @param name: Service name
    @param cluster_name: Cluster name
    @return: The deleted ApiService object
    """
    path = "%s/%s" % (SERVICES_PATH % (cluster_name,), name)
    return call(resource_root.delete, path, ApiService)
def _read_and_batch_from_files(
        file_pattern, batch_size, max_length, num_cpu_cores, shuffle, repeat):
    """Create dataset where each item is a dict of "inputs" and "targets".

    Args:
        file_pattern: String used to match the input TFRecord files.
        batch_size: Maximum number of tokens per batch of examples
        max_length: Maximum number of tokens per example
        num_cpu_cores: Number of cpu cores for parallel input processing.
        shuffle: If true, randomizes order of elements.
        repeat: Number of times to repeat the dataset. If None, the dataset
            is repeated forever.

    Returns:
        tf.data.Dataset object containing examples loaded from the files.
    """
    dataset = tf.data.Dataset.list_files(file_pattern)

    if shuffle:
        mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
        dataset = dataset.shuffle(buffer_size=_FILE_SHUFFLE_BUFFER)

    # Interleave record loading across files; sloppy order only if shuffling.
    dataset = dataset.apply(
        tf.contrib.data.parallel_interleave(
            _load_records, sloppy=shuffle, cycle_length=num_cpu_cores))

    dataset = dataset.map(_parse_example, num_parallel_calls=num_cpu_cores)
    # Drop examples longer than max_length.
    dataset = dataset.filter(
        lambda x, y: _filter_max_length((x, y), max_length))

    mlperf_log.transformer_print(key=mlperf_log.INPUT_BATCH_SIZE,
                                 value=batch_size)
    mlperf_log.transformer_print(key=mlperf_log.INPUT_MAX_LENGTH,
                                 value=max_length)

    dataset = _batch_examples(dataset, batch_size, max_length)
    dataset = dataset.repeat(repeat)
    dataset = dataset.prefetch(1)
    return dataset
def add_leaves(self, values_array, do_hash=False):
    """Add leaves to the tree.

    Similar to the chainpoint merkle tree library, this accepts hash values
    as an array of buffers or hex strings.

    :param values_array: array of values to add
    :param do_hash: whether to hash the values before inserting
    """
    # Any previously computed root is invalidated by new leaves.
    self.tree['is_ready'] = False
    # Plain loop instead of a throwaway list comprehension that was used
    # only for its side effects.
    for value in values_array:
        self._add_leaf(value, do_hash)
def get_default_config(self):
    """Returns the xfs collector settings."""
    config = super(XFSCollector, self).get_default_config()
    # Only the metric path differs from the base collector defaults.
    config.update({'path': 'xfs'})
    return config
def push_reindex_to_actions_pool(obj, idxs=None):
    """Push a reindex job for *obj* to the actions handler pool.

    :param idxs: optional list of index names; falsy means all indexes.
    """
    # Replaces the legacy `idxs and idxs or []` idiom.
    indexes = idxs if idxs else []
    pool = ActionHandlerPool.get_instance()
    pool.push(obj, "reindex", success=True, idxs=indexes)
def map_sequence(stmts_in, **kwargs):
    """Map sequences using the SiteMapper.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to map.
    do_methionine_offset : boolean
        Whether to check for off-by-one errors in site position attributable
        to site numbering from mature proteins. Default is True.
    do_orthology_mapping : boolean
        Whether to check sequence positions for known modification sites in
        mouse or rat sequences (PhosphoSitePlus data). Default is True.
    do_isoform_mapping : boolean
        Whether to check sequence positions for known modifications in other
        human isoforms of the protein. Default is True.
    use_cache : boolean
        If True, a cache is created/used from the location specified by
        SITEMAPPER_CACHE_PATH. Default is False.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of mapped statements.
    """
    from indra.preassembler.sitemapper import SiteMapper, default_site_map
    logger.info('Mapping sites on %d statements...' % len(stmts_in))
    kwarg_list = ['do_methionine_offset', 'do_orthology_mapping',
                  'do_isoform_mapping']
    sm = SiteMapper(default_site_map,
                    use_cache=kwargs.pop('use_cache', False),
                    **_filter(kwargs, kwarg_list))
    valid, mapped = sm.map_sites(stmts_in)
    # Keep only statements whose modifications all mapped successfully.
    correctly_mapped_stmts = [
        ms.mapped_stmt for ms in mapped
        if all(mm.has_mapping() for mm in ms.mapped_mods)
    ]
    stmts_out = valid + correctly_mapped_stmts
    logger.info('%d statements with valid sites' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    del sm
    return stmts_out
def load(path, use_nep8=True):
    """Load a Python file to be compiled, but do not write to .avm.

    :param path: the path of the Python file to compile
    :return: The instance of the compiler

    .. code-block:: python

        from boa.compiler import Compiler
        compiler = Compiler.load('path/to/your/file.py')
    """
    # Reset the singleton before building a fresh compiler instance.
    Compiler.__instance = None
    compiler = Compiler.instance()
    compiler.nep8 = use_nep8
    compiler.entry_module = Module(path)
    return compiler
def _update_context_field_binary_composition(present_locations, expression):
    """Lower BinaryCompositions involving non-existent ContextFields to True.

    Args:
        present_locations: set of all locations in the current MatchQuery
            that have not been pruned
        expression: BinaryComposition with at least one ContextField operand

    Returns:
        TrueLiteral iff either ContextField operand is not in
        `present_locations`, and the original expression otherwise
    """
    if not (isinstance(expression.left, ContextField) or
            isinstance(expression.right, ContextField)):
        raise AssertionError(u'Received a BinaryComposition {} without any ContextField '
                             u'operands. This should never happen.'.format(expression))

    # Check each operand (left first, matching the original order); any
    # ContextField referring to a pruned location lowers the whole
    # expression to True.
    for operand in (expression.left, expression.right):
        if isinstance(operand, ContextField):
            location_name, _ = operand.location.get_location_name()
            if location_name not in present_locations:
                return TrueLiteral

    return expression
def find(self, header, list_type=None):
    """Find the first chunk with specified header and optional list type."""
    for child in self:
        if child.header == header and (
                list_type is None or
                (header in list_headers and child.type == list_type)):
            return child
        if child.header in list_headers:
            # Recurse into nested list chunks; a miss there is not fatal.
            try:
                return child.find(header, list_type)
            except child.NotFound:
                pass
    if list_type is None:
        raise self.NotFound("Chunk '{0}' not found.".format(header))
    raise self.NotFound("List '{0} {1}' not found.".format(header, list_type))
def summary(app):
    """Print a summary of a deployed app's status."""
    resp = requests.get('https://{}.herokuapp.com/summary'.format(app))
    rows = resp.json()['summary']
    click.echo("\nstatus \t| count")
    click.echo("----------------")
    for row in rows:
        click.echo("{}\t| {}".format(row[0], row[1]))
    # Yield = fraction of 10x statuses that are exactly 101.
    num_101s = sum(row[1] for row in rows if row[0] == 101)
    num_10xs = sum(row[1] for row in rows if row[0] >= 100)
    if num_10xs > 0:
        click.echo("\nYield: {:.2%}".format(1.0 * num_101s / num_10xs))
def to_array(self):
    """Serializes this WebhookInfo to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(WebhookInfo, self).to_array()
    # Required fields.
    array['url'] = u(self.url)
    array['has_custom_certificate'] = bool(self.has_custom_certificate)
    array['pending_update_count'] = int(self.pending_update_count)
    # Optional fields, emitted only when set.
    if self.last_error_date is not None:
        array['last_error_date'] = int(self.last_error_date)
    if self.last_error_message is not None:
        array['last_error_message'] = u(self.last_error_message)
    if self.max_connections is not None:
        array['max_connections'] = int(self.max_connections)
    if self.allowed_updates is not None:
        array['allowed_updates'] = self._as_array(self.allowed_updates)
    return array
def is_stalemate(self) -> bool:
    """Checks if the current position is a stalemate."""
    if self.is_check() or self.is_variant_end():
        return False
    # Stalemate: not in check, game not over, yet no legal move exists.
    return not any(self.generate_legal_moves())
async def listen(self):
    """Listen for messages on channels this client has been subscribed to."""
    if not self.subscribed:
        return None
    response = await self.parse_response(block=True)
    return self.handle_message(response)
def _symbol_bars(
        self, symbols, size, _from=None, to=None, limit=None):
    """Query historic_agg (minute or day) in parallel for multiple symbols.

    symbols: list[str]
    size: str ('day', 'minute')
    _from: str or pd.Timestamp
    to: str or pd.Timestamp
    limit: str or int

    return: dict[str -> pd.DataFrame]
    """
    assert size in ('day', 'minute')

    # Over-fetch to compensate for rows dropped by the calendar reindex.
    query_limit = limit
    if query_limit is not None:
        query_limit *= 2

    @skip_http_error((404, 504))
    def fetch(symbol):
        df = self._api.polygon.historic_agg(
            size, symbol, _from, to, query_limit).df
        if size == 'minute':
            # Shift bar timestamps to the end of each minute, then align
            # with the trading calendar's minutes (NY time).
            df.index += pd.Timedelta('1min')
            mask = self._cal.minutes_in_range(
                df.index[0], df.index[-1],
            ).tz_convert(NY)
            df = df.reindex(mask)
        if limit is not None:
            df = df.iloc[-limit:]
        return df

    return parallelize(fetch)(symbols)
def encode_task(task):
    """Convert a dict-like task to its Taskwarrior string representation.

    Tags are joined with commas, unsafe characters are escaped via
    encode_replacements, and datetimes are rendered in Taskwarrior's
    ``YYYYMMDDTHHMMSSZ`` form.
    """
    # Clone the task so the caller's dict is not mutated.
    task = task.copy()
    if 'tags' in task:
        task['tags'] = ','.join(task['tags'])
    for k in task:
        for unsafe, safe in six.iteritems(encode_replacements):
            if isinstance(task[k], six.string_types):
                task[k] = task[k].replace(unsafe, safe)
        if isinstance(task[k], datetime.datetime):
            # Bug fix: the format was "%Y%m%dT%M%H%SZ" — minutes and hours
            # were swapped. Taskwarrior expects hours before minutes.
            task[k] = task[k].strftime("%Y%m%dT%H%M%SZ")
    return "[%s]\n" % " ".join([
        "%s:\"%s\"" % (k, v)
        for k, v in sorted(task.items(), key=itemgetter(0))
    ])
def from_columns(columns):
    """Parses raw columns.

    :param columns: matrix divided into columns
    :return: Matrix: Merge the columns to form a matrix

    NOTE(review): this copies each column as-is — no transposition is
    performed, so the resulting data is column-major. Confirm that
    Matrix expects that layout here.
    """
    data = [
        [col[idx] for idx in range(len(col))]
        for col in columns
    ]
    return Matrix(data)
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]:
    """Sorts a list of strings alphabetically.

    For example: ['a1', 'A11', 'A2', 'a22', 'a3']

    To sort a list in place, don't call this method, which makes a copy.
    Instead, do this: ``my_list.sort(key=norm_fold)``.

    :param list_to_sort: the list being sorted
    :return: the sorted list
    """
    return sorted(list_to_sort, key=norm_fold)
def select(self, field_paths):
    """Create a "select" query with this collection as parent.

    See :meth:`~.firestore_v1beta1.query.Query.select` for more information
    on this method.

    Args:
        field_paths (Iterable[str, ...]): An iterable of field paths
            (``.``-delimited list of field names) to use as a projection
            of document fields in the query results.

    Returns:
        ~.firestore_v1beta1.query.Query: A "projected" query.
    """
    return query_mod.Query(self).select(field_paths)
def to_text(path):
    """Wrapper around Poppler pdftotext.

    Parameters
    ----------
    path : str
        path of electronic invoice in PDF

    Returns
    -------
    out : bytes
        extracted text from pdf

    Raises
    ------
    EnvironmentError:
        If pdftotext library is not found
    """
    import shutil
    import subprocess

    # shutil.which replaces distutils.spawn.find_executable — distutils
    # was deprecated (PEP 632) and removed in Python 3.12.
    if shutil.which("pdftotext"):
        out, err = subprocess.Popen(
            ["pdftotext", '-layout', '-enc', 'UTF-8', path, '-'],
            stdout=subprocess.PIPE
        ).communicate()
        return out
    raise EnvironmentError(
        'pdftotext not installed. Can be downloaded from https://poppler.freedesktop.org/'
    )
def _link_replace(self, match, **context): url = match.group(0) if self.linker: if self.linker_takes_context: return self.linker(url, context) else: return self.linker(url) else: href = url if '://' not in href: href = 'http://' + href return self.url_template.format(href=href.replace('"', '%22'), text=url)
Callback for re.sub to replace link text with markup. Turns out using a callback function is actually faster than using backrefs, plus this lets us provide a hook for user customization. linker_takes_context=True means that the linker gets passed context like a standard format function.
def transform_annotation(self, ann, duration):
    """Transform an annotation to a static (track-level) label encoding.

    Parameters
    ----------
    ann : jams.Annotation
        The annotation to convert.
    duration : number > 0
        The duration of the track (unused in the static encoding).

    Returns
    -------
    data : dict
        data['tags'] : np.ndarray, shape=(n_labels,)
            A static binary encoding of the labels.
    """
    intervals = np.asarray([[0, 1]])
    values = [obs.value for obs in ann]
    intervals = np.tile(intervals, [len(values), 1])

    # Only keep labels the encoder actually knows about.
    tags = [v for v in values if v in self._classes]
    if len(tags):
        # np.bool was removed in NumPy 1.24; builtin bool is the
        # documented replacement.
        target = self.encoder.transform([tags]).astype(bool).max(axis=0)
    else:
        target = np.zeros(len(self._classes), dtype=bool)

    return {'tags': target}
Transform an annotation to static label encoding. Parameters ---------- ann : jams.Annotation The annotation to convert duration : number > 0 The duration of the track Returns ------- data : dict data['tags'] : np.ndarray, shape=(n_labels,) A static binary encoding of the labels
def start(self, execution_history, backward_execution=False, generate_run_id=True):
    """Start execution of the state in a new thread.

    Stores the execution history and backward-execution flag on the
    instance, optionally generates a fresh run id, then launches
    ``self.run`` on a daemonless worker thread kept in ``self.thread``.
    """
    self.execution_history = execution_history
    if generate_run_id:
        self._run_id = run_id_generator()
    self.backward_execution = copy.copy(backward_execution)
    worker = threading.Thread(target=self.run)
    self.thread = worker
    worker.start()
Starts the execution of the state in a new thread. :return:
def vote_choice_address(self) -> List[str]:
    """Calculate the addresses on which each vote choice is cast.

    One address is derived per entry in ``self.choices``; the private
    key is sha256(vote_init_txid + bytes(choice_index)).

    NOTE(review): ``bytes(index)`` yields *index* zero bytes, not the
    byte value of the index.  Indexes still map to distinct keys, so
    this may be intentional — confirm against the vote protocol spec
    before changing.
    """
    if self.vote_id is None:
        raise Exception("vote_id is required")
    addresses = []
    vote_init_txid = unhexlify(self.vote_id)
    for choice in self.choices:
        # Key material: txid bytes followed by <index> zero bytes.
        vote_cast_privkey = sha256(vote_init_txid + bytes(
            list(self.choices).index(choice))
        ).hexdigest()
        addresses.append(Kutil(network=self.deck.network,
                               privkey=bytearray.fromhex(vote_cast_privkey)).address)
    return addresses
calculate the addresses on which the vote is casted.
def requires_public_key(func):
    """Decorator for methods that require ``self.public_key`` to exist.

    By definition this includes the private key, so decorating with this
    is enough to ensure both public and private keys are defined before
    the wrapped method runs.
    """
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        if not hasattr(self, "public_key"):
            self.generate_public_key()
        # Propagate the wrapped function's return value; the original
        # wrapper discarded it and always returned None.
        return func(self, *args, **kwargs)
    return func_wrapper
Decorator for functions that require the public key to be defined. By definition, this includes the private key, as such, it's enough to use this to effect definition of both public and private key.
def calculate_error(self):
    """Estimate the numerical error based on the fluxes calculated by
    the current and the last method.

    >>> from hydpy.models.test_v1 import *
    >>> parameterstep()
    >>> model.numvars.idx_method = 2
    >>> results = numpy.asarray(fluxes.fastaccess._q_results)
    >>> results[:4] = 0., 3., 4., 0.
    >>> model.calculate_error()
    >>> from hydpy import round_
    >>> round_(model.numvars.error)
    1.0
    """
    self.numvars.error = 0.
    fluxes = self.sequences.fluxes
    for flux in fluxes.numerics:
        # Per-flux result history recorded by the numerical solver.
        results = getattr(fluxes.fastaccess, '_%s_results' % flux.name)
        # Difference between the current method's result and the
        # previous method's result.
        diff = (results[self.numvars.idx_method] -
                results[self.numvars.idx_method-1])
        # Track the worst-case absolute difference over all fluxes.
        self.numvars.error = max(self.numvars.error,
                                 numpy.max(numpy.abs(diff)))
Estimate the numerical error based on the fluxes calculated by the current and the last method. >>> from hydpy.models.test_v1 import * >>> parameterstep() >>> model.numvars.idx_method = 2 >>> results = numpy.asarray(fluxes.fastaccess._q_results) >>> results[:4] = 0., 3., 4., 0. >>> model.calculate_error() >>> from hydpy import round_ >>> round_(model.numvars.error) 1.0
def mlt2mlon(self, mlt, datetime, ssheight=50*6371):
    """Compute the magnetic longitude at the given magnetic local time and UT.

    The apex longitude of the subsolar point at ``datetime`` is found
    first; the magnetic longitude then follows from the MLT separation
    (1 hour of MLT = 15 degrees).  ``ssheight`` is the altitude (km)
    used to map the subsolar point to magnetic coordinates; the high
    default keeps it clear of the South-Atlantic Anomaly.

    Returns magnetic longitude in [0, 360) (apex and quasi-dipole
    longitude are equal).
    """
    ssglat, ssglon = helpers.subsol(datetime)
    ssalat, ssalon = self.geo2apex(ssglat, ssglon, ssheight)
    mlon = 15 * np.float64(mlt) - 180 + ssalon + 360
    return mlon % 360
Computes the magnetic longitude at the specified magnetic local time and UT. Parameters ========== mlt : array_like Magnetic local time datetime : :class:`datetime.datetime` Date and time ssheight : float, optional Altitude in km to use for converting the subsolar point from geographic to magnetic coordinates. A high altitude is used to ensure the subsolar point is mapped to high latitudes, which prevents the South-Atlantic Anomaly (SAA) from influencing the MLT. Returns ======= mlon : ndarray or float Magnetic longitude [0, 360) (apex and quasi-dipole longitude are always equal) Notes ===== To compute the magnetic longitude, we find the apex longitude of the subsolar point at the given time. Then the magnetic longitude of the given point will be computed from the separation in magnetic local time from this point (1 hour = 15 degrees).
def flatten_dir_tree(self, tree):
    """Flatten a nested file tree into a dict keyed by full path.

    Each node of *tree* is a dict with 'dirs' (name -> subtree) and
    'files' (name -> file_info).  Every file_info gets a 'path' key set
    to its '/'-joined location, and the result maps that path to the
    file_info dict.
    """
    result = {}

    def helper(subtree, leading_path=''):
        dirs = subtree['dirs']
        files = subtree['files']
        # dict.iteritems() is Python-2-only; items() works on both.
        for name, file_info in files.items():
            file_info['path'] = leading_path + '/' + name
            result[file_info['path']] = file_info
        for name, contents in dirs.items():
            helper(contents, leading_path + '/' + name)

    helper(tree)
    return result
Convert a file tree back into a flat dict
def load_weights(self):
    """Materialize network weights via the loader, if one is pending.

    After the call, ``self.network_weights_loader`` is None and
    ``self.network_weights`` holds the loaded weights (when a loader
    was set); otherwise nothing changes.
    """
    loader = self.network_weights_loader
    if loader:
        self.network_weights = loader()
        self.network_weights_loader = None
Load weights by evaluating self.network_weights_loader, if needed. After calling this, self.network_weights_loader will be None and self.network_weights will be the weights list, if available.
def get_serializer(self, node):
    """Return the serializer registered for *node*'s type, or None.

    :Args:
      - node (:class:`ooxml.doc.Element`): Element object

    :Returns:
      The serializer callable registered for ``type(node)``, or None
      when the type is unregistered.
    """
    # The original contained an unreachable duplicate lookup after this
    # return statement; dict.get already covers both cases.
    return self.options['serializers'].get(type(node), None)
Returns serializer for specific element. :Args: - node (:class:`ooxml.doc.Element`): Element object :Returns: Returns reference to a function which will be used for serialization.
def make_initial_frame_chooser(
    real_env,
    frame_stack_size,
    simulation_random_starts,
    simulation_flip_first_random_for_beginning,
    split=tf.estimator.ModeKeys.TRAIN,
):
    """Make frame chooser.

    Args:
      real_env: T2TEnv to take initial frames from.
      frame_stack_size (int): Number of consecutive frames to extract.
      simulation_random_starts (bool): Whether to choose frames at random.
      simulation_flip_first_random_for_beginning (bool): Whether to flip the
        first frame stack in every batch for the frames at the beginning.
      split (tf.estimator.ModeKeys or None): Data split to take the frames
        from, None means use all frames.

    Returns:
      Function batch_size -> initial_frames.
    """
    # Only rollouts long enough to supply a full frame stack qualify.
    initial_frame_rollouts = real_env.current_epoch_rollouts(
        split=split, minimal_rollout_frames=frame_stack_size,
    )
    def initial_frame_chooser(batch_size):
        # Deterministic choice: the first frames of the first rollout.
        deterministic_initial_frames =\
            initial_frame_rollouts[0][:frame_stack_size]
        if not simulation_random_starts:
            initial_frames = [deterministic_initial_frames] * batch_size
        else:
            # Random subsequences of length frame_stack_size.
            initial_frames = random_rollout_subsequences(
                initial_frame_rollouts, batch_size, frame_stack_size
            )
            if simulation_flip_first_random_for_beginning:
                initial_frames[0] = deterministic_initial_frames
        # Stack into shape (batch, frame_stack_size, ...observation...).
        return np.stack([
            [frame.observation.decode() for frame in initial_frame_stack]
            for initial_frame_stack in initial_frames
        ])
    return initial_frame_chooser
Make frame chooser. Args: real_env: T2TEnv to take initial frames from. frame_stack_size (int): Number of consecutive frames to extract. simulation_random_starts (bool): Whether to choose frames at random. simulation_flip_first_random_for_beginning (bool): Whether to flip the first frame stack in every batch for the frames at the beginning. split (tf.estimator.ModeKeys or None): Data split to take the frames from, None means use all frames. Returns: Function batch_size -> initial_frames.
def _read_from_sql(self, request, db_name):
    """Run *request* against '<db_name>.db' and return a DataFrame.

    contextlib.closing guarantees the sqlite connection is closed once
    the query has been read.
    """
    db_file = "{}.db".format(db_name)
    with contextlib.closing(sqlite3.connect(db_file)) as con:
        return sql.read_sql(sql=request, con=con)
Using the contextlib, I hope to close the connection to database when not in use
def protect(self, password=None, read_protect=False, protect_from=0):
    """Protect a FeliCa Lite Tag.

    Delegates to the base class.  A non-empty *password* (>= 16 bytes of
    key material, or empty for the manufacturer default key) provisions
    the tag; read protection is not supported on FeliCa Lite.  The
    *protect_from* unit is 16 bytes; with the default 0 and valid NDEF
    management data the NDEF RW flag is set to read only.
    """
    parent = super(FelicaLite, self)
    return parent.protect(password, read_protect, protect_from)
Protect a FeliCa Lite Tag. A FeliCa Lite Tag can be provisioned with a custom password (or the default manufacturer key if the password is an empty string or bytearray) to ensure that data retrieved by future read operations, after authentication, is genuine. Read protection is not supported. A non-empty *password* must provide at least 128 bit key material, in other words it must be a string or bytearray of length 16 or more. The memory unit for the value of *protect_from* is 16 byte, thus with ``protect_from=2`` bytes 0 to 31 are not protected. If *protect_from* is zero (the default value) and the Tag has valid NDEF management data, the NDEF RW Flag is set to read only.
def ParseApplicationUsageRow(
    self, parser_mediator, query, row, **unused_kwargs):
    """Parses an application usage row.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    application_name = self._GetRowValue(query_hash, row, 'event')
    usage = 'Application {0:s}'.format(application_name)
    event_data = MacOSApplicationUsageEventData()
    event_data.application = self._GetRowValue(query_hash, row, 'app_path')
    event_data.app_version = self._GetRowValue(query_hash, row, 'app_version')
    event_data.bundle_id = self._GetRowValue(query_hash, row, 'bundle_id')
    event_data.count = self._GetRowValue(query_hash, row, 'number_times')
    event_data.query = query
    # 'last_time' is a POSIX timestamp; the event is timestamped with it.
    timestamp = self._GetRowValue(query_hash, row, 'last_time')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, usage)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an application usage row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
def get_max_dim(self, obj):
    """Return the maximum dimensionality (capped at 2) over which *obj*
    is iterable: 0 for non-iterables, 1 for flat iterables, 2 when the
    first element is itself iterable (or the iterable is empty).
    """
    try:
        iterator = iter(obj)
    except TypeError:
        return 0
    for element in iterator:
        try:
            iter(element)
        except TypeError:
            return 1
        return 2
    # Empty iterable: no element contradicts depth 2.
    return 2
Returns maximum dimensionality over which obj is iterable <= 2
def from_isodatetime(value, strict=False):
    """Convert an ISO formatted datetime string into a datetime object.

    :param value: The ISO formatted datetime.
    :param strict: When *value* is falsy, a truthy *strict* still calls
        the parser (arrow treats ``None`` as "now"); otherwise ``None``
        is returned.  (Default: ``False``)
    :returns: The datetime object or ``None``.
    """
    if not (value or strict):
        return None
    return arrow.get(value).datetime
Convert an ISO formatted datetime into a datetime object. :param value: The ISO formatted datetime. :param strict: If value is ``None``, then if strict is ``True`` it returns the current datetime, otherwise it returns ``None``. (Default: ``False``) :returns: The datetime object or ``None``.
def from_gene_ids_and_names(cls, gene_names: Dict[str, str]):
    """Initialize an instance from a mapping of gene ID -> gene name."""
    genes = [ExpGene(gene_id, name=gene_name)
             for gene_id, gene_name in gene_names.items()]
    return cls.from_genes(genes)
Initialize instance from gene IDs and names.
def put(self, endpoint, **kwargs):
    """Send HTTP PUT to the endpoint.

    :arg str endpoint: The endpoint to send to.
    :returns: JSON decoded result.
    :raises: requests.RequestException on timeout or connection error.
    """
    # NOTE(review): update() makes self.kwargs override caller-supplied
    # kwargs (instance defaults win on key collisions) — confirm that
    # precedence is intended rather than the reverse.
    kwargs.update(self.kwargs.copy())
    if "data" in kwargs:
        # assumes self.kwargs always supplies a "headers" dict — verify.
        kwargs["headers"].update(
            {"Content-Type": "application/json;charset=UTF-8"})
    response = requests.put(self.make_url(endpoint), **kwargs)
    return _decode_response(response)
Send HTTP PUT to the endpoint. :arg str endpoint: The endpoint to send to. :returns: JSON decoded result. :raises: requests.RequestException on timeout or connection error.
def parse_task_rawcommand(self, rawcommand_subAST):
    """Parse the rawcommand section of the WDL task AST subtree.

    A raw command is a sequence of parts of two kinds: literal strings
    (Terminal nodes) and substitutable command parameters (Ast nodes).
    For example 'echo ${var1} > out.txt' has parts 'echo ', var1, and
    ' > out.txt'.  Parameters may carry attributes such as 'sep', which
    behaves like Python's ``str.join``.

    :param rawcommand_subAST: A subAST representing some bash command.
    :return: A list of translated command parts, one entry per part.
    """
    command_array = []
    for code_snippet in rawcommand_subAST.attributes["parts"]:
        if isinstance(code_snippet, wdl_parser.Terminal):
            # NOTE(review): every literal part is replaced by the
            # constant "r", discarding its text — this looks
            # truncated/garbled; confirm against the upstream source.
            command_var = "r"
        if isinstance(code_snippet, wdl_parser.Ast):
            if code_snippet.name == 'CommandParameter':
                # Translate the parameter expression, then apply any
                # attributes (e.g. 'sep') to the generated expression.
                code_expr = self.parse_declaration_expressn(code_snippet.attr('expr'), es='')
                code_attributes = self.parse_task_rawcommand_attributes(code_snippet.attr('attributes'))
                command_var = self.modify_cmd_expr_w_attributes(code_expr, code_attributes)
        if isinstance(code_snippet, wdl_parser.AstList):
            raise NotImplementedError
        command_array.append(command_var)
    return command_array
Parses the rawcommand section of the WDL task AST subtree. Task "rawcommands" are divided into many parts. There are 2 types of parts: normal strings, & variables that can serve as changeable inputs. The following example command: 'echo ${variable1} ${variable2} > output_file.txt' Has 5 parts: Normal String: 'echo ' Variable Input: variable1 Normal String: ' ' Variable Input: variable2 Normal String: ' > output_file.txt' Variables can also have additional conditions, like 'sep', which is like the python ''.join() function and in WDL looks like: ${sep=" -V " GVCFs} and would be translated as: ' -V '.join(GVCFs). :param rawcommand_subAST: A subAST representing some bash command. :return: A list=[] of tuples=() representing the parts of the command: e.g. [(command_var, command_type, additional_conditions_list), ...] Where: command_var = 'GVCFs' command_type = 'variable' command_actions = {'sep': ' -V '}
def process_message_notification(request, messages_path):
    """Send every message parsed from the messages file, with mtime caching.

    The parsed messages are cached at module level and refreshed only
    when the file's modification time changes.
    """
    if not messages_path:
        return
    global _MESSAGES_CACHE
    global _MESSAGES_MTIME
    # Stat the file once so the cached mtime always matches the content
    # that was parsed (the original called getmtime twice, racing
    # against concurrent writers between the two stats).
    current_mtime = os.path.getmtime(messages_path)
    if _MESSAGES_CACHE is None or _MESSAGES_MTIME != current_mtime:
        _MESSAGES_CACHE = _get_processed_messages(messages_path)
        _MESSAGES_MTIME = current_mtime
    for msg in _MESSAGES_CACHE:
        msg.send_message(request)
Process all the msg file found in the message directory
def getHeader(self):
    """Return the file header as a dict.

    Each entry is produced by the corresponding accessor method on this
    reader (technician, patient, equipment, dates, ...).
    """
    accessors = {
        "technician": self.getTechnician,
        "recording_additional": self.getRecordingAdditional,
        "patientname": self.getPatientName,
        "patient_additional": self.getPatientAdditional,
        "patientcode": self.getPatientCode,
        "equipment": self.getEquipment,
        "admincode": self.getAdmincode,
        "gender": self.getGender,
        "startdate": self.getStartdatetime,
        "birthdate": self.getBirthdate,
    }
    return {field: read() for field, read in accessors.items()}
Returns the file header as dict Parameters ---------- None
def dot(self, w):
    """Return the dot product of this vector and *w*.

    Pairs are matched positionally; the shorter operand bounds the sum.
    """
    return sum(a * b for a, b in zip(self, w))
Return the dotproduct between self and another vector.
def open_handle(self, dwDesiredAccess = win32.THREAD_ALL_ACCESS):
    """Open a new handle to the thread, closing the previous one.

    The new handle is stored in the ``hThread`` property.  Normally
    callers should prefer ``get_handle()``, which reuses handles and
    merges access rights.

    @type  dwDesiredAccess: int
    @param dwDesiredAccess: Desired access rights.
        Defaults to win32.THREAD_ALL_ACCESS.
    @raise WindowsError: A handle with the requested access rights could
        not be opened (typically a system-process thread while the
        debugger is not running with administrative rights).
    """
    hThread = win32.OpenThread(dwDesiredAccess, win32.FALSE, self.dwThreadId)
    # NOTE(review): the previous handle is explicitly closed only when
    # it lacks a __del__ finalizer (i.e. it won't close itself on
    # garbage collection) — confirm this matches the Handle wrapper's
    # ownership semantics.
    if not hasattr(self.hThread, '__del__'):
        self.close_handle()
    self.hThread = hThread
Opens a new handle to the thread, closing the previous one. The new handle is stored in the L{hThread} property. @warn: Normally you should call L{get_handle} instead, since it's much "smarter" and tries to reuse handles and merge access rights. @type dwDesiredAccess: int @param dwDesiredAccess: Desired access rights. Defaults to L{win32.THREAD_ALL_ACCESS}. See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx} @raise WindowsError: It's not possible to open a handle to the thread with the requested access rights. This tipically happens because the target thread belongs to system process and the debugger is not runnning with administrative rights.
def read_10x_h5(filename, genome=None, gex_only=True) -> AnnData:
    """Read 10x-Genomics-formatted hdf5 file.

    Parameters
    ----------
    filename : `str` | :class:`~pathlib.Path`
        Filename.
    genome : `str`, optional (default: ``None``)
        Filter expression to genes within this genome.  For legacy 10x
        h5 files, this must be provided if the data contains more than
        one genome.
    gex_only : `bool`, optional (default: `True`)
        Only keep 'Gene Expression' data and ignore other feature types,
        e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom'.

    Returns
    -------
    Annotated data matrix, where observations/cells are named by their
    barcode and variables/genes by gene name.
    """
    logg.info('reading', filename, r=True, end=' ')
    # v3 files store everything under a single '/matrix' group.
    with tables.open_file(str(filename), 'r') as f:
        v3 = '/matrix' in f
    if v3:
        adata = _read_v3_10x_h5(filename)
        if genome:
            if genome not in adata.var['genome'].values:
                # Bug fix: the message previously printed the literal
                # '(unknown)' instead of the actual file name, leaving
                # the 'filename' format argument unused.
                raise ValueError(
                    "Could not find data corresponding to genome '{genome}' in '{filename}'. "
                    "Available genomes are: {avail}."
                    .format(
                        genome=genome,
                        filename=filename,
                        avail=list(adata.var["genome"].unique()),
                    )
                )
            adata = adata[:, list(map(lambda x: x == str(genome), adata.var['genome']))]
        if gex_only:
            adata = adata[:, list(map(lambda x: x == 'Gene Expression', adata.var['feature_types']))]
        return adata
    else:
        return _read_legacy_10x_h5(filename, genome=genome)
Read 10x-Genomics-formatted hdf5 file. Parameters ---------- filename : `str` | :class:`~pathlib.Path` Filename. genome : `str`, optional (default: ``None``) Filter expression to this genes within this genome. For legacy 10x h5 files, this must be provided if the data contains more than one genome. gex_only : `bool`, optional (default: `True`) Only keep 'Gene Expression' data and ignore other feature types, e.g. 'Antibody Capture', 'CRISPR Guide Capture', or 'Custom' Returns ------- Annotated data matrix, where obsevations/cells are named by their barcode and variables/genes by gene name. The data matrix is stored in `adata.X`, cell names in `adata.obs_names` and gene names in `adata.var_names`. The gene IDs are stored in `adata.var['gene_ids']`. The feature types are stored in `adata.var['feature_types']`
def getsubtables(self):
    """Return the names of all subtables.

    A subtable is any keyword whose string value starts with 'Table: ';
    the prefix is stripped from the returned names.
    """
    names = []
    for key, value in self.getkeywords().items():
        if isinstance(value, str) and value.startswith('Table: '):
            names.append(_do_remove_prefix(value))
    return names
Get the names of all subtables.
def tabSeparatedSummary(self, sortOn=None):
    """Summarize all alignments for this title, one TAB-separated line
    per title.

    @param sortOn: A C{str} attribute to sort titles on. One of
        'length', 'maxScore', 'medianScore', 'readCount', or 'title'.
    @raise ValueError: If an unknown C{sortOn} value is given.
    @return: A newline-separated C{str}, each line a TAB-separated
        summary of one title.
    """
    fmt = '\t'.join((
        '%(coverage)f',
        '%(medianScore)f',
        '%(bestScore)f',
        '%(readCount)d',
        '%(hspCount)d',
        '%(subjectLength)d',
        '%(subjectTitle)s',
    ))
    return '\n'.join(fmt % titleSummary
                     for titleSummary in self.summary(sortOn))
Summarize all the alignments for this title as multi-line string with TAB-separated values on each line. @param sortOn: A C{str} attribute to sort titles on. One of 'length', 'maxScore', 'medianScore', 'readCount', or 'title'. @raise ValueError: If an unknown C{sortOn} value is given. @return: A newline-separated C{str}, each line with a summary of a title. Each summary line is TAB-separated.
def get_object(self, request, object_id, *args, **kwargs):
    """Make sure the admin object is fetched in the correct language."""
    obj = super(TranslatableAdmin, self).get_object(request, object_id,
                                                    *args, **kwargs)
    # Only switch language when an object was found and its model is
    # actually translatable.
    if obj is not None and self._has_translatable_model():
        obj.set_current_language(self._language(request, obj), initialize=True)
    return obj
Make sure the object is fetched in the correct language.
def _ParseLastRunTime(self, parser_mediator, fixed_length_section):
    """Parses the last run time from a fixed-length data section.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      fixed_length_section (job_fixed_length_data_section): a Windows
          Scheduled Task job fixed-length data section.

    Returns:
      dfdatetime.DateTimeValues: last run date and time or None if not
          available.
    """
    systemtime_struct = fixed_length_section.last_run_time
    system_time_tuple = (
        systemtime_struct.year, systemtime_struct.month,
        systemtime_struct.weekday, systemtime_struct.day_of_month,
        systemtime_struct.hours, systemtime_struct.minutes,
        systemtime_struct.seconds, systemtime_struct.milliseconds)
    date_time = None
    # An all-zero SYSTEMTIME means "never run"; leave date_time as None.
    if system_time_tuple != self._EMPTY_SYSTEM_TIME_TUPLE:
        try:
            date_time = dfdatetime_systemtime.Systemtime(
                system_time_tuple=system_time_tuple)
        except ValueError:
            parser_mediator.ProduceExtractionWarning(
                'invalid last run time: {0!s}'.format(system_time_tuple))
    return date_time
Parses the last run time from a fixed-length data section. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. fixed_length_section (job_fixed_length_data_section): a Windows Scheduled Task job fixed-length data section. Returns: dfdatetime.DateTimeValues: last run date and time or None if not available.
def default(self, node):
    """Record line-number changes for *node*, then delegate to the base
    walker's default handler.
    """
    if getattr(node, 'linestart', None):
        self.source_linemap[self.current_line_number] = node.linestart
    return super(LineMapWalker, self).default(node)
Augment write default routine to record line number changes
def get_details(cls, node, as_model=False):
    """Return a RestJob for *node*, either as a model instance
    (``as_model=True``) or as its underlying document.

    Fields derived from the job record fall back to 'NA' (or an empty
    event log) when the node has no job record yet.
    """
    job_record = node.job_record
    rest_job = RestJob(
        process_name=node.process_name,
        timeperiod=node.timeperiod,
        time_qualifier=node.time_qualifier,
        number_of_children=len(node.children),
        number_of_failures=job_record.number_of_failures if job_record else 'NA',
        state=job_record.state if job_record else 'NA',
        event_log=job_record.event_log if job_record else [])
    return rest_job if as_model else rest_job.document
method returns either RestJob instance or corresponding document, depending on the as_model argument
def AddTableColumn(self, table, column):
    """Append *column* to *table*'s column list unless already present."""
    columns = self._table_columns[table]
    if column not in columns:
        columns.append(column)
Add column to table if it is not already there.
def update(self):
    """Update |KD1| based on |EQD1| and |TInd| (kd1 = eqd1 * tind).

    >>> from hydpy.models.lland import *
    >>> parameterstep('1d')
    >>> eqd1(0.5)
    >>> tind.value = 10.0
    >>> derived.kd1.update()
    >>> derived.kd1
    kd1(5.0)
    """
    con = self.subpars.pars.control
    # Calling the parameter object assigns its new value (HydPy idiom).
    self(con.eqd1*con.tind)
Update |KD1| based on |EQD1| and |TInd|. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> eqd1(0.5) >>> tind.value = 10.0 >>> derived.kd1.update() >>> derived.kd1 kd1(5.0)
def FetchCompletedRequests(self, session_id, timestamp=None):
    """Fetch all the requests with a status message queued for them.

    Yields (request, status) pairs read from the data store, capped at
    ``self.request_limit``.
    """
    if timestamp is None:
        # Default window: everything up to the frozen time (or now).
        timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())
    for request, status in self.data_store.ReadCompletedRequests(
        session_id, timestamp=timestamp, limit=self.request_limit):
        yield request, status
Fetch all the requests with a status message queued for them.
def split(string, callback=None, sep=None):
    """Split *string* on *sep* and optionally map *callback* over the
    resulting parts.

    >>> split("1 2 3 4", int)
    [1, 2, 3, 4]
    """
    parts = string.split(sep)
    if callback is None:
        return parts
    return [callback(part) for part in parts]
Split the string and execute the callback function on each part. >>> string = "1 2 3 4" >>> parts = split(string, int) >>> parts [1, 2, 3, 4]
def import_submodules(package, recursive=True):
    """Import all submodules of a module, recursively, including
    subpackages.

    :param package: package (name or actual module)
    :type package: str | module
    :rtype: dict[str, types.ModuleType]
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    results = {}
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        full_name = "{}.{}".format(package.__name__, name)
        results[name] = importlib.import_module(full_name)
        if is_pkg and recursive:
            results.update(import_submodules(full_name))
    return results
Import all submodules of a module, recursively, including subpackages :param package: package (name or actual module) :type package: str | module :rtype: dict[str, types.ModuleType]
def pad(self, pad_length):
    """Pad the pianoroll with zeros at the end along the time axis.

    Parameters
    ----------
    pad_length : int
        Number of time steps of zeros to append.
    """
    pad_width = ((0, pad_length), (0, 0))
    self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant')
Pad the pianoroll with zeros at the end along the time axis. Parameters ---------- pad_length : int The length to pad with zeros along the time axis.
def delete_set(self, x):
    """Remove the entire equivalence class containing *x*.

    A no-op when *x* is not tracked.  Every member of the class is
    deleted from all four bookkeeping maps.
    """
    if x not in self._parents:
        return
    for member in list(self.members(x)):
        del self._parents[member]
        del self._weights[member]
        del self._prev_next[member]
        del self._min_values[member]
Removes the equivalence class containing `x`.
def copy_r(src, dst):
    """Recursively copy *src* into *dst*, like Unix ``cp -r``.

    Unlike shutil.copytree, this works when the destination directory
    already exists.

    Args:
        src (str): Source folder to copy.
        dst (str): Destination folder.
    """
    abssrc = os.path.abspath(src)
    absdst = os.path.abspath(dst)
    try:
        os.makedirs(absdst)
    except OSError:
        # Destination already exists; that is fine.
        pass
    for name in os.listdir(abssrc):
        fpath = os.path.join(abssrc, name)
        if os.path.isfile(fpath):
            shutil.copy(fpath, absdst)
        elif not (absdst + os.sep).startswith(fpath + os.sep):
            # Compare with a trailing separator so a sibling directory
            # that merely shares a name prefix (e.g. "/a/b" vs "/a/bc")
            # is not mistaken for copying a directory into itself.
            copy_r(fpath, os.path.join(absdst, name))
        else:
            warnings.warn("Cannot copy %s to itself" % fpath)
Implements a recursive copy function similar to Unix's "cp -r" command. Surprisingly, python does not have a real equivalent. shutil.copytree only works if the destination directory is not present. Args: src (str): Source folder to copy. dst (str): Destination folder.
def rdf(self, rdf: Optional[Union[str, Graph]]) -> None:
    """Set the RDF DataSet to be evaluated.

    A Graph is used as-is.  A string containing a newline or carriage
    return is treated as RDF text, a string containing ':' as a URL,
    and anything else as a file name.

    :param rdf: File name, URL, RDF text, or an rdflib Graph
    """
    if isinstance(rdf, Graph):
        self.g = rdf
        return
    self.g = Graph()
    if not isinstance(rdf, str):
        return
    if '\n' in rdf or '\r' in rdf:
        self.g.parse(data=rdf, format=self.rdf_format)
    elif ':' in rdf:
        self.g.parse(location=rdf, format=self.rdf_format)
    else:
        self.g.parse(source=rdf, format=self.rdf_format)
Set the RDF DataSet to be evaulated. If ``rdf`` is a string, the presence of a return is the indicator that it is text instead of a location. :param rdf: File name, URL, representation of rdflib Graph
def serve_protected_thumbnail(request, path):
    """Serve a protected thumbnail to authenticated users.

    Raises Http404 when the thumbnail or its source file cannot be
    resolved, and PermissionDenied (when DEBUG) or Http404 (production)
    when the user lacks read permission on the source file.
    """
    source_path = thumbnail_to_original_filename(path)
    if not source_path:
        raise Http404('File not found')
    try:
        file_obj = File.objects.get(file=source_path)
    except File.DoesNotExist:
        raise Http404('File not found %s' % path)
    if not file_obj.has_read_permission(request):
        # In DEBUG surface the real reason; in production hide the
        # file's existence behind a 404.
        if settings.DEBUG:
            raise PermissionDenied
        else:
            raise Http404('File not found %s' % path)
    try:
        thumbnail = ThumbnailFile(name=path,
                                  storage=file_obj.file.thumbnail_storage)
        return thumbnail_server.serve(request, thumbnail, save_as=False)
    except Exception:
        raise Http404('File not found %s' % path)
Serve protected thumbnails to authenticated users. If the user doesn't have read permissions, raise PermissionDenied (when DEBUG) or Http404 otherwise.
def _compute_global_interested_rts(self):
    """Compute the current globally interested RTs for global tables.

    The result combines the RT filters of all peers with the statically
    configured VRF-interested RTs.  The default RT-membership RT is
    never included: it matters only for the RT table itself, not for
    any other global table.
    """
    interested_rts = set()
    for rtfilter in self._peer_to_rtfilter_map.values():
        interested_rts.update(rtfilter)
    interested_rts.update(self._vrfs_conf.vrf_interested_rts)
    # discard() replaces the original add()-then-remove() sequence,
    # which existed only to avoid a KeyError from remove().
    interested_rts.discard(RouteTargetMembershipNLRI.DEFAULT_RT)
    return interested_rts
Computes current global interested RTs for global tables. Computes interested RTs based on current RT filters for peers. This filter should be used to check if for RTs on a path that is installed in any global table (expect RT Table).