code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def langids(self):
    """Return the device's supported USB language IDs, cached after first use.

    The device is queried once via ``util.get_langids``; a ``USBError``
    caches an empty tuple so the (failing) query is not retried.
    """
    if self._langids is not None:
        return self._langids
    try:
        self._langids = util.get_langids(self)
    except USBError:
        self._langids = ()
    return self._langids
Return the USB device's supported language ID codes. These are 16-bit codes familiar to Windows developers, where for example instead of en-US you say 0x0409. See USB_LANGIDS.pdf on the usb.org developer site for more info. String requests using a LANGID not in this array should not be sent to the device. This property will cause some USB traffic the first time it is accessed and cache the resulting value for future use.
def go_env(self, gopath=None):
    """Build an environment mapping for this Go distribution.

    :param gopath: optional GOPATH value; defaults to ``os.devnull`` so no
        real GOPATH is picked up when none is supplied.
    :returns: OrderedDict with GOROOT and GOPATH.
    """
    effective_gopath = gopath if gopath else os.devnull
    return OrderedDict(GOROOT=self.goroot, GOPATH=effective_gopath)
Return an env dict that represents a proper Go environment mapping for this distribution.
def volume_delete(name, profile=None, **kwargs):
    """Destroy the volume *name* via a nova connection built from *profile*.

    CLI: ``salt '*' nova.volume_delete myblock profile=openstack``
    """
    connection = _auth(profile, **kwargs)
    return connection.volume_delete(name)
Destroy the volume name Name of the volume profile Profile to build on CLI Example: .. code-block:: bash salt '*' nova.volume_delete myblock profile=openstack
def recv(self, timeout=None):
    """Receive one frame from the RX queue.

    Blocks for at most *timeout* seconds (forever when it is None); returns
    None when nothing arrives in time.
    """
    should_block = timeout is None or timeout > 0
    try:
        return self.rx_queue.get(should_block, timeout)
    except queue.Empty:
        return None
Receive an ISOTP frame, blocking if none is available in the buffer for at most 'timeout' seconds.
def mask_umi(umi, umi_quals, quality_encoding, quality_filter_threshold):
    """Return *umi* with every base whose quality is below the threshold
    replaced by "N".

    :param umi: UMI sequence string.
    :param umi_quals: quality string for the UMI.
    :param quality_encoding: encoding passed through to get_below_threshold.
    :param quality_filter_threshold: minimum acceptable quality.
    """
    below_threshold = get_below_threshold(
        umi_quals, quality_encoding, quality_filter_threshold)
    # str.join avoids the quadratic cost of repeated string concatenation.
    return "".join(
        "N" if is_low else base
        for base, is_low in zip(umi, below_threshold))
Mask all positions where quals < threshold with "N"
def resource(
    self,
    token: dict = None,
    id_resource: str = None,
    subresource=None,
    include: list = None,
    prot: str = "https",
) -> dict:
    """Fetch complete or partial metadata about one resource.

    :param token: API auth token (accepted for interface compatibility).
    :param id_resource: metadata UUID to fetch.
    :param subresource: optional subresource name appended to the URL.
    :param include: subresource names to include (defaults to none).
    :param prot: "https" (default) or "http" (dev/tracking only).
    """
    # The previous signature used a mutable default (include=[]); None is
    # the safe sentinel and is converted to a fresh list here.
    if include is None:
        include = []
    if isinstance(subresource, str):
        subresource = "/{}".format(checker._check_subresource(subresource))
    else:
        subresource = ""
    include = checker._check_filter_includes(include)
    payload = {"id": id_resource, "_include": include}
    md_url = "{}://v1.{}.isogeo.com/resources/{}{}".format(
        prot, self.api_url, id_resource, subresource
    )
    resource_req = self.get(
        md_url,
        headers=self.header,
        params=payload,
        proxies=self.proxies,
        verify=self.ssl,
    )
    checker.check_api_response(resource_req)
    return resource_req.json()
Get complete or partial metadata about one specific resource. :param str token: API auth token :param str id_resource: metadata UUID to get :param list include: subresources that should be included. Must be a list of strings. Available values: 'isogeo.SUBRESOURCES' :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs).
def request_withdrawal(self, amount: Number, address: str, subtract_fee: bool=False,
                       **params) -> Withdrawal:
    """Request a withdrawal of *amount* to *address* on this exchange.

    In dry-run mode no request is sent and a default Withdrawal object is
    returned. Failures are re-raised as InvalidWithdrawal (via
    self.exception).
    """
    self.log.debug(f'Requesting {self.currency} withdrawal from {self.name} to {address}')
    amount = self._parse_money(amount)
    if self.dry_run:
        dry = Withdrawal.create_default(TxType.WITHDRAWAL, self.currency, amount, address)
        self.log.warning(f'DRY RUN: Withdrawal requested on {self.name}: {dry}')
        return dry
    try:
        result = self._withdraw(amount, address, subtract_fee, **params)
    except Exception as e:
        msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}'
        raise self.exception(InvalidWithdrawal, msg, e) from e
    self.log.info(f'Withdrawal requested on {self.name}: {result}')
    return result
Request a withdrawal.
def restore_defaults(self):
    """Recursively restore default values throughout this ConfigObj.

    Only works for a ConfigObj created with a configspec and validated;
    entries without default values are left untouched.
    """
    for default_key in self.default_values:
        self.restore_default(default_key)
    for section_name in self.sections:
        self[section_name].restore_defaults()
Recursively restore default values to all members that have them. This method will only work for a ConfigObj that was created with a configspec and has been validated. It doesn't delete or modify entries without default values.
def label_total_duration(self):
    """Return a dict mapping each distinct label value to the summed
    duration (seconds) of all its occurrences in this label list."""
    totals = collections.defaultdict(float)
    for lbl in self:
        totals[lbl.value] += lbl.duration
    return totals
Return for each distinct label value the total duration of all occurrences. Returns: dict: A dictionary containing for every label-value (key) the total duration in seconds (value). Example: >>> ll = LabelList(labels=[ >>> Label('a', 3, 5), >>> Label('b', 5, 8), >>> Label('a', 8, 10), >>> Label('b', 10, 14), >>> Label('a', 15, 18.5) >>> ]) >>> ll.label_total_duration() {'a': 7.5, 'b': 7.0}
def xyzlabel(labelx, labely, labelz):
    """Set the x, y and z axis labels in one call."""
    for setter, text in ((xlabel, labelx), (ylabel, labely), (zlabel, labelz)):
        setter(text)
Set all labels at once.
def _parseTagName(self): for el in self._element.split(): el = el.replace("/", "").replace("<", "").replace(">", "") if el.strip(): self._tagname = el.rstrip() return
Parse name of the tag. Result is saved to the :attr:`_tagname` property.
async def release_cursor(self, cursor, in_transaction=False):
    """Close *cursor*; release its connection back to the pool unless a
    transaction is still open."""
    connection = cursor.connection
    await cursor.close()
    if not in_transaction:
        self.release(connection)
Release cursor coroutine. Unless in transaction, the connection is also released back to the pool.
def _slugify_title(self):
    """Slugify self.title to at most 255 chars; append a timestamp when the
    slug is already used by another Entry."""
    self.slug = slugify(self.title)
    # Drop trailing '-'-separated words until the slug fits the limit.
    parts = self.slug.split('-')
    while len(self.slug) > 255:
        parts = parts[:-1]
        self.slug = '-'.join(parts)
    if Entry.objects.filter(slug=self.slug).exclude(id=self.id).exists():
        self.slug = self._insert_timestamp(self.slug)
Slugify the Entry title, but ensure it's less than the maximum number of characters. This method also ensures that a slug is unique by appending a timestamp to any duplicate slugs.
def parse_request(self, request, parameters=None, fake_method=None):
    """Extract (method, url, headers, form-copy) from a Flask request.

    *parameters* and *fake_method* are accepted for interface compatibility
    and not used here.
    """
    return (
        request.method,
        request.url,
        request.headers,
        request.form.copy(),
    )
Parse Flask request
def before_update(mapper, conn, target):
    """SQLAlchemy before-update hook: mangle the table name and refresh the
    table ID from its sequence number.

    Raises TypeError when *target* is a Column rather than a Table.
    """
    # Validate the target type BEFORE mutating it; the original mangled
    # target.name first, so a Column was modified before the raise.
    if isinstance(target, Column):
        raise TypeError('Got a column instead of a table')
    target.name = Table.mangle_name(target.name)
    target.update_id(target.sequence_id, False)
Set the Table ID based on the dataset number and the sequence number for the table.
def _blocked_connection(self, frame_in):
    """Handle a Connection.Blocked frame: mark the connection blocked and
    log the broker-supplied reason."""
    self.is_blocked = True
    reason = try_utf8_decode(frame_in.reason)
    LOGGER.warning('Connection is blocked by remote server: %s', reason)
Connection is Blocked. :param frame_in: :return:
def tag_and_push(context):
    """Tag the repo with the new version (signed when a signing key is
    configured, annotated otherwise) and push all tags; honours
    context.dry_run."""
    option = '--sign' if probe.has_signing_key(context) else '--annotate'
    shell.dry_run(
        TAG_TEMPLATE % (option, context.new_version, context.new_version),
        context.dry_run,
    )
    shell.dry_run('git push --tags', context.dry_run)
Tags your git repo with the new version number
def pic_inflow_v2(self):
    """Update the inflow flux from the inlet link sequences:
    Inflow = Q + S + R."""
    fluxes = self.sequences.fluxes.fastaccess
    inlets = self.sequences.inlets.fastaccess
    fluxes.inflow = inlets.q[0] + inlets.s[0] + inlets.r[0]
Update the inlet link sequences. Required inlet sequences: |dam_inlets.Q| |dam_inlets.S| |dam_inlets.R| Calculated flux sequence: |Inflow| Basic equation: :math:`Inflow = Q + S + R`
def set_permissions(self, object, replace=False):
    """Set the S3 ACL grants on *object* to match this distribution's type.

    For an S3Origin with an origin access identity (private content) the
    OAI's S3 user is granted READ — appended to the existing ACL, or
    replacing it entirely when *replace* is True. An S3Origin without an
    OAI serves public content and the object is made 'public-read'.
    NOTE(review): the parameter shadows the builtin ``object``; kept for
    interface compatibility. Non-S3 origins are left untouched — presumably
    intentional; confirm against callers.
    """
    if isinstance(self.config.origin, S3Origin):
        if self.config.origin.origin_access_identity:
            # Identity strings look like '.../<id>'; keep the last component.
            id = self.config.origin.origin_access_identity.split('/')[-1]
            oai = self.connection.get_origin_access_identity_info(id)
            policy = object.get_acl()
            if replace:
                policy.acl = ACL()
            policy.acl.add_user_grant('READ', oai.s3_user_id)
            object.set_acl(policy)
        else:
            object.set_canned_acl('public-read')
Sets the S3 ACL grants for the given object to the appropriate value based on the type of Distribution. If the Distribution is serving private content the ACL will be set to include the Origin Access Identity associated with the Distribution. If the Distribution is serving public content the content will be set up with "public-read". :type object: :class:`boto.cloudfront.object.Object` :param object: The Object whose ACL is being set :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity.
def get_category_by_id(self, cid):
    """Return the Category with identifier *cid* from the cache.

    :param str cid: category identifier.
    :rtype: Category
    """
    self._cache_init()
    return self._cache_get_entry(self.CACHE_NAME_IDS, cid)
Returns Category object by its id. :param str cid: :rtype: Category :return: category object
def is_primary_vehicle(self, msg):
    """Return True when *msg* comes from our primary vehicle, or when no
    target system is selected (target_system == 0)."""
    source_system = msg.get_srcSystem()
    return self.target_system in (0, source_system)
see if a msg is from our primary vehicle
def watched_file_handler(name, logname, filename, mode='a', encoding=None,
                         delay=False):
    """Bark handler logging to *filename*, reopening the file when it is
    replaced underneath us (cf. logging.handlers.WatchedFileHandler).

    *name* and *logname* belong to the handler-factory interface and are not
    used directly here.
    """
    handler = logging.handlers.WatchedFileHandler(
        filename, mode=mode, encoding=encoding, delay=delay)
    return wrap_log_handler(handler)
A Bark logging handler logging output to a named file. If the file has changed since the last log message was written, it will be closed and reopened. Similar to logging.handlers.WatchedFileHandler.
def read(self, size=None):
    """Read up to *size* bytes, draining the pushback buffer first, then
    the underlying file; read everything when *size* is None."""
    if size is None:
        return self.buf.read() + self.open_file.read()
    data = self.buf.read(size)
    remaining = size - len(data)
    if remaining > 0:
        data += self.open_file.read(remaining)
    return data
Read `size` of bytes.
def parse_file(self, f):
    """Parse an ELF file and fill this object's properties.

    :param f: an open binary file object, or a path to the ELF file. When a
        path is given the file is opened in binary mode and kept open on
        ``self.f`` for later reads.
    """
    # isinstance instead of `type(f) is str`: accepts str subclasses and is
    # the idiomatic type check.
    if isinstance(f, str):
        self.f = open(f, 'rb')
    else:
        self.f = f
    # Hand the first 64 bytes to the header parser.
    self._parse_header(self.f.read(64))
Parse an ELF file and fill the class' properties. Arguments: f(file or str): The (path to) the ELF file to read.
def get_parameters_by_location(self, locations=None, excludes=None):
    """Return parameters whose ``location_in`` is in *locations* (when
    given) and not in *excludes* (when given).

    :param locations: allowed locations, or None for all.
    :param excludes: disallowed locations, or None for none.
    :rtype: list
    """
    selected = self.parameters
    if locations:
        selected = [p for p in selected if p.location_in in locations]
    if excludes:
        selected = [p for p in selected if p.location_in not in excludes]
    return list(selected)
Get parameters list by location :param locations: list of locations :type locations: list or None :param excludes: list of excludes locations :type excludes: list or None :return: list of Parameter :rtype: list
def eval_function(value):
    """Evaluate a timestamp function expression ``[name, arg...]``.

    NOW -> current UTC time; TIMESTAMP/TS -> parse arg in local tz;
    UTCTIMESTAMP/UTCTS -> parse arg in UTC; MS -> milliseconds from
    seconds. Raises SyntaxError for an unknown name.
    """
    name, args = value[0], value[1:]
    if name == "NOW":
        return datetime.utcnow().replace(tzinfo=tzutc())
    if name in ("TIMESTAMP", "TS"):
        return parse(unwrap(args[0])).replace(tzinfo=tzlocal())
    if name in ("UTCTIMESTAMP", "UTCTS"):
        return parse(unwrap(args[0])).replace(tzinfo=tzutc())
    if name == "MS":
        return 1000 * resolve(args[0])
    raise SyntaxError("Unrecognized function %r" % name)
Evaluate a timestamp function
def num_rewards(self):
    """Number of distinct rewards, or None (logged) when the reward range
    is infinite or processed rewards are not discrete."""
    if not self.is_reward_range_finite:
        tf.logging.error("Infinite reward range, `num_rewards returning None`")
        return None
    if not self.is_processed_rewards_discrete:
        tf.logging.error(
            "Processed rewards are not discrete, `num_rewards` returning None")
        return None
    low, high = self.reward_range
    return high - low + 1
Returns the number of distinct rewards. Returns: Returns None if the reward range is infinite or the processed rewards aren't discrete, otherwise returns the number of distinct rewards.
def libvlc_video_set_deinterlace(p_mi, psz_mode):
    """Enable or disable the deinterlace filter on a media player.

    @param p_mi: libvlc media player.
    @param psz_mode: type of deinterlace filter, NULL to disable.
    """
    # Lazily resolve (and cache in _Cfunctions) the ctypes binding.
    f = _Cfunctions.get('libvlc_video_set_deinterlace', None) or \
        _Cfunction('libvlc_video_set_deinterlace', ((1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_mode)
Enable or disable deinterlace filter. @param p_mi: libvlc media player. @param psz_mode: type of deinterlace filter, NULL to disable.
def exclude(self, minimum_address, maximum_address):
    """Remove the word-address range [minimum_address, maximum_address)
    from the segments, keeping everything else.

    Raises Error when the range is reversed.
    """
    if maximum_address < minimum_address:
        raise Error('bad address range')
    # Convert word addresses to byte addresses before removing.
    begin = minimum_address * self.word_size_bytes
    end = maximum_address * self.word_size_bytes
    self._segments.remove(begin, end)
Exclude given range and keep the rest. `minimum_address` is the first word address to exclude (including). `maximum_address` is the last word address to exclude (excluding).
def index(value, array):
    """Return the searchsorted insertion index of *value* in the sorted
    *array*, or -1 when it would fall past the end."""
    pos = array.searchsorted(value)
    return -1 if pos == len(array) else pos
Array search that behaves like I want it to. Totally dumb, I know.
def encrypt(key_id, plaintext, encryption_context=None, grant_tokens=None,
            region=None, key=None, keyid=None, profile=None):
    """Encrypt *plaintext* under the KMS key *key_id*.

    Returns ``{'ciphertext': ...}`` on success or ``{'error': ...}`` on a
    boto server error.
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    result = {}
    try:
        response = conn.encrypt(
            key_id,
            plaintext,
            encryption_context=encryption_context,
            grant_tokens=grant_tokens
        )
        result['ciphertext'] = response['CiphertextBlob']
    except boto.exception.BotoServerError as e:
        result['error'] = __utils__['boto.get_error'](e)
    return result
Encrypt plaintext into cipher text using specified key. CLI example:: salt myminion boto_kms.encrypt 'alias/mykey' 'myplaindata' '{"aws:username":"myuser"}'
def rm_prefix(name):
    """Strip a leading 'nova_', 'novaclient_' or 'os_' prefix from *name*.

    Returns *name* unchanged when no known prefix matches.
    """
    # Deriving the slice length from the prefix itself replaces the fragile
    # hard-coded offsets (5/11/3) of the original implementation.
    for prefix in ('nova_', 'novaclient_', 'os_'):
        if name.startswith(prefix):
            return name[len(prefix):]
    return name
Removes nova_ os_ novaclient_ prefix from string.
def _support_op(*args): def inner(func): for one_arg in args: _op_mapping_[one_arg] = func return func return inner
Internal decorator used to define criteria comparison operations.
def last_modified(self):
    """Return the current time as an HTTP-date string (GMT)."""
    now = time.gmtime(time.time())
    return time.strftime("%a, %d %b %Y %H:%M:%S GMT", now)
Get the HTTP-datetime of when the collection was modified.
def _cwlvar_to_wdl(var): if isinstance(var, (list, tuple)): return [_cwlvar_to_wdl(x) for x in var] elif isinstance(var, dict): assert var.get("class") == "File", var return var.get("path") or var["value"] else: return var
Convert a CWL output object into a WDL output. This flattens files and other special CWL outputs that are plain strings in WDL.
def reloadFileOfCurrentItem(self, rtiRegItem=None):
    """Reload the file that contains the currently selected repo-tree item.

    Reloading removes the file's repo tree item and inserts a new one of
    type rtiRegItem.cls (imported first), or of the old class when
    rtiRegItem is None. Afterwards the previously selected path is restored
    when possible; otherwise the reloaded file item itself is selected.
    """
    logger.debug("reloadFileOfCurrentItem, rtiClass={}".format(rtiRegItem))
    currentIndex = self.getRowCurrentIndex()
    if not currentIndex.isValid():
        return

    currentItem, _ = self.getCurrentItem()
    oldPath = currentItem.nodePath

    fileRtiIndex = self.model().findFileRtiIndex(currentIndex)
    isExpanded = self.isExpanded(fileRtiIndex)

    if rtiRegItem is None:
        rtiClass = None
    else:
        rtiRegItem.tryImportClass()
        rtiClass = rtiRegItem.cls
    newRtiIndex = self.model().reloadFileAtIndex(fileRtiIndex, rtiClass=rtiClass)

    try:
        # Try to restore the old selection by path.
        _lastItem, lastIndex = self.expandPath(oldPath)
        self.setCurrentIndex(lastIndex)
        return lastIndex
    except Exception as ex:
        logger.warning("Unable to select {!r} beause of: {}".format(oldPath, ex))
        # Fall back to selecting the reloaded file item itself.
        self.setExpanded(newRtiIndex, isExpanded)
        self.setCurrentIndex(newRtiIndex)
        return newRtiIndex
Finds the repo tree item that holds the file of the current item and reloads it. Reloading is done by removing the repo tree item and inserting a new one. The new item will have by of type rtiRegItem.cls. If rtiRegItem is None (the default), the new rtiClass will be the same as the old one. The rtiRegItem.cls will be imported. If this fails the old class will be used, and a warning will be logged.
def show_partitioning(rdd, show=True):
    """Log the partition count and average values-per-partition of *rdd*.

    NOTE: ``countApprox`` can be significantly more expensive on a cluster
    than locally.
    """
    if not show:
        return
    partition_count = rdd.getNumPartitions()
    try:
        value_count = rdd.countApprox(1000, confidence=0.50)
    except Exception:
        value_count = -1
    # Initialize first so a failing rdd.name() cannot leave `name` unbound
    # (the original raised NameError in that case); also narrow the bare
    # except to Exception.
    name = None
    try:
        name = rdd.name()
    except Exception:
        pass
    name = name or "anonymous"
    logging.info("For RDD %s, there are %d partitions with on average %s values"
                 % (name, partition_count, int(value_count / float(partition_count))))
Seems to be significantly more expensive on cluster than locally
def on_show_mainframe(self, event):
    """Enable, show and raise the main frame window."""
    frame = self.parent
    frame.Enable()
    frame.Show()
    frame.Raise()
Show mainframe window
def patch_sys(self, inherit_path):
    """Patch sys.path, sys.path_importer_cache and sys.modules with the
    scrubbed minimum environment, extended with any PEX_PATH entries."""
    def _replace_dict(target, source):
        target.clear()
        target.update(source)

    def _apply(path, importer_cache, modules):
        sys.path[:] = path
        _replace_dict(sys.path_importer_cache, importer_cache)
        _replace_dict(sys.modules, modules)

    scrubbed_path, scrubbed_importer_cache, scrubbed_modules = self.minimum_sys(inherit_path)
    scrubbed_path.extend(merge_split(self._pex_info.pex_path, self._vars.PEX_PATH))
    _apply(scrubbed_path, scrubbed_importer_cache, scrubbed_modules)
Patch sys with all site scrubbed.
def draw_polygon(
    self,
    *pts,
    close_path: bool = True,
    stroke: Color = None,
    stroke_width: float = 1,
    stroke_dash: typing.Sequence = None,
    fill: Color = None
) -> None:
    """Draw the linear path through the given points.

    No-op here — presumably overridden by concrete renderer subclasses
    (TODO confirm).

    :param pts: vertex points of the path.
    :param close_path: whether to close the path back to the first point.
    :param stroke: stroke colour, or None for no stroke.
    :param stroke_width: stroke width.
    :param stroke_dash: dash pattern sequence, or None for solid.
    :param fill: fill colour, or None for no fill.
    """
    pass
Draws the given linear path.
def delete_security_group_rule(self, sec_grp_rule_id):
    """Delete the security group rule with the given id.

    Returns the backend response when truthy, otherwise True.
    """
    response = self.network_conn.delete_security_group_rule(
        security_group_rule=sec_grp_rule_id)
    return response if response else True
Deletes the specified security group rule
def list_submissions():
    """Render the list of past submissions; roll back the session and show
    an empty list when the query fails."""
    submissions = []
    try:
        submissions = session.query(Submission).all()
    except SQLAlchemyError:
        session.rollback()
    return render_template('list_submissions.html', submissions=submissions)
List the past submissions with information about them
def bytes(num, check_result=False):
    """Return *num* cryptographically strong pseudo-random bytes.

    Raises ValueError for non-positive *num*; raises RandError when
    *check_result* is True and the PRNG reports insufficient seeding.
    NOTE: shadows the builtin ``bytes`` — name kept for compatibility.
    """
    if num <= 0:
        raise ValueError("'num' should be > 0")
    out_buffer = create_string_buffer(num)
    status = libcrypto.RAND_bytes(out_buffer, num)
    if check_result and status == 0:
        raise RandError("Random Number Generator not seeded sufficiently")
    return out_buffer.raw[:num]
Returns num bytes of cryptographically strong pseudo-random bytes. If check_result is True, raises an error if the PRNG is not seeded enough.
def export_chat_invite_link(
    self,
    chat_id: Union[int, str]
) -> str:
    """Generate a new invite link for a chat, revoking any previous link.

    The caller must be an administrator with the appropriate rights.
    Returns the exported link string; an unrecognised peer type returns
    None.
    """
    peer = self.resolve_peer(chat_id)
    if isinstance(peer, types.InputPeerChat):
        request = functions.messages.ExportChatInvite(peer=peer.chat_id)
    elif isinstance(peer, types.InputPeerChannel):
        request = functions.channels.ExportInvite(channel=peer)
    else:
        return None
    return self.send(request).link
Use this method to generate a new invite link for a chat; any previously generated link is revoked. You must be an administrator in the chat for this to work and have the appropriate admin rights. Args: chat_id (``int`` | ``str``): Unique identifier for the target chat or username of the target channel/supergroup (in the format @username). Returns: On success, the exported invite link as string is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
def to_dict(self):
    """Dump this feature collection's features to a plain dict suitable for
    JSON/CBOR serialization (extra state such as read-only-ness is not
    included).

    Non-native feature types are wrapped in a CBOR tag carrying their type
    name; ephemeral features (EPHEMERAL_PREFIX) are skipped.
    NOTE(review): Python 2 code (iteritems/unicode/str.decode).
    """
    def is_non_native_sc(ty, encoded):
        # A StringCounter whose encoding is not the native one must keep
        # its CBOR type tag.
        return (ty == 'StringCounter' and
                not is_native_string_counter(encoded))
    fc = {}
    native = ('StringCounter', 'Unicode')
    for name, feat in self._features.iteritems():
        if name.startswith(self.EPHEMERAL_PREFIX):
            continue
        # Keys are normalized to unicode.
        if not isinstance(name, unicode):
            name = name.decode('utf-8')
        tyname = registry.feature_type_name(name, feat)
        encoded = registry.get(tyname).dumps(feat)
        if tyname not in native or is_non_native_sc(tyname, encoded):
            encoded = cbor.Tag(cbor_names_to_tags[tyname], encoded)
        fc[name] = encoded
    return fc
Dump a feature collection's features to a dictionary. This does not include additional data, such as whether or not the collection is read-only. The returned dictionary is suitable for serialization into JSON, CBOR, or similar data formats.
def slugify(s, delimiter='-'):
    """Normalize *s* to lowercase ASCII, replacing non-word characters with
    *delimiter* (stripped from both ends)."""
    ascii_text = (unicodedata.normalize('NFKD', to_unicode(s))
                  .encode('ascii', 'ignore')
                  .decode('ascii'))
    return RE_SLUG.sub(delimiter, ascii_text).strip(delimiter).lower()
Normalize `s` into ASCII and replace non-word characters with `delimiter`.
def pack(self, value, nocheck=False, major=DEFAULT_KATCP_MAJOR):
    """Format *value* as a KATCP parameter string.

    Falls back to the type's default when *value* is None (raising
    ValueError when there is no default); validates via ``check`` unless
    *nocheck*; encodes per the given KATCP *major* version.
    """
    if value is None:
        value = self.get_default()
        if value is None:
            raise ValueError("Cannot pack a None value.")
    if not nocheck:
        self.check(value, major)
    return self.encode(value, major)
Return the value formatted as a KATCP parameter. Parameters ---------- value : object The value to pack. nocheck : bool, optional Whether to check that the value is valid before packing it. major : int, optional Major version of KATCP to use when interpreting types. Defaults to latest implemented KATCP version. Returns ------- packed_value : str The unescaped KATCP string representing the value.
def AddValue(self, name, number, aliases=None, description=None):
    """Add an enumeration value, indexing it by name, number and aliases.

    Raises KeyError when the name, number or any alias already exists.
    """
    if name in self.values_per_name:
        raise KeyError('Value with name: {0:s} already exists.'.format(name))
    if number in self.values_per_number:
        raise KeyError('Value with number: {0!s} already exists.'.format(number))
    for alias in aliases or []:
        if alias in self.values_per_alias:
            raise KeyError('Value with alias: {0:s} already exists.'.format(alias))

    new_value = EnumerationValue(
        name, number, aliases=aliases, description=description)
    self.values.append(new_value)
    self.values_per_name[name] = new_value
    self.values_per_number[number] = new_value
    for alias in aliases or []:
        self.values_per_alias[alias] = new_value
Adds an enumeration value. Args: name (str): name. number (int): number. aliases (Optional[list[str]]): aliases. description (Optional[str]): description. Raises: KeyError: if the enumeration value already exists.
def flatten(self):
    """Return a flat list: the output first, then the states."""
    return [self.output] + list(self.state)
Create a flattened version by putting output first and then states.
def genealogic_types(self):
    """Return the rest_name of this object and each ancestor, self first."""
    lineage = []
    node = self
    while node:
        lineage.append(node.rest_name)
        node = node.parent_object
    return lineage
Get genealogic types Returns: Returns a list of all parent types
def _check_message_valid(msg):
    """Validate an Elk packet: the leading two hex chars must equal the
    remaining length, and the checksum must verify.

    Raises ValueError for any length problem (an IndexError from a
    too-short message is translated to the same ValueError).
    """
    try:
        declared_length = int(msg[:2], 16)
        if declared_length != (len(msg) - 2):
            raise ValueError("Elk message length incorrect")
        _check_checksum(msg)
    except IndexError:
        raise ValueError("Elk message length incorrect")
Check packet length valid and that checksum is good.
def from_json(cls, data, result=None):
    """Create a new RelationMember element from JSON data.

    :param data: element data from JSON (dict).
    :param result: the overpy.Result this element belongs to.
    :return: new instance of this class.
    :raises overpy.exception.ElementDataWrongType: if the JSON 'type' value
        does not match this class's type.
    """
    if data.get("type") != cls._type_value:
        raise exception.ElementDataWrongType(
            type_expected=cls._type_value,
            type_provided=data.get("type")
        )

    ref = data.get("ref")
    role = data.get("role")

    # Everything except the well-known keys is kept as a raw attribute.
    attributes = {}
    ignore = ["geometry", "type", "ref", "role"]
    for n, v in data.items():
        if n in ignore:
            continue
        attributes[n] = v

    # Geometry (when present) is a list of lat/lon points.
    geometry = data.get("geometry")
    if isinstance(geometry, list):
        geometry_orig = geometry
        geometry = []
        for v in geometry_orig:
            geometry.append(
                RelationWayGeometryValue(
                    lat=v.get("lat"),
                    lon=v.get("lon")
                )
            )
    else:
        geometry = None

    return cls(
        attributes=attributes,
        geometry=geometry,
        ref=ref,
        role=role,
        result=result
    )
Create new RelationMember element from JSON data :param child: Element data from JSON :type child: Dict :param result: The result this element belongs to :type result: overpy.Result :return: New instance of RelationMember :rtype: overpy.RelationMember :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList:
    """Return a ModuleList of *num_copies* independent deep copies of
    *module*."""
    copies = (copy.deepcopy(module) for _ in range(num_copies))
    return torch.nn.ModuleList(copies)
Produce N identical layers.
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
    """Generate an RDD of i.i.d. samples from Exp(mean).

    :param sc: SparkContext used to create the RDD.
    :param mean: mean (1 / lambda) of the Exponential distribution.
    :param size: size of the RDD.
    :param numPartitions: number of partitions (default:
        sc.defaultParallelism).
    :param seed: random seed (default: a random long integer).
    :return: RDD of float samples ~ Exp(mean).
    """
    return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
Generates an RDD comprised of i.i.d. samples from the Exponential distribution with the input mean. :param sc: SparkContext used to create the RDD. :param mean: Mean, or 1 / lambda, for the Exponential distribution. :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ Exp(mean). >>> mean = 2.0 >>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - mean) < 0.5 True >>> from math import sqrt >>> abs(stats.stdev() - sqrt(mean)) < 0.5 True
def mesh_axis_to_tensor_axis(self, mesh_ndims):
    """For each of the first *mesh_ndims* mesh axes, return the tensor axis
    mapped to it (None when no tensor axis maps there).

    :returns: tuple of optional ints of length mesh_ndims.
    """
    ta2ma = self._tensor_axis_to_mesh_axis
    result = []
    # NOTE: xrange — this module targets Python 2.
    for mesh_axis in xrange(mesh_ndims):
        result.append(ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None)
    return tuple(result)
For each mesh axis, which Tensor axis maps to it. Args: mesh_ndims: int. Returns: Tuple of optional integers, with length mesh_ndims.
def decrypt(self, data, oaep_hash_fn_name=None, mgf1_hash_fn_name=None):
    """Decrypt PKCS1-OAEP encrypted *data* with the stored private key.

    Hash function names default to the class-level defaults. Raises
    ValueError when no private key is set.

    :return: bytes
    """
    if self.__private_key is None:
        raise ValueError('Unable to call this method. Private key must be set')
    if oaep_hash_fn_name is None:
        oaep_hash_fn_name = self.__class__.__default_oaep_hash_function_name__
    if mgf1_hash_fn_name is None:
        mgf1_hash_fn_name = self.__class__.__default_mgf1_hash_function_name__
    oaep_algorithm = getattr(hashes, oaep_hash_fn_name)()
    mgf1_algorithm = getattr(hashes, mgf1_hash_fn_name)()
    oaep_padding = padding.OAEP(
        mgf=padding.MGF1(algorithm=mgf1_algorithm),
        algorithm=oaep_algorithm,
        label=None
    )
    return self.__private_key.decrypt(data, oaep_padding)
Decrypt a data that used PKCS1 OAEP protocol :param data: data to decrypt :param oaep_hash_fn_name: hash function name to use with OAEP :param mgf1_hash_fn_name: hash function name to use with MGF1 padding :return: bytes
def process_response(self, request, response):
    """Log memory usage after the response, plus the diff against the
    cached pre-request snapshot; returns the response unchanged."""
    if self._is_enabled():
        after_prefix = self._log_prefix(u"After", request)
        new_memory_data = self._memory_data(after_prefix)
        diff_prefix = self._log_prefix(u"Diff", request)
        cached = self._cache.get_cached_response(self.memory_data_key)
        old_memory_data = cached.get_value_or_default(None)
        self._log_diff_memory_data(diff_prefix, new_memory_data, old_memory_data)
    return response
Logs memory data after processing response.
def _build_instruction_ds(instructions):
    """Build a tf.data.Dataset with one element per shard instruction.

    'mask_offset' values are packed as int64 arrays; all other fields are
    kept as plain lists.
    """
    tensor_inputs = {}
    for key, values in utils.zip_dict(*instructions):
        if key == "mask_offset":
            tensor_inputs[key] = np.array(values, dtype=np.int64)
        else:
            tensor_inputs[key] = list(values)
    return tf.data.Dataset.from_tensor_slices(tensor_inputs)
Create a dataset containing individual instruction for each shard. Each instruction is a dict: ``` { "filepath": tf.Tensor(shape=(), dtype=tf.string), "mask_offset": tf.Tensor(shape=(), dtype=tf.int64), "mask": tf.Tensor(shape=(100,), dtype=tf.bool), } ``` Args: instructions: `list[dict]`, the list of instruction dict Returns: instruction_ds: The dataset containing the instruction. The dataset size is the number of shard.
def fuzzy_search(self, *filters):
    """Return the entries whose names fuzzily match every given pattern
    (the pattern's characters must appear in order in the name).

    :param filters: the pattern(s) to search for.
    :returns: list of matched entries.
    """
    logger.verbose(
        "Performing fuzzy search on %s (%s) ..",
        pluralize(len(filters), "pattern"),
        concatenate(map(repr, filters))
    )
    patterns = [create_fuzzy_pattern(f) for f in filters]
    matches = [entry for entry in self.filtered_entries
               if all(p.search(entry.name) for p in patterns)]
    logger.log(
        logging.INFO if matches else logging.VERBOSE,
        "Matched %s using fuzzy search.",
        pluralize(len(matches), "password"),
    )
    return matches
Perform a "fuzzy" search that matches the given characters in the given order. :param filters: The pattern(s) to search for. :returns: The matched password names (a list of strings).
def format(self, record):
    """Map Python LogRecord attributes to the mozlog JSON format.

    Standard record attributes fill the top-level envelope; everything else
    (plus the message and any exception info) goes into 'Fields'.
    """
    out = dict(
        Timestamp=int(record.created * 1e9),  # nanoseconds
        Type=record.name,
        Logger=self.logger_name,
        Hostname=self.hostname,
        EnvVersion=self.LOGGING_FORMAT_VERSION,
        Severity=self.SYSLOG_LEVEL_MAP.get(record.levelno,
                                           self.DEFAULT_SYSLOG_LEVEL),
        Pid=record.process,
    )
    # Copy user-supplied extras (anything not a stock LogRecord attribute).
    fields = dict()
    for key, value in record.__dict__.items():
        if key not in self.EXCLUDED_LOGRECORD_ATTRS:
            fields[key] = value
    message = record.getMessage()
    if message:
        # A message wrapped in braces is presumably already structured
        # JSON-ish data and is not duplicated into 'msg' — TODO confirm.
        if not message.startswith("{") and not message.endswith("}"):
            fields["msg"] = message
    if record.exc_info is not None:
        fields["error"] = repr(record.exc_info[1])
        fields["traceback"] = safer_format_traceback(*record.exc_info)
    out['Fields'] = fields
    return json.dumps(out)
Map from Python LogRecord attributes to JSON log format fields * from - https://docs.python.org/3/library/logging.html#logrecord-attributes * to - https://mana.mozilla.org/wiki/pages/viewpage.action?pageId=42895640
def buff_interaction_eval(cls, specification, sequences, parameters, **kwargs):
    """Construct this optimizer preconfigured with the default build
    function and BUFF interaction evaluation.

    Extra keyword arguments are forwarded to the base optimizer.
    """
    return cls(specification, sequences, parameters,
               build_fn=default_build,
               eval_fn=buff_interaction_eval,
               **kwargs)
Creates optimizer with default build and BUFF interaction eval. Notes ----- Any keyword arguments will be propagated down to BaseOptimizer. Parameters ---------- specification : ampal.assembly.specification Any assembly level specification. sequences : [str] A list of sequences, one for each polymer. parameters : [base_ev_opt.Parameter] A list of `Parameter` objects in the same order as the function signature expects.
def LockedRead(self):
    """Read the cache file contents under both thread and process locks.

    Returns the decoded file contents; returns None when the file does not
    exist and cannot be created, or when the process lock cannot be
    acquired.
    """
    file_contents = None
    with self._thread_lock:
        if not self._EnsureFileExists():
            return None
        # The process lock getter yields a truthy value only on success.
        with self._process_lock_getter() as acquired_plock:
            if not acquired_plock:
                return None
            with open(self._filename, 'rb') as f:
                file_contents = f.read().decode(encoding=self._encoding)
    return file_contents
Acquire an interprocess lock and dump cache contents. This method safely acquires the locks then reads a string from the cache file. If the file does not exist and cannot be created, it will return None. If the locks cannot be acquired, this will also return None. Returns: cache data - string if present, None on failure.
def read_character_string(self):
    """Read a length-prefixed character string at the current offset.

    The byte at self.offset is the string length; the offset is advanced
    past it and the read is delegated to read_string.
    NOTE(review): ord() on an indexed byte suggests self.data is a str
    (Python 2) — confirm before porting.
    """
    length = ord(self.data[self.offset])
    self.offset += 1
    return self.read_string(length)
Reads a character string from the packet
def dumps(self, o):
    """Serialize *o* (post-deduplication) and return the resulting bytes.

    :param o: the object to serialize.
    """
    sink = io.BytesIO()
    VaultPickler(self, sink).dump(o)
    return sink.getvalue()
Returns a serialized string representing the object, post-deduplication. :param o: the object
def log_request_end_send(self, target_system, target_component, force_mavlink1=False):
    """Send a LOG_REQUEST_END message: stop log transfer and resume normal
    logging.

    :param target_system: System ID (uint8_t)
    :param target_component: Component ID (uint8_t)
    """
    encoded = self.log_request_end_encode(target_system, target_component)
    return self.send(encoded, force_mavlink1=force_mavlink1)
Stop log transfer and resume normal logging target_system : System ID (uint8_t) target_component : Component ID (uint8_t)
def flatten_dict(d, parent_key='', sep='.', ignore_under_prefixed=True,
                 mark_value=True):
    """Flatten a nested mapping into a single-level dict with joined keys.

    Keys starting with '__' are dropped when *ignore_under_prefixed*; keys
    starting with a single '_' get their values wrapped in MarkValue when
    *mark_value*.

    >>> flatten_dict({"a": {"b": {"c": 1}}})
    {'a.b.c': 1}
    """
    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'):
            continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        # collections.abc.MutableMapping: the bare `collections` alias was
        # removed in Python 3.10.
        if isinstance(v, collections.abc.MutableMapping):
            # Propagate the caller's flags instead of hard-coding True/True
            # as the original recursion did.
            items.update(flatten_dict(
                v, new_key, sep=sep,
                ignore_under_prefixed=ignore_under_prefixed,
                mark_value=mark_value))
        else:
            items[new_key] = v
    return items
Flattens a nested dictionary >>> from pprint import pprint >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } } >>> fd = flatten_dict(d) >>> pprint(fd) {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
def set_environ(env_name, value):
    """Temporarily set os.environ[env_name] to *value* around a yield.

    When *value* is None nothing is changed. The previous value is restored
    (or the variable removed) when the generator resumes/closes.
    NOTE(review): written as a generator — presumably wrapped with
    contextlib.contextmanager at its definition site; confirm.
    """
    should_set = value is not None
    if should_set:
        previous = os.environ.get(env_name)
        os.environ[env_name] = value
    try:
        yield
    finally:
        if should_set:
            if previous is None:
                del os.environ[env_name]
            else:
                os.environ[env_name] = previous
Set the environment variable 'env_name' to 'value' Save previous value, yield, and then restore the previous value stored in the environment variable 'env_name'. If 'value' is None, do nothing
def sign(self, msg, key):
    """Create an RSA-PSS signature over *msg* with *key*, prehashing with
    this algorithm's hash.

    :param msg: the message bytes.
    :param key: the private key.
    :return: the signature.
    """
    hasher = hashes.Hash(self.hash_algorithm(), backend=default_backend())
    hasher.update(msg)
    digest = hasher.finalize()
    pss_padding = padding.PSS(
        mgf=padding.MGF1(self.hash_algorithm()),
        salt_length=padding.PSS.MAX_LENGTH)
    return key.sign(digest, pss_padding,
                    utils.Prehashed(self.hash_algorithm()))
Create a signature over a message :param msg: The message :param key: The key :return: A signature
def save_x509s(self, x509s):
    """Save each provided x509 object to the matching TLS file known by
    this bundle; the CA entry is never written."""
    for file_type in TLSFileType:
        if file_type.value not in x509s:
            continue
        if file_type is TLSFileType.CA:
            continue
        target = getattr(self, file_type.value)
        if target:
            target.save(x509s[file_type.value])
Saves the x509 objects to the paths known by this bundle
def hourly(dt=None, fmt=None):
    """Return *dt* (default: current UTC time) pinned to minute=1, second=1
    within its hour, preserving tzinfo.

    :param dt: source datetime; None means datetime.datetime.utcnow().
        (The original default ``datetime.datetime.utcnow()`` was evaluated
        once at import time, freezing the "current" time.)
    :param fmt: optional strftime format; when given, the formatted string
        is returned instead of a datetime.
    """
    if dt is None:
        dt = datetime.datetime.utcnow()
    stamped = datetime.datetime(dt.year, dt.month, dt.day, dt.hour,
                                1, 1, 0, dt.tzinfo)
    if fmt is not None:
        return stamped.strftime(fmt)
    return stamped
Get a new datetime object every hour.
def read_group(self, group_id, mount_point=DEFAULT_MOUNT_POINT):
    """Query a group by its identifier.

    GET: /{mount_point}/group/id/{id}. Produces: 200 application/json.

    :param group_id: identifier of the group.
    :param mount_point: the "path" the method/backend was mounted on.
    :return: the JSON response of the request.
    """
    api_path = '/v1/{mount_point}/group/id/{id}'.format(
        mount_point=mount_point,
        id=group_id,
    )
    return self._adapter.get(url=api_path).json()
Query the group by its identifier. Supported methods: GET: /{mount_point}/group/id/{id}. Produces: 200 application/json :param group_id: Identifier of the group. :type group_id: str | unicode :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
def register(self, cls, instance):
    """Register *instance* as the implementation of interface *cls*.

    Raises TypeError when *cls* is not a DropletInterface subclass or when
    *instance* does not implement it; raises ValueError when another
    instance already claims the same name.
    """
    if not issubclass(cls, DropletInterface):
        # Fixed error message: it previously named "NAZInterface" although
        # the check is against DropletInterface.
        raise TypeError('Given class is not a DropletInterface subclass: %s' % cls)
    if not isinstance(instance, cls):
        raise TypeError('Given instance does not implement the class: %s' % instance)
    if instance.name in self.INSTANCES_BY_NAME:
        if self.INSTANCES_BY_NAME[instance.name] != instance:
            raise ValueError('Given name is registered '
                             'by other instance: %s' % instance.name)
    self.INSTANCES_BY_INTERFACE[cls].add(instance)
    self.INSTANCES_BY_NAME[instance.name] = instance
Register the given instance as implementation for a class interface
def get_user(self, userPk):
    """Return the user identified by the given Pk/UUID, or None on failure."""
    response = self._request('user/' + str(userPk))
    if not response:
        return None
    user = User()
    user.pk = user.id = userPk
    user.__dict__.update(response.json())
    return user
Returns the user specified with the user's Pk or UUID
def create_api_network_ipv6(self):
    """Get an instance of the Api Networkv6 services facade."""
    credentials = (self.networkapi_url, self.user, self.password, self.user_ldap)
    return ApiNetworkIPv6(*credentials)
Get an instance of Api Networkv6 services facade.
def batch(iterable, size):
    """Yield successive batches of *size* items from *iterable*.

    Each batch is a lazy iterator over the underlying sequence, so every
    batch must be fully consumed before requesting the next one.

    >>> [list(i) for i in batch([1, 2, 3, 4, 5], 2)]
    [[1, 2], [3, 4], [5]]
    """
    source = iter(iterable)
    while True:
        window = islice(source, size)
        try:
            first = next(window)
        except StopIteration:
            # Source exhausted: no more batches.
            return
        yield chain([first], window)
Get items from a sequence a batch at a time. .. note: Adapted from https://code.activestate.com/recipes/303279-getting-items-in-batches/. .. note: All batches must be exhausted immediately. :params iterable: An iterable to get batches from. :params size: Size of the batches. :returns: A new batch of the given size at each time. >>> [list(i) for i in batch([1, 2, 3, 4, 5], 2)] [[1, 2], [3, 4], [5]]
def cors_setup(self, request):
    """Attach a response callback writing CORS headers from ``self.cors_options``.

    OPTIONS (preflight) requests echo every configured option as a header;
    other requests get the allow-origin and optional expose-headers values.

    :param request: <pyramid.request.Request>
    """
    def cors_headers(request, response):
        if request.method.lower() == 'options':
            # Preflight: turn each snake_case option key into a header name.
            for option_key, option_value in self.cors_options.items():
                header_name = '-'.join(
                    part.capitalize() for part in option_key.split('_'))
                response.headers[header_name] = option_value
        else:
            origin = self.cors_options.get('access_control_allow_origin', '*')
            expose_headers = self.cors_options.get('access_control_expose_headers', '')
            response.headers['Access-Control-Allow-Origin'] = origin
            if expose_headers:
                response.headers['Access-Control-Expose-Headers'] = expose_headers

    request.add_response_callback(cors_headers)
Sets up the CORS headers response based on the settings used for the API. :param request: <pyramid.request.Request>
def load_handler(self):
    """Import the module named by ``self.handler_name`` and instantiate the
    handler class found there, storing the instance on ``self.handler``."""
    module_name, _, class_name = self.handler_name.rpartition(".")
    handler_module = __import__(module_name, {}, {}, str(class_name))
    self.handler = getattr(handler_module, class_name)()
Load the detected handler.
def check_extracted_paths(namelist, subdir=None):
    """Check whether zip file paths are all relative, and optionally in a
    specified subdirectory; raises an exception if not.

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file are
        under this subdirectory

    Security fix: the parent-containment test previously used
    ``os.path.commonprefix``, which compares character-by-character, so a
    sibling such as ``/work/project_evil`` passed the check for parent
    ``/work/project``. ``os.path.commonpath`` compares whole path
    components and closes that hole.
    """
    def relpath(p):
        # os.path.relpath drops trailing separators; restore them so the
        # subdir prefix check cannot match a partial directory name.
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = relpath(subdir + os.path.sep)

    for name in namelist:
        # Component-wise containment check (commonprefix is char-wise, unsafe).
        if os.path.commonpath([parent, os.path.abspath(name)]) != parent:
            raise FileException('Insecure path in zipfile', name)
        if subdir and os.path.commonprefix(
                [subdir, relpath(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
Check whether zip file paths are all relative, and optionally in a specified subdirectory, raises an exception if not namelist: A list of paths from the zip file subdir: If specified then check whether all paths in the zip file are under this subdirectory Python docs are unclear about the security of extract/extractall: https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
def add_custom_feature(self, feature):
    """Adds a custom feature to the feature list.

    Parameters
    ----------
    feature : object
        an object with interface like CustomFeature (map, describe methods)

    Raises
    ------
    ValueError
        if the feature has a non-positive dimension, or lacks a callable
        ``transform`` method.
    """
    if feature.dimension <= 0:
        raise ValueError("Dimension has to be positive. "
                         "Please override dimension attribute in feature!")
    if not hasattr(feature, 'transform'):
        raise ValueError("no 'transform' method in given feature")
    if not callable(feature.transform):
        raise ValueError("'transform' attribute exists but is not a method")
    self.__add_feature(feature)
Adds a custom feature to the feature list. Parameters ---------- feature : object an object with interface like CustomFeature (map, describe methods)
def get_nth_unique_value(self, keypath, n, distance_from, open_interval=True):
    """Return the ``n``-th (0-indexed) unique value for *keypath*, ordered
    by distance from *distance_from*; raise Contradiction when ``n`` is
    out of bounds."""
    ordered = self.get_ordered_values(keypath, distance_from, open_interval)
    if not 0 <= n < len(ordered):
        raise Contradiction("n-th Unique value out of range: " + str(n))
    return ordered[n]
Returns the ``n``-th unique value (0-indexed), or raises a Contradiction if ``n`` is out of bounds
def content(self, name, attrs=None, characters=None):
    """Writes an element, optional content, and the closing tag, all
    without indentation.

    :name: the name of the element
    :attrs: a dict of attributes
    :characters: the characters to write
    """
    with self.no_inner_space(outer=True), self.element(name, attrs):
        if characters:
            self.characters(characters)
Writes an element, some content for the element, and then closes the element, all without indentation. :name: the name of the element :attrs: a dict of attributes :characters: the characters to write
def set_widgets(self):
    """Set widgets on the Exposure Layer From Browser tab."""
    self.tvBrowserExposure_selection_changed()
    selected_exposure = self.parent.step_fc_functions1.selected_value(
        layer_purpose_exposure['key'])
    exposure_icon = get_image_path(selected_exposure)
    self.lblIconIFCWExposureFromBrowser.setPixmap(QPixmap(exposure_icon))
Set widgets on the Exposure Layer From Browser tab.
def _add_data_to_general_stats(self, data):
    """Add data for the general stats in a Picard-module specific manner."""
    headers = _get_general_stats_headers()
    self.general_stats_headers.update(headers)
    wanted_columns = ('ERROR_count', 'WARNING_count', 'file_validation_status')
    for sample_name, sample_data in data.items():
        row = {column: sample_data[column] for column in wanted_columns}
        self.general_stats_data.setdefault(sample_name, {})
        if sample_data['file_validation_status'] != 'pass':
            # Surface the status column whenever any sample failed validation.
            headers['file_validation_status']['hidden'] = False
        self.general_stats_data[sample_name].update(row)
Add data for the general stats in a Picard-module specific manner
def set_value(self, visual_property, value):
    """Set a single Visual Property Value

    :param visual_property: Visual Property ID
    :param value: New value for the VP
    :return: None
    """
    if visual_property is None or value is None:
        raise ValueError('Both VP and value are required.')
    payload = [{'visualProperty': visual_property, 'value': value}]
    requests.put(self.url, data=json.dumps(payload), headers=HEADERS)
Set a single Visual Property Value :param visual_property: Visual Property ID :param value: New value for the VP :return: None
def build(self):
    """Run the build command specified in index.yaml."""
    for cmd in self.build_cmds:
        log.info('building command: {}'.format(cmd))
        # NOTE(review): shell=True with interpolated text — commands come
        # from the project's own index.yaml; never feed untrusted input here.
        shell_cmd = 'cd {}; {}'.format(self.analyses_path, cmd)
        log.debug('full command: {}'.format(shell_cmd))
        subprocess.call(shell_cmd, shell=True)
    log.info('build done')
Run the build command specified in index.yaml.
def _dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): if col[0] == 'rowid': d['_id'] = row[idx] else: d[col[0]] = row[idx] return d
factory for sqlite3 to return results as dict
def get_art(cache_dir, size, client):
    """Get the album art."""
    song = client.currentsong()
    if len(song) < 2:
        # Player returned no usable song metadata.
        print("album: Nothing currently playing.")
        return

    file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "")
    file_name = cache_dir / file_name

    if file_name.is_file():
        shutil.copy(file_name, cache_dir / "current.jpg")
        print("album: Found cached art.")
        return

    print("album: Downloading album art...")
    brainz.init()
    album_art = brainz.get_cover(song, size)
    if album_art:
        util.bytes_to_file(album_art, cache_dir / file_name)
        util.bytes_to_file(album_art, cache_dir / "current.jpg")
        print(f"album: Swapped art to {song['artist']}, {song['album']}.")
Get the album art.
def absent(name, domain, user=None):
    """Make sure the defaults value is absent

    name
        The key of the given domain to remove

    domain
        The name of the domain to remove from

    user
        The user to write the defaults to
    """
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    out = __salt__['macdefaults.delete'](domain, name, user)
    if out['retcode'] == 0:
        ret['changes']['absent'] = "{0} {1} is now absent".format(domain, name)
    else:
        # Non-zero retcode from `defaults delete` means the key was not there.
        ret['comment'] += "{0} {1} is already absent".format(domain, name)
    return ret
Make sure the defaults value is absent name The key of the given domain to remove domain The name of the domain to remove from user The user to write the defaults to
def username(self, default=None):
    """Return the given, family, or full name — the first one set, in that
    order — falling back to *default* when none is set."""
    return (self.lis_person_name_given
            or self.lis_person_name_family
            or self.lis_person_name_full
            or default)
Return the given, family, or full name — whichever is set first, in that order — or ``default`` if none is set.
def set_args(self, **kwargs):
    """Merge keyword arguments into ``self.args``.

    The original carried a Python 2 ``iteritems`` fallback and copied keys
    one by one; on any supported Python this is exactly ``dict.update``.

    Args:
        **kwargs: keys and values to store in ``self.args``.
    """
    self.args.update(kwargs)
Set more arguments into self.args

Args:
    **kwargs: key/value pairs to store as dictionary entries
def json_encoder_default(obj):
    """JSON encoder function that handles some numpy types.

    Integers outside the JavaScript safe-integer range, and all numpy
    integers, are serialized as strings; numpy floats become Python floats;
    numpy arrays and (frozen)sets become lists.

    Raises:
        TypeError: for unsupported types. Fix: the original raised a bare
            ``TypeError`` with no message; the offending type is now named,
            matching json's own error style.
    """
    if isinstance(obj, numbers.Integral) and (obj < min_safe_integer or obj > max_safe_integer):
        return str(obj)
    if isinstance(obj, np.integer):
        return str(obj)
    elif isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.ndarray):
        return list(obj)
    elif isinstance(obj, (set, frozenset)):
        return list(obj)
    raise TypeError(
        'Object of type %s is not JSON serializable' % type(obj).__name__)
JSON encoder function that handles some numpy types.
def dump(self):
    """Dump extracted data into a single hdf5 file.

    Writes one dataset per keyword in ``self.kwslist``, taking its column
    from ``self.h5data`` and writing to ``self.h5file``.

    Fix: open the file as a context manager so the handle is closed even
    when a dataset write raises (the original leaked the handle on error).

    :return: None
    """
    with h5py.File(self.h5file, 'w') as f:
        for i, k in enumerate(self.kwslist):
            v = self.h5data[:, i]
            dset = f.create_dataset(k, shape=v.shape, dtype=v.dtype)
            dset[...] = v
dump extracted data into a single hdf5file, :return: None :Example: >>> # dump data into an hdf5 formated file >>> datafields = ['s', 'Sx', 'Sy', 'enx', 'eny'] >>> datascript = 'sddsprintdata.sh' >>> datapath = './tests/tracking' >>> hdf5file = './tests/tracking/test.h5' >>> A = DataExtracter('test.sig', *datafields) >>> A.setDataScript(datascript) >>> A.setDataPath (datapath) >>> A.setH5file (hdf5file) >>> A.extractData().dump() >>> >>> # read dumped file >>> fd = h5py.File(hdf5file, 'r') >>> d_s = fd['s'][:] >>> d_sx = fd['Sx'][:] >>> >>> # plot dumped data >>> import matplotlib.pyplot as plt >>> plt.figure(1) >>> plt.plot(d_s, d_sx, 'r-') >>> plt.xlabel('$s$') >>> plt.ylabel('$\sigma_x$') >>> plt.show() Just like the following figure shows: .. image:: ../../images/test_DataExtracter.png :width: 400px
def corpusindex():
    """Get list of pre-installed corpora"""
    pattern = settings.ROOT + "corpora/*"
    return [os.path.basename(path)
            for path in glob.glob(pattern)
            if os.path.isdir(path)]
Get list of pre-installed corpora
def fast_combine_pairs(files, force_single, full_name, separators):
    """assume files that need to be paired are within 10 entries of each
    other, once the list is sorted"""
    files = sort_filenames(files)
    # Pairing is only attempted within overlapping windows of 10 sorted
    # filenames, so mates only need to be near each other after sorting.
    chunks = tz.sliding_window(10, files)
    pairs = [combine_pairs(chunk, force_single, full_name, separators) for chunk in chunks]
    # Flatten the per-window results into one list of groups.
    pairs = [y for x in pairs for y in x]
    # A file appears in several windows; keep, for each file, the largest
    # group it was ever placed in.
    longest = defaultdict(list)
    for pair in pairs:
        for file in pair:
            if len(longest[file]) < len(pair):
                longest[file] = pair
    # Deduplicate surviving groups, order-normalised via sort_filenames.
    longest = {tuple(sort_filenames(x)) for x in longest.values()}
    return [sort_filenames(list(x)) for x in longest]
assume files that need to be paired are within 10 entries of each other, once the list is sorted
def OnTableListToggle(self, event):
    """Table list toggle event handler"""
    pane_info = self.main_window._mgr.GetPane("table_list_panel")
    self._toggle_pane(pane_info)
    event.Skip()
Table list toggle event handler
def _add_string_to_commastring(self, field, string): if string in self._get_stringlist_from_commastring(field): return False strings = '%s,%s' % (self.data.get(field, ''), string) if strings[0] == ',': strings = strings[1:] self.data[field] = strings return True
Add a string to a comma separated list of strings Args: field (str): Field containing comma separated list string (str): String to add Returns: bool: True if string added or False if string already present
def set_ignores(self, folder, *patterns):
    """Applies ``patterns`` to ``folder``'s ``.stignore`` file.

    Args:
        folder (str):
        patterns (str):

    Returns:
        dict
    """
    if not patterns:
        # Nothing to apply; skip the API call entirely.
        return {}
    payload = {'ignore': list(patterns)}
    return self.post('ignores', params={'folder': folder}, data=payload)
Applies ``patterns`` to ``folder``'s ``.stignore`` file. Args: folder (str): patterns (str): Returns: dict
def expanduser(path):
    """Expand ~ and ~user constructs.

    If user or $HOME is unknown, do nothing."""
    if not path.startswith('~'):
        return path
    # Find the end of the optional user name after '~'.
    end = 1
    while end < len(path) and path[end] not in '/\\':
        end += 1
    env = os.environ
    if 'HOME' in env:
        userhome = env['HOME']
    elif 'USERPROFILE' in env:
        userhome = env['USERPROFILE']
    elif 'HOMEPATH' not in env:
        # No way to resolve a home directory: leave the path untouched.
        return path
    else:
        userhome = join(env.get('HOMEDRIVE', ''), env['HOMEPATH'])
    if end != 1:
        # ~user: swap the last component of the detected home directory.
        userhome = join(dirname(userhome), path[1:end])
    return userhome + path[end:]
Expand ~ and ~user constructs. If user or $HOME is unknown, do nothing.
def loudest_time(self, start=0, duration=0):
    """Find the loudest time in the window given by start and duration.

    Returns frame number in context of entire track, not just the window.

    :param integer start: Start frame
    :param integer duration: Number of frames to consider from start
    :returns: Frame number of loudest frame
    :rtype: integer
    """
    if duration == 0:
        # Default window: scan through the end of the track.
        duration = self.sound.nframes
    self.current_frame = start
    frames = self.read_frames(duration)
    # Samples are interleaved (two per frame), so halve the flat argmax
    # index to get a frame offset, then shift back into track coordinates.
    return int(np.floor(frames.argmax() / 2)) + start
Find the loudest time in the window given by start and duration Returns frame number in context of entire track, not just the window. :param integer start: Start frame :param integer duration: Number of frames to consider from start :returns: Frame number of loudest frame :rtype: integer
def parse_fallback(self):
    """Fallback method when parser doesn't know the statement"""
    message = "Unimplemented OBJ format statement '%s' on line '%s'" % (
        self.values[0], self.line.rstrip())
    if self.strict:
        # Strict mode: unknown statements are fatal.
        raise PywavefrontException(message)
    logger.warning(message)
Fallback method when parser doesn't know the statement