code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def usage():
    """Build the colorized usage string for the help command."""
    l_bracket = clr.stringc("[", "dark gray")
    r_bracket = clr.stringc("]", "dark gray")
    pipe = clr.stringc("|", "dark gray")
    app_name = clr.stringc("%prog", "bright blue")
    commands = clr.stringc(pipe.join(c.VALID_ACTIONS), "normal")
    help_opt = clr.stringc("--help", "green")
    options = clr.stringc("options", "yellow")
    entries = [command_name(app_name, action, c.MESSAGES["help_" + action])
               for action in c.VALID_ACTIONS]
    # Drop the trailing character left by the last guide entry.
    guide = ("\n\n" + "".join(entries))[:-1]
    return "{0} {1}{2}{3} {1}{4}{3} {1}{5}{3}\n{6}".format(
        app_name, l_bracket, commands, r_bracket, help_opt, options, guide)
Return the usage for the help command.
def _meters_per_pixel(zoom, lat=0.0, tilesize=256): return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / ( tilesize * 2 ** zoom )
Return the pixel resolution for a given mercator tile zoom and latitude. Parameters ---------- zoom: int Mercator zoom level lat: float, optional Latitude in decimal degree (default: 0) tilesize: int, optional Mercator tile size (default: 256). Returns ------- Pixel resolution in meters
def _make_instance(cls, element_class, webelement):
    """Wrap *webelement* in *element_class*, rebasing the class onto
    FirefoxWebElement when the element comes from Firefox's implementation.

    The class is deep-copied first so the shared class object is not mutated.
    """
    if isinstance(webelement, FirefoxWebElement):
        element_class = copy.deepcopy(element_class)
        rebased = []
        for base in element_class.__bases__:
            rebased.append(FirefoxWebElement if base is WebElement else base)
        element_class.__bases__ = tuple(rebased)
    return element_class(webelement)
Firefox uses another implementation of element. This method switch base of wrapped element to firefox one.
def get_from_search_doc(cls, doc_id):
    """Return a model instance for a search document (or its id string).

    :param doc_id: search document id, or an object exposing ``.doc_id``
    :return: instance of ``cls``
    """
    # Accept either the raw id or a document object carrying one.
    key = getattr(doc_id, 'doc_id', doc_id)
    return cls.from_urlsafe(key)
Returns an instance of the model from a search document id. :param doc_id: Search document id :return: Instance of cls
def get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0)):
    """Return edit scripts transforming *pron_a* into *pron_b*.

    :param pron_a: source pronunciation (list of phoneme strings)
    :param pron_b: target pronunciation (list of phoneme strings)
    :param edit_costs: (insert, replace, delete) costs
    :return: list of edit scripts (each a list of operation dicts)
    """
    insert_cost, replace_cost, delete_cost = edit_costs
    op_costs = {
        'insert': lambda x: insert_cost,
        'match': lambda x, y: 0 if x == y else replace_cost,
        'delete': lambda x: delete_cost,
    }
    distance, scripts, costs, ops = edit_distance.best_transforms(
        pron_a, pron_b, op_costs=op_costs)
    return [full_edit_script(script.to_primitive()) for script in scripts]
Get the edit scripts to transform between two given pronunciations. :param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme :param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme :param edit_costs: Costs of insert, replace and delete respectively :return: List of edit scripts. Each edit script is represented as a list of operations, where each operation is a dictionary.
async def async_set_port_poe_mode(self, port_idx, mode):
    """Set the PoE mode (auto, 24v, passthrough, off) of a switch port.

    Updates an existing override for the port when present so other per-port
    settings survive; otherwise a new override entry is appended.
    """
    for port_override in self.port_overrides:
        if port_override['port_idx'] == port_idx:
            port_override['poe_mode'] = mode
            break
    else:
        # No override for this port yet: create one.
        self.port_overrides.append({
            'port_idx': port_idx,
            'portconf_id': self.ports[port_idx].portconf_id,
            'poe_mode': mode,
        })
    url = 's/{site}/rest/device/' + self.id
    await self._request('put', url, json={'port_overrides': self.port_overrides})
Set port poe mode. Auto, 24v, passthrough, off. Make sure to not overwrite any existing configs.
def cmd_create(self, name, auto=False):
    """Create a new migration named *name*; auto-generate from models when
    *auto* is truthy."""
    LOGGER.setLevel('INFO')
    LOGGER.propagate = 0
    config = self.app.config
    router = Router(self.database,
                    migrate_dir=config['PEEWEE_MIGRATE_DIR'],
                    migrate_table=config['PEEWEE_MIGRATE_TABLE'])
    if auto:
        auto = self.models
    router.create(name, auto=auto)
Create a new migration.
def is_win64():
    """Return True when running on 64-bit Windows.

    Works whether the Python interpreter itself is 64-bit or 32-bit; the
    result is memoized in the module-level ``_is_win64``.
    """
    global _is_win64
    if _is_win64 is None:
        env = os.environ
        # Any of these environment markers indicates a 64-bit Windows host.
        _is_win64 = (
            env.get('PROCESSOR_ARCHITECTURE', 'x86') != 'x86'
            or bool(env.get('PROCESSOR_ARCHITEW6432'))
            or bool(env.get('ProgramW6432'))
        )
    return _is_win64
Return true if running on windows 64 bits. Works whether python itself runs in 64 bits or 32 bits.
def unmapped(sam, mates):
    """Yield unmapped reads (as split field lists) from a SAM line stream.

    A read is yielded when both it and its mate are unmapped ('*' in the
    RNAME and RNEXT fields); with ``mates`` True, reads where either end is
    unmapped are yielded too.
    """
    for read in sam:
        # Skip header lines.
        if read.startswith('@') is True:
            continue
        read = read.strip().split()
        if read[2] == '*' and read[6] == '*':
            yield read
        elif mates is True:
            if read[2] == '*' or read[6] == '*':
                yield read
            # NOTE(review): a read carrying the 'YT:Z:UP' tag can be yielded
            # a second time by this loop after the branch above already
            # yielded it -- confirm whether duplicates are intended.
            for i in read:
                if i == 'YT:Z:UP':
                    yield read
get unmapped reads
def _dump_additional_attributes(additional_attributes):
    """Parse the APK v2-signature additional-attributes blob.

    Returns a human-readable string for the known stripping-protection
    attribute; anything unrecognized falls back to a hexdump.
    """
    attributes_hex = binascii.hexlify(additional_attributes)
    if not additional_attributes:
        return attributes_hex
    stream = io.BytesIO(additional_attributes)
    len_attribute, = unpack('<I', stream.read(4))
    if len_attribute != 8:
        return attributes_hex
    attr_id, = unpack('<I', stream.read(4))
    if attr_id != APK._APK_SIG_ATTR_V2_STRIPPING_PROTECTION:
        return attributes_hex
    scheme_id, = unpack('<I', stream.read(4))
    return "stripping protection set, scheme %d" % scheme_id
Try to parse the additional attributes, falling back to a hexdump when the scheme is unknown.
def write_packages(self, reqs_file):
    """Dump the catalog's packages into *reqs_file*, one per line."""
    lines = ('{}\n'.format(pkg) for pkg in self.packages)
    write_file_lines(reqs_file, lines)
Dump the packages in the catalog in a requirements file
def param_show(param=None):
    """Show params of the varnish cache.

    Returns a dict of parameter name -> value, or False when varnishadm
    reports an error.

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.param_show param
    """
    ret = _run_varnishadm('param.show', [param])
    if ret['retcode']:
        return False
    result = {}
    for line in ret['stdout'].split('\n'):
        m = re.search(r'^(\w+)\s+(.*)$', line)
        if m is None:
            # BUG FIX: blank/continuation lines do not match the pattern;
            # the original called ``m.group`` on None and crashed.
            continue
        result[m.group(1)] = m.group(2)
        if param:
            # Only the first (requested) parameter is wanted.
            break
    return result
Show params of varnish cache CLI Example: .. code-block:: bash salt '*' varnish.param_show param
def check_hash(path, file_hash):
    """Return True when the file at *path* matches *file_hash*.

    The hash may be ``<type>:<value>``, ``<type>=<value>``, or a bare digest
    whose type is inferred from its length via HASHES_REVMAP.
    """
    path = os.path.expanduser(path)
    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')

    hash_type = hash_value = None
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    if hash_value is None:
        # No explicit type given; infer it from the digest length.
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            supported = ', '.join(
                ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                 for x in sorted(HASHES_REVMAP)])
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(file_hash, hash_len, supported))
    return get_hash(path, hash_type) == hash_value
Check if a file matches the given hash string Returns ``True`` if the hash matches, otherwise ``False``. path Path to a file local to the minion. hash The hash to check against the file specified in the ``path`` argument. .. versionchanged:: 2016.11.4 For this and newer versions the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type in the format ``<hash_type>=<hash_value>`` (e.g. ``md5=e138491e9d5b97023cea823fe17bac22``). CLI Example: .. code-block:: bash salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22 salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
def appendInnerHTML(self, html):
    """Append nodes parsed from *html*, like ``element.innerHTML += html``.

    Uses the owner document's encoding when one is associated. Returns None.
    """
    from .Parser import AdvancedHTMLParser
    encoding = self.ownerDocument.encoding if self.ownerDocument else None
    blocks = AdvancedHTMLParser.createBlocksFromHTML(html, encoding)
    self.appendBlocks(blocks)
appendInnerHTML - Appends nodes from arbitrary HTML as if doing element.innerHTML += 'someHTML' in javascript. @param html <str> - Some HTML NOTE: If associated with a document ( AdvancedHTMLParser ), the html will use the encoding associated with that document. @return - None. A browser would return innerHTML, but that's somewhat expensive on a high-level node. So just call .innerHTML explicitly if you need that
def get_features(
    dataset,
    query=None,
    crs="epsg:4326",
    bounds=None,
    sortby=None,
    pagesize=10000,
    max_workers=5,
):
    """Yield features from the DataBC WFS, fetching result pages concurrently."""
    param_dicts = define_request(dataset, query, crs, bounds, sortby, pagesize)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page in executor.map(make_request, param_dicts):
            yield from page
Yield features from DataBC WFS
def append_form(
        self,
        obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]],
        headers: Optional['MultiMapping[str]']=None
) -> Payload:
    """Append a form-urlencoded part built from *obj* (pairs or mapping)."""
    assert isinstance(obj, (Sequence, Mapping))
    if headers is None:
        headers = CIMultiDict()
    pairs = list(obj.items()) if isinstance(obj, Mapping) else obj
    data = urlencode(pairs, doseq=True)
    payload = StringPayload(
        data, headers=headers,
        content_type='application/x-www-form-urlencoded')
    return self.append_payload(payload)
Helper to append form urlencoded part.
def lookup_bulk(self, ResponseGroup="Large", **kwargs):
    """Look up Amazon products in bulk.

    Invalid ASINs are ignored; only matched products are returned.

    :return: list of AmazonProduct instances (possibly empty).
    """
    response = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(response)
    if not hasattr(root.Items, 'Item'):
        return []
    return [
        AmazonProduct(item, self.aws_associate_tag, self, region=self.region)
        for item in root.Items.Item
    ]
Lookup Amazon Products in bulk. Returns all products matching requested ASINs, ignoring invalid entries. :return: A list of :class:`~.AmazonProduct` instances.
def get_marks(self):
    """Return the list of names of all currently set marks."""
    raw = self.message(MessageType.GET_MARKS, '')
    return json.loads(raw)
Get a list of the names of all currently set marks. :rtype: list
def is_equal(self, other):
    """Return True iff *other* (after coercion) has exactly the same members."""
    theirs = self.coerce(other).get_values()
    return self.get_values().symmetric_difference(theirs) == set()
True iff all members are the same
def create_track_token(request):
    """Return a ``TrackToken`` capturing the request and the acting user.

    Can be passed to ``TrackedModel.save`` instead of ``request`` when the
    request object itself is unavailable (e.g. inside a celery task).
    """
    from tracked_model.models import RequestInfo
    request_pk = RequestInfo.create_or_get_from_request(request).pk
    # NOTE(review): ``is_authenticated`` is a property (not callable) on
    # modern Django; the call style here matches older versions -- confirm.
    user_pk = request.user.pk if request.user.is_authenticated() else None
    return TrackToken(request_pk=request_pk, user_pk=user_pk)
Returns ``TrackToken``. ``TrackToken`` contains request and user making changes. It can be passed to ``TrackedModel.save`` instead of ``request``. It is intended to be used when passing ``request`` is not possible e.g. when ``TrackedModel.save`` will be called from celery task.
def read_yaml(file_path, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
    """Read a YAML file and return it as a Python dictionary.

    Mappings are built with *object_pairs_hook* (OrderedDict by default) so
    key order is preserved.
    """
    class _OrderedLoader(Loader):
        pass

    def _construct_mapping(loader, node):
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    _OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)
    with open(file_path, 'r') as f:
        return yaml.load(f, _OrderedLoader)
Read YAML file and return as python dictionary
def _get_all_dependencies_of(name, deps=None, force=False):
    """Recursively collect the Dapi dependencies of *name*.

    Returns a set containing *name* plus every transitive dependency that is
    not already installed.
    """
    # BUG FIX: the original used a shared mutable default (``deps=set()``),
    # which leaked state between top-level calls, and tested the undefined
    # name ``dap`` instead of ``dep`` when skipping installed daps.
    if deps is None:
        deps = set()
    first_deps = _get_api_dependencies_of(name, force=force)
    for dep in first_deps:
        dep = _strip_version_from_dependency(dep)
        if dep in deps:
            continue
        if dep in get_installed_daps():
            continue
        deps |= _get_all_dependencies_of(dep, deps)
    return deps | {name}
Returns list of dependencies of the given dap from Dapi recursively
def validate_url(cls, url: str) -> Optional[Match[str]]:
    """Return the regex match when this extractor handles *url*, else None."""
    return re.match(cls._VALID_URL, url)
Check if the Extractor can handle the given url.
def pprint_label(self):
    "The pretty-printed label string for the Dimension"
    if self.unit is None:
        unit = ''
    else:
        # Format the unit through the same type as the unit value itself.
        unit = type(self.unit)(self.unit_format).format(unit=self.unit)
    return bytes_to_unicode(self.label) + bytes_to_unicode(unit)
The pretty-printed label string for the Dimension
def get_all_edge_nodes(self):
    """Return the set of all GO IDs connected by edges (plain or relational)."""
    nodes = {n for edge in self.edges for n in edge}
    for rel_edges in self.edges_rel.values():
        nodes.update(n for edge in rel_edges for n in edge)
    return nodes
Return a list of all GO IDs that are connected to edges.
def img_url(obj, profile_app_name, profile_model_name):
    """Return the URL of the user's profile image, or "" when the profile
    model cannot be resolved."""
    try:
        content_type = ContentType.objects.get(
            app_label=profile_app_name,
            model=profile_model_name.lower(),
        )
    except (ContentType.DoesNotExist, AttributeError):
        return ""
    profile_model = content_type.model_class()
    profile = profile_model.objects.get(user=obj.user)
    # The image field is identified by having an ``upload_to`` attribute.
    for field in profile_model._meta.get_fields():
        if hasattr(field, "upload_to"):
            return field.value_from_object(profile).url
returns url of profile image of a user
def _get_thintar_prefix(tarname): tfd, tmp_tarname = tempfile.mkstemp(dir=os.path.dirname(tarname), prefix=".thin-", suffix="." + os.path.basename(tarname).split(".", 1)[-1]) os.close(tfd) return tmp_tarname
Make sure thintar temporary name is concurrent and secure. :param tarname: name of the chosen tarball :return: prefixed tarname
def normalize_medscan_name(name):
    """Strip trailing " complex" suffixes from a Medscan agent name.

    Removes the suffix at most twice, so both "X complex" and
    "X complex complex" normalize to "X".
    """
    suffix = ' complex'
    for _ in range(2):
        if name.endswith(suffix):
            name = name[:-len(suffix)]
    return name
Removes the "complex" and "complex complex" suffixes from a medscan agent name so that it better corresponds with the grounding map. Parameters ---------- name: str The Medscan agent name Returns ------- norm_name: str The Medscan agent name with the "complex" and "complex complex" suffixes removed.
def from_element(self, element, defaults={}):
    """Populate this object's attributes from an SVD XML element.

    For each property, preference order is: text of a child element named
    after the property, then an XML attribute of the same name, then
    *defaults* (a mapping or another SvdElement).
    """
    # NOTE(review): the mutable default ``defaults={}`` is shared across
    # calls; it is only read here, so this is safe unless callers mutate it.
    if isinstance(defaults, SvdElement):
        defaults = vars(defaults)
    for key in self.props:
        try:
            # Prefer the text of a child element named after the property.
            value = element.find(key).text
        except AttributeError:
            # No such child: fall back to an XML attribute, then defaults.
            default = defaults[key] if key in defaults else None
            value = element.get(key, default)
        if value is not None:
            if key in self.props_to_integer:
                try:
                    value = int(value)
                except ValueError:
                    # SVD files may encode integers as hex strings.
                    value = int(value, 16)
            elif key in self.props_to_boolean:
                value = value.lower() in ("yes", "true", "t", "1")
            setattr(self, key, value)
Populate object variables from SVD element
def phi_s(spin1x, spin1y, spin2x, spin2y):
    """Return the sum of the two in-plane spin azimuthal angles, mod 2*pi."""
    total = (phi_from_spinx_spiny(spin1x, spin1y)
             + phi_from_spinx_spiny(spin2x, spin2y))
    return total % (2 * numpy.pi)
Returns the sum of the in-plane perpendicular spins.
def is_same_quaternion(q0, q1):
    """Return True if q0 and q1 represent the same rotationally-equal
    quaternion (q and -q are considered equal)."""
    a = np.array(q0)
    b = np.array(q1)
    return np.allclose(a, b) or np.allclose(a, -b)
Return True if two quaternions are equal.
def import_dashboards(self):
    """Import dashboards from an uploaded JSON file (POST), overriding any
    existing ones; otherwise render the upload form."""
    uploaded = request.files.get('file')
    if request.method == 'POST' and uploaded:
        dashboard_import_export.import_dashboards(db.session, uploaded.stream)
        return redirect('/dashboard/list/')
    return self.render_template('superset/import_dashboards.html')
Overrides the dashboards using json instances from the file.
def compute(self):
    """Compute and return the request signature (SignatureVersion 1 or 2).

    Raises RuntimeError when a signature is already present or the version
    is unsupported.
    """
    if "Signature" in self.params:
        raise RuntimeError("Existing signature in parameters")
    if self.signature_version is not None:
        version = self.signature_version
    else:
        version = self.params["SignatureVersion"]
    if str(version) == "1":
        payload = self.old_signing_text()
        hash_type = "sha1"
    elif str(version) == "2":
        payload = self.signing_text()
        if self.signature_method is not None:
            signature_method = self.signature_method
        else:
            signature_method = self.params["SignatureMethod"]
        # e.g. "HmacSHA256" -> "sha256"
        hash_type = signature_method[len("Hmac"):].lower()
    else:
        raise RuntimeError("Unsupported SignatureVersion: '%s'" % version)
    return self.creds.sign(payload, hash_type)
Compute and return the signature according to the given data.
def gen_query(self):
    """Generate the SQL query for this edge: forward when no destination is
    set, inverse otherwise."""
    if self.dst is None:
        return SQL.forwards_relation(self.src, self.rel)
    return SQL.inverse_relation(self.dst, self.rel)
Generate an SQL query for the edge object.
def _wait_until_connectable(self, timeout=30):
    """Block until the firefox extension accepts connections.

    Raises WebDriverException when the browser exits early or *timeout*
    seconds elapse; returns True on success.
    """
    elapsed = 0
    while not utils.is_connectable(self.profile.port):
        if self.process.poll() is not None:
            # The browser process died before we could connect.
            raise WebDriverException(
                "The browser appears to have exited "
                "before we could connect. If you specified a log_file in "
                "the FirefoxBinary constructor, check it for details.")
        if elapsed >= timeout:
            self.kill()
            raise WebDriverException(
                "Can't load the profile. Possible firefox version mismatch. "
                "You must use GeckoDriver instead for Firefox 48+. Profile "
                "Dir: %s If you specified a log_file in the "
                "FirefoxBinary constructor, check it for details."
                % (self.profile.path))
        elapsed += 1
        time.sleep(1)
    return True
Blocks until the extension is connectable in the firefox.
def group(self, base_dn, samaccountname, attributes=(), explicit_membership_only=False):
    """Return a single populated ADGroup found by sAMAccountName.

    Logs and returns None when no match is found.

    :param str base_dn: the base DN to search within
    :param str samaccountname: the group's sAMAccountName
    :param list attributes: object attributes to populate (default: all)
    """
    matches = self.groups(base_dn, samaccountnames=[samaccountname],
                          attributes=attributes,
                          explicit_membership_only=explicit_membership_only)
    try:
        return matches[0]
    except IndexError:
        logging.info("%s - unable to retrieve object from AD by sAMAccountName",
                     samaccountname)
Produces a single, populated ADGroup object through the object factory. Does not populate attributes for the caller instance. sAMAccountName may not be present in group objects in modern AD schemas. Searching by common name and object class (group) may be an alternative approach if required in the future. :param str base_dn: The base DN to search within :param str samaccountname: The group's sAMAccountName :param list attributes: Object attributes to populate, defaults to all :return: A populated ADGroup object :rtype: ADGroup
def list(self, **params):
    """Retrieve all deal loss reasons available to the user.

    :calls: ``get /loss_reasons``
    :param dict params: (optional) search options.
    :return: list of attribute-style dicts representing LossReasons.
    """
    response = self.http_client.get("/loss_reasons", params=params)
    # Response is a (status, headers, body) tuple; only the body is needed.
    return response[2]
Retrieve all reasons Returns all deal loss reasons available to the user according to the parameters provided :calls: ``get /loss_reasons`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of LossReasons. :rtype: list
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001):
    """Optimise *x* with Levenberg-Marquardt iterations.

    Iterates until the change in x falls below *tolerance*. Accepted steps
    shrink the damping factor (floored at 1e-20); rejected steps grow it.
    """
    x_new = x
    x_old = x - 1
    f_old = f(x_new, a, c)
    while np.abs(x_new - x_old).sum() > tolerance:
        x_old = x_new
        x_tmp = levenberg_marquardt_update(x_old, a, c, damping)
        f_new = f(x_tmp, a, c)
        if f_new < f_old:
            # BUG FIX: ``np.max(damping / 10., 1e-20)`` passed 1e-20 as the
            # ``axis`` argument; the intended scalar floor is built-in max().
            damping = max(damping / 10., 1e-20)
            x_new = x_tmp
            f_old = f_new
        else:
            damping *= 10.
    return x_new
Optimise value of x using levenberg-marquardt
def _read_etc(etc_file): etc_type = dtype([('offset', '<i'), ('samplestamp', '<i'), ('sample_num', '<i'), ('sample_span', '<h'), ('unknown', '<h')]) with etc_file.open('rb') as f: f.seek(352) etc = fromfile(f, dtype=etc_type) return etc
Return information about table of content for each erd.
async def list_keys(request: web.Request) -> web.Response:
    """GET /server/ssh_keys -> 200 with the authorized_keys entries.

    Response body: {"public_keys": [{"key_md5": ..., "key": ...}, ...]}.
    """
    keys = [{'key_md5': details[0], 'key': details[1]}
            for details in get_keys()]
    return web.json_response({'public_keys': keys}, status=200)
List keys in the authorized_keys file. GET /server/ssh_keys -> 200 OK {"public_keys": [{"key_md5": md5 hex digest, "key": key string}]} (or 403 if not from the link-local connection)
def get_polypeptide_within(self, chain_id, resnum, angstroms, only_protein=True, use_ca=False,
                           custom_coord=None, return_resnums=False):
    """Get a Polypeptide of the residues within *angstroms* of chain+resnum.

    Args:
        chain_id (str): chain ID of the residue number
        resnum (int): residue number of the structure
        angstroms (float): radius of the search sphere
        only_protein (bool): exclude HETATM residues when True
        use_ca (bool): search from the alpha-carbon atom instead of the
            residue's last atom
        custom_coord (list): custom XYZ coordinate to search from
        return_resnums (bool): also return the matched residue numbers

    Returns:
        Bio.PDB.Polypeptide.Polypeptide (and optionally a list of resnums)
    """
    # Reuse an already-parsed structure when available.
    if self.structure:
        parsed = self.structure
    else:
        parsed = self.parse_structure()
    residue_list = ssbio.protein.structure.properties.residues.within(resnum=resnum,
                                                                      chain_id=chain_id,
                                                                      model=parsed.first_model,
                                                                      angstroms=angstroms,
                                                                      use_ca=use_ca,
                                                                      custom_coord=custom_coord)
    if only_protein:
        # Protein residues carry a blank hetero flag (' ') in id[0].
        filtered_residue_list = [x for x in residue_list if x.id[0] == ' ']
    else:
        filtered_residue_list = residue_list
    residue_list_combined = Polypeptide(filtered_residue_list)
    if return_resnums:
        resnums = [int(x.id[1]) for x in filtered_residue_list]
        return residue_list_combined, resnums
    return residue_list_combined
Get a Polypeptide object of the amino acids within X angstroms of the specified chain + residue number. Args: resnum (int): Residue number of the structure chain_id (str): Chain ID of the residue number angstroms (float): Radius of the search sphere only_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence use_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used) custom_coord (list): custom XYZ coord return_resnums (bool): if list of resnums should be returned Returns: Bio.PDB.Polypeptide.Polypeptide: Biopython Polypeptide object
def find_device(self):
    """Scan bluetooth for a "Wireless Controller" and return a connected
    DS4 device, or None when no controller is seen."""
    for bdaddr, name in self.scan():
        if name == "Wireless Controller":
            # BUG FIX: stdlib logging interpolates %-style lazy args; the
            # original "{0}" placeholder was never filled in. (If
            # self.logger is a custom {}-style logger, revert this.)
            self.logger.info("Found device %s", bdaddr)
            return BluetoothDS4Device.connect(bdaddr)
Scan for bluetooth devices and return a DS4 device if found.
def max_fmeasure(fg_vals, bg_vals):
    """Return the maximum F-measure over the ROC curve of the positive
    (*fg_vals*) versus negative (*bg_vals*) score sets, or None when no
    valid point exists."""
    x, y = roc_values(fg_vals, bg_vals)
    # Skip the initial (0, 0)-style point.
    x, y = x[1:], y[1:]
    precision = y / (y + x)
    valid = np.logical_and(precision * y > 0, precision + y > 0)
    precision = precision[valid]
    recall = y[valid]
    fscores = (2 * precision * recall) / (precision + recall)
    if len(fscores) > 0:
        return np.nanmax(fscores)
    return None
Computes the maximum F-measure. Parameters ---------- fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. Returns ------- f : float Maximum f-measure.
def keys_breadth_first(self, include_dicts=False):
    """Yield all keys of nested DotDict instances breadth-first, in dotted
    X.Y.Z form; with *include_dicts* the namespace keys are yielded too."""
    nested = []
    for key in self._key_order:
        if isinstance(getattr(self, key), DotDict):
            nested.append(key)
            if include_dicts:
                yield key
        else:
            yield key
    # Recurse into namespaces only after all plain keys at this level.
    for namespace in nested:
        for subkey in self[namespace].keys_breadth_first(include_dicts):
            yield '%s.%s' % (namespace, subkey)
a generator that returns all the keys in a set of nested DotDict instances. The keys take the form X.Y.Z
def preview_view(self, context):
    """Render this block for preview: author_view when defined, otherwise
    student_view (used by StudioContainerWithNestedXBlocksMixin)."""
    if hasattr(self, 'author_view'):
        return self.author_view(context)
    return self.student_view(context)
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context. Default implementation uses author_view if available, otherwise falls back to student_view Child classes can override this method to control their presentation in preview context
def move(self, delta):
    """Shift the tree's position rectangle and every node by *delta*.

    Args:
        delta (tuple): (dx, dy) adjustment of the position.
    """
    dx, dy = delta[0], delta[1]
    x0, y0, x1, y1 = self.pos
    self.pos = (x0 + dx, y0 + dy, x1 + dx, y1 + dy)
    for generation in self.nodes:
        for node in generation:
            node.move(delta)
Move the tree. Args: delta (tupel): The adjustment of the position.
def get_mchirp(h5group):
    """Calculate the chirp-mass column for a PyCBC HDF5 table group:
    (m1*m2)**(3/5) / (m1+m2)**(1/5)."""
    m1 = h5group['mass1'][:]
    m2 = h5group['mass2'][:]
    return (m1 * m2) ** (3 / 5.) / (m1 + m2) ** (1 / 5.)
Calculate the chirp mass column for this PyCBC HDF5 table group
def _report_problem(self, problem, level=logging.ERROR): problem = self.basename + ': ' + problem if self._logger.isEnabledFor(level): self._problematic = True if self._check_raises: raise DapInvalid(problem) self._logger.log(level, problem)
Report a given problem
def exec_commands(commands: str, **parameters: Any) -> None:
    """Execute semicolon-separated Python commands (testing helper).

    Underscores in *commands* stand for spaces and double underscores for
    literal underscores; *parameters* are bound as variables first.
    """
    cmdlist = commands.split(';')
    print(f'Start to execute the commands {cmdlist} for testing purposes.')
    for par, value in parameters.items():
        # NOTE(review): relies on ``exec`` mutating the function's locals so
        # later commands can see these names -- CPython-specific behaviour
        # that changes under PEP 667 (3.13); confirm on target versions.
        exec(f'{par} = {value}')
    for command in cmdlist:
        # Map '__' -> '_' and '_' -> ' ' via a temporary placeholder.
        command = command.replace('__', 'temptemptemp')
        command = command.replace('_', ' ')
        command = command.replace('temptemptemp', '_')
        exec(command)
Execute the given Python commands. Function |exec_commands| is thought for testing purposes only (see the main documentation on module |hyd|). Seperate individual commands by semicolons and replaced whitespaces with underscores: >>> from hydpy.exe.commandtools import exec_commands >>> import sys >>> exec_commands("x_=_1+1;print(x)") Start to execute the commands ['x_=_1+1', 'print(x)'] for testing purposes. 2 |exec_commands| interprets double underscores as a single underscores: >>> exec_commands("x_=_1;print(x.____class____)") Start to execute the commands ['x_=_1', 'print(x.____class____)'] \ for testing purposes. <class 'int'> |exec_commands| evaluates additional keyword arguments before it executes the given commands: >>> exec_commands("e=x==y;print(e)", x=1, y=2) Start to execute the commands ['e=x==y', 'print(e)'] for testing purposes. False
async def listDependentTasks(self, *args, **kwargs):
    """List tasks that depend on the given taskId.

    Results may be paginated via ``continuationToken``; see the
    ``v1/list-dependent-tasks-response.json#`` output schema.
    """
    info = self.funcinfo["listDependentTasks"]
    return await self._makeApiCall(info, *args, **kwargs)
List Dependent Tasks List tasks that depend on the given `taskId`. As many tasks from different task-groups may dependent on a single tasks, this end-point may return a `continuationToken`. To continue listing tasks you must call `listDependentTasks` again with the `continuationToken` as the query-string option `continuationToken`. By default this end-point will try to return up to 1000 tasks in one request. But it **may return less**, even if more tasks are available. It may also return a `continuationToken` even though there are no more results. However, you can only be sure to have seen all results if you keep calling `listDependentTasks` with the last `continuationToken` until you get a result without a `continuationToken`. If you are not interested in listing all the tasks at once, you may use the query-string option `limit` to return fewer. This method gives output: ``v1/list-dependent-tasks-response.json#`` This method is ``stable``
def _init_browser(self):
    """Open the harness web page in a quiet Chrome.

    The browser: 1. disables extensions, 2. ignores certificate errors and
    3. always allows notifications.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--disable-extensions')
    chrome_options.add_argument('--disable-infobars')
    chrome_options.add_argument('--ignore-certificate-errors')
    # 1 == always allow notifications.
    chrome_options.add_experimental_option('prefs', {
        'profile.managed_default_content_settings.notifications': 1
    })
    browser = webdriver.Chrome(chrome_options=chrome_options)
    browser.set_page_load_timeout(10)
    browser.implicitly_wait(1)
    browser.maximize_window()
    browser.get(settings.HARNESS_URL)
    self._browser = browser
    # Give the page up to 30s to load, then fail the test with a clear
    # assertion message if the expected title never appeared.
    if not wait_until(lambda: 'Thread' in browser.title, 30):
        self.assertIn('Thread', browser.title)
Open harness web page. Open a quiet chrome which: 1. disables extensions, 2. ignore certificate errors and 3. always allow notifications.
def get_instance(self, payload):
    """Build a YesterdayInstance from an API *payload* dict."""
    return YesterdayInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
    )
Build an instance of YesterdayInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.usage.record.yesterday.YesterdayInstance :rtype: twilio.rest.api.v2010.account.usage.record.yesterday.YesterdayInstance
def save(self, set_cookie, **params):
    """Write the (encrypted) session to a cookie when it has changed.

    Returns True when a cookie was set, False otherwise.
    """
    # Symmetric difference is empty only when the session is unchanged.
    if not (set(self.store.items()) ^ set(self.items())):
        return False
    payload = self.encrypt(json.dumps(dict(self.items())))
    if not isinstance(payload, str):
        payload = payload.encode(self.encoding)
    # NOTE(review): the ``params`` argument is ignored; cookie options come
    # from ``self.params`` -- confirm this is intended.
    set_cookie(self.key, payload, **self.params)
    return True
Update cookies if the session has been changed.
def schema(self):
    """The nested Schema object, resolved lazily and cached.

    Resolution order for ``self.nested``: an existing Schema instance, a
    Schema subclass, the literal string 'self' (parent's class), or a
    registered class name looked up in the marshmallow class registry.

    .. versionchanged:: 1.0.0
        Renamed from `serializer` to `schema`
    """
    if not self.__schema:
        # Inherit the parent schema's context when available.
        context = getattr(self.parent, 'context', {})
        if isinstance(self.nested, SchemaABC):
            # Already an instance: reuse it, merging in the context.
            self.__schema = self.nested
            self.__schema.context.update(context)
        else:
            if isinstance(self.nested, type) and issubclass(self.nested, SchemaABC):
                schema_class = self.nested
            elif not isinstance(self.nested, basestring):
                raise ValueError(
                    'Nested fields must be passed a '
                    'Schema, not {}.'.format(self.nested.__class__),
                )
            elif self.nested == 'self':
                # Self-referential nesting uses the parent's own class.
                schema_class = self.parent.__class__
            else:
                # A string name: resolve through the class registry.
                schema_class = class_registry.get_class(self.nested)
            self.__schema = schema_class(
                many=self.many,
                only=self.only,
                exclude=self.exclude,
                context=context,
                load_only=self._nested_normalized_option('load_only'),
                dump_only=self._nested_normalized_option('dump_only'),
            )
    return self.__schema
The nested Schema object. .. versionchanged:: 1.0.0 Renamed from `serializer` to `schema`
def dimNamesFromDataset(h5Dataset):
    """Construct dimension names for an h5py dataset.

    Per dimension, preference order is: basename of a dataset referenced by
    the dimension scale, then the scale's label, then a generic 'Dim<n>'.
    """
    dimNames = []
    for dimNr, dimScales in enumerate(h5Dataset.dims):
        if len(dimScales) == 0:
            dimNames.append('Dim{}'.format(dimNr))
        elif len(dimScales) == 1:
            # NOTE(review): indexing ``.items()[0]`` assumes h5py returns a
            # list here (true for DimensionProxy); a plain dict view would
            # not be indexable -- confirm against the h5py version in use.
            dimScaleLabel, dimScaleDataset = dimScales.items()[0]
            path = dimScaleDataset.name
            if path:
                dimNames.append(os.path.basename(path))
            elif dimScaleLabel:
                dimNames.append(dimScaleLabel)
            else:
                dimNames.append('Dim{}'.format(dimNr))
        else:
            # Ambiguous: warn and fall back to a generic name.
            logger.warn("More than one dimension scale found: {!r}".format(dimScales))
            dimNames.append('Dim{}'.format(dimNr))
    return dimNames
Constructs the dimension names given a h5py dataset. First looks in the dataset's dimension scales to see if it refers to another dataset. In that case the referred dataset's name is used. If not, the label of the dimension scale is used. Finally, if this is empty, the dimension is numbered.
def poll_for_exceptionless_callable(callable, attempts, interval):
    """Wrap *callable* so it is retried until a call does not raise.

    :param callable: callable to invoke; the first exception-free call's
        result is returned
    :param attempts: number of iterations to attempt
    :param interval: seconds to sleep between attempts

    Raises MaximumAttemptsReached (wrapping the last exception) when every
    attempt failed.
    """
    @wraps(callable)
    def poll(*args, **kwargs):
        last = attempts - 1
        for attempt in range(attempts):
            try:
                return callable(*args, **kwargs)
            except Exception as ex:
                if attempt == last:
                    raise MaximumAttemptsReached(ex)
                time.sleep(interval)
    return poll
Poll with a given callable for a specified number of times. :param callable: callable to invoke in loop -- if no exception is raised the call is considered succeeded :param attempts: number of iterations to attempt :param interval: seconds to wait before next attempt
def set_color_scheme(self, color_scheme, reset=True):
    """Set the IPython color scheme, ignoring shells without support."""
    try:
        self.shellwidget.set_color_scheme(color_scheme, reset)
    except AttributeError:
        # Shell widget missing or lacks the method -- nothing to do.
        pass
Set IPython color scheme.
def zoom_blur(x, severity=1):
    """Apply zoom blur to an image by averaging progressively zoomed copies.

    Args:
        x: numpy array, uncorrupted image with uint8 pixels in [0, 255].
        severity: integer corruption severity (1-5).

    Returns:
        numpy array with uint8 pixels in [0, 255].
    """
    zoom_factors = [
        np.arange(1, 1.11, 0.01),
        np.arange(1, 1.16, 0.01),
        np.arange(1, 1.21, 0.02),
        np.arange(1, 1.26, 0.02),
        np.arange(1, 1.31, 0.03),
    ][severity - 1]
    img = (np.array(x) / 255.).astype(np.float32)
    acc = np.zeros_like(img)
    for zoom_factor in zoom_factors:
        acc += clipped_zoom(img, zoom_factor)
    blended = (img + acc) / (len(zoom_factors) + 1)
    x_clip = np.clip(blended, 0, 1) * 255
    return around_and_astype(x_clip)
Zoom blurring to images. Applying zoom blurring to images by zooming the central part of the images. Args: x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255]. severity: integer, severity of corruption. Returns: numpy array, image with uint8 pixels in [0,255]. Applied zoom blur.
def integrate_storage(self, timeseries, position, **kwargs):
    """Integrate a storage unit into the grid.

    See :class:`~.grid.network.StorageControl` for details; the integration
    happens as a side effect of constructing it.
    """
    StorageControl(edisgo=self, timeseries=timeseries, position=position,
                   **kwargs)
Integrates storage into grid. See :class:`~.grid.network.StorageControl` for more information.
def add_synchronous_cb(self, cb):
    """Register *cb* as the callback releasing a synchronous transaction.

    On a synchronous connection/channel this blocks, pumping frames until
    the wrapped callback fires, and returns its result; otherwise the
    callback is simply queued.
    """
    if self.connection.synchronous or self._synchronous:
        wrapper = SyncWrapper(cb)
        self._pending_events.append(wrapper)
        # Block until the wrapper has consumed its result.
        while wrapper._read:
            if self.closed:
                # Prefer a descriptive error when close info is available.
                if self.close_info and \
                        len(self.close_info['reply_text']) > 0:
                    # NOTE(review): the format args are passed to the
                    # exception unformatted -- confirm ChannelClosed
                    # interpolates them logging-style.
                    raise ChannelClosed(
                        "channel %d is closed: %s : %s",
                        self.channel_id,
                        self.close_info['reply_code'],
                        self.close_info['reply_text'])
                raise ChannelClosed()
            self.connection.read_frames()
        return wrapper._result
    else:
        self._pending_events.append(cb)
Add an expectation of a callback to release a synchronous transaction.
def _get_top_of_rupture_depth_term(self, C, imt, rup): if rup.ztor >= 20.0: return C['a15'] else: return C['a15'] * rup.ztor / 20.0
Compute and return top of rupture depth term. See paragraph 'Depth-to-Top of Rupture Model', page 1042.
async def server_call_async(method, server,
                            loop: asyncio.AbstractEventLoop = None,
                            timeout=DEFAULT_TIMEOUT, verify_ssl=True,
                            **parameters):
    """Make an asynchronous call to an un-authenticated method on a server.

    :param method: The method name.
    :param server: The MyGeotab server.
    :param loop: The event loop (defaults to the current event loop).
    :param timeout: Call timeout in seconds.
    :param verify_ssl: If True, verify the SSL certificate.
    :param parameters: Additional parameters to send.
    :return: The JSON result (decoded into a dict) from the server.
    """
    # BUG FIX: the original default ``asyncio.get_event_loop()`` was
    # evaluated once at import time, capturing (or creating) whatever loop
    # existed then. Resolve the loop lazily per call instead.
    if method is None:
        raise Exception("A method name must be specified")
    if server is None:
        raise Exception("A server (eg. my3.geotab.com) must be specified")
    if loop is None:
        loop = asyncio.get_event_loop()
    parameters = api.process_parameters(parameters)
    return await _query(server, method, parameters, timeout=timeout,
                        verify_ssl=verify_ssl, loop=loop)
Makes an asynchronous call to an un-authenticated method on a server. :param method: The method name. :param server: The MyGeotab server. :param loop: The event loop. :param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes). :param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this. :param parameters: Additional parameters to send (for example, search=dict(id='b123') ). :return: The JSON result (decoded into a dict) from the server. :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server. :raise TimeoutException: Raises when the request does not respond after some time.
def fulltext(search, lang=Lang.English, ignore_case=True):
    """Build a MongoDB ``$text`` full-text-search filter.

    Example::

        filters = Text.fulltext("python pymongo_mate")

    .. note:: This filter does not need a field name.
    """
    spec = {
        "$search": search,
        "$language": lang,
        "$caseSensitive": not ignore_case,
        "$diacriticSensitive": False,
    }
    return {"$text": spec}
Full text search. Example:: filters = Text.fulltext("python pymongo_mate") .. note:: This field doesn't need to specify field.
def encode_hooklist(self, hooklist, msg):
    """Encode commit hooks into their protobuf equivalents.

    Used in bucket properties.

    :param hooklist: a list of commit hooks
    :param msg: a protobuf repeated field of commit hooks
    """
    for hook in hooklist:
        self.encode_hook(hook, msg.add())
Encodes a list of commit hooks into their protobuf equivalent. Used in bucket properties. :param hooklist: a list of commit hooks :type hooklist: list :param msg: a protobuf field that is a list of commit hooks
def basic_auth_tween_factory(handler, registry):
    """Pyramid tween: parse HTTP basic auth and expose REMOTE_USER."""
    def basic_auth_tween(request):
        remote_user = get_remote_user(request)
        if remote_user is not None:
            # Only the username part of the credentials is exposed.
            request.environ['REMOTE_USER'] = remote_user[0]
        return handler(request)
    return basic_auth_tween
Do basic authentication, parse HTTP_AUTHORIZATION and set remote_user variable to request
def path(self):
    """Requested path as unicode, always with a leading slash (even when
    the URL root is accessed)."""
    raw = wsgi_decoding_dance(
        self.environ.get("PATH_INFO") or "",
        self.charset,
        self.encoding_errors,
    )
    return "/" + raw.lstrip("/")
Requested path as unicode. This works a bit like the regular path info in the WSGI environment but will always include a leading slash, even if the URL root is accessed.
def get_role(member, cython=False):
    """Return the reStructuredText role best describing `member`.

    'func' for routines and numpy ufuncs, 'class' for classes, and
    'const' for everything else.  With `cython` set, non-class members
    of cython extension modules are reported as 'func'.
    """
    if inspect.isclass(member):
        return 'class'
    if inspect.isroutine(member) or isinstance(member, numpy.ufunc):
        return 'func'
    return 'func' if cython else 'const'
Return the reStructuredText role `func`, `class`, or `const` best describing the given member. Some examples based on the site-package |numpy|. |numpy.clip| is a function: >>> from hydpy.core.autodoctools import Substituter >>> import numpy >>> Substituter.get_role(numpy.clip) 'func' |numpy.ndarray| is a class: >>> Substituter.get_role(numpy.ndarray) 'class' |numpy.ndarray.clip| is a method, for which also the `function` role is returned: >>> Substituter.get_role(numpy.ndarray.clip) 'func' For everything else the `constant` role is returned: >>> Substituter.get_role(numpy.nan) 'const' When analysing cython extension modules, set the `cython` flag to |True|. |Double| is correctly identified as a class: >>> from hydpy.cythons import pointerutils >>> Substituter.get_role(pointerutils.Double, cython=True) 'class' Only with the `cython` flag being |True|, for everything else the `function` text role is returned (doesn't make sense here, but the |numpy| module is not something defined in module |pointerutils| anyway): >>> Substituter.get_role(pointerutils.numpy, cython=True) 'func'
def to_dms(angle, style='dms'):
    """Convert a decimal angle to degrees, minutes and possibly seconds.

    Args:
        angle (float): Angle to convert
        style (str): 'dms' for (deg, min, sec), 'dm' for (deg, fractional min)

    Returns:
        tuple: converted angle; every element carries the sign of ``angle``

    Raises:
        ValueError: Unknown value for ``style``
    """
    sign = -1 if angle < 0 else 1
    total_seconds = abs(angle) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    if style == 'dms':
        parts = (int(degrees), int(minutes), seconds)
    elif style == 'dm':
        parts = (int(degrees), minutes + seconds / 60)
    else:
        raise ValueError('Unknown style type %r' % style)
    return tuple(sign * abs(part) for part in parts)
Convert decimal angle to degrees, minutes and possibly seconds. Args: angle (float): Angle to convert style (str): Return fractional or whole minutes values Returns: tuple of int: Angle converted to degrees, minutes and possibly seconds Raises: ValueError: Unknown value for ``style``
def historical(self, date, base='USD'):
    """Fetch historical exchange-rate data from the service.

    :param date: the date of the rates to fetch
    :param base: base currency (default 'USD')
    :raises OpenExchangeRatesClientException: on any request failure
    :return: decoded JSON payload with timestamp, base and rates
    """
    url = self.ENDPOINT_HISTORICAL % date.strftime("%Y-%m-%d")
    try:
        resp = self.client.get(url, params={'base': base})
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise OpenExchangeRatesClientException(e)
    # Parse all numbers as Decimal to avoid float rounding of rates.
    return resp.json(parse_int=decimal.Decimal, parse_float=decimal.Decimal)
Fetches historical exchange rate data from service :Example Data: { disclaimer: "<Disclaimer data>", license: "<License data>", timestamp: 1358150409, base: "USD", rates: { AED: 3.666311, AFN: 51.2281, ALL: 104.748751, AMD: 406.919999, ANG: 1.7831, ... } }
def _assembled_out_file_name(self):
    """Return the absolute path configured for assembled-reads output.

    Raises ValueError when the -s flag is not set.
    """
    param = self.Parameters['-s']
    if not param.isOn():
        raise ValueError(
            "No assembled-reads (flag -s) output path specified")
    return self._absolute(str(param.Value))
Checks file name is set for assembled output. Returns absolute path.
def can_manage(user, semester=None, pool=None, any_pool=False):
    """Whether a user is allowed to manage a workshift semester.

    True for that semester's workshift managers, site-wide workshift
    managers, the given pool's managers, managers of any pool (when
    ``any_pool`` is set), and superusers/staff.
    """
    if semester and user in semester.workshift_managers.all():
        return True
    if Manager:
        site_managers = Manager.objects.filter(
            incumbent__user=user, workshift_manager=True,
        )
        if site_managers.count() > 0:
            return True
    if pool and pool.managers.filter(incumbent__user=user).count() > 0:
        return True
    if any_pool and WorkshiftPool.objects.filter(
            managers__incumbent__user=user):
        return True
    return user.is_superuser or user.is_staff
Whether a user is allowed to manage a workshift semester. This includes the current workshift managers, that semester's workshift managers, and site superusers.
def get_http_header(self) -> Response:
    """Return the HTTP header parsed from the start of the payload.

    Only the first 4 KiB of the payload is examined.

    Returns:
        Response, None: an instance of :class:`.http.request.Response`,
        or None when no well-formed header could be parsed.
    """
    with wpull.util.reset_file_offset(self.block_file):
        head = self.block_file.read(4096)

    header_match = re.match(br'(.*?\r?\n\r?\n)', head)
    if not header_match:
        return

    status_line, _sep, field_str = header_match.group(1).partition(b'\n')

    try:
        version, code, reason = Response.parse_status_line(status_line)
    except ValueError:
        return

    response = Response(status_code=code, reason=reason, version=version)
    try:
        # Be lenient with malformed header fields.
        response.fields.parse(field_str, strict=False)
    except ValueError:
        return
    return response
Return the HTTP header. It only attempts to read the first 4 KiB of the payload. Returns: Response, None: Returns an instance of :class:`.http.request.Response` or None.
def setup(self, app):
    """Check for conflicts with other installed SQLAlchemy plugins.

    Refuses installation when another plugin claims the same keyword,
    renames this plugin on a name clash, and verifies metadata is
    available when table creation was requested.
    """
    for plugin in app.plugins:
        if not isinstance(plugin, SQLAlchemyPlugin):
            continue
        if plugin.keyword == self.keyword:
            raise bottle.PluginError(
                "Found another SQLAlchemy plugin with conflicting "
                "settings (non-unique keyword).")
        if plugin.name == self.name:
            # Disambiguate our name using the (unique) keyword.
            self.name += '_%s' % self.keyword
    if self.create and not self.metadata:
        raise bottle.PluginError('Define metadata value to create database.')
Make sure that other installed plugins don't affect the same keyword argument and check if metadata is available.
def _rename_duplicate_tabs(self, current, name, path):
    """Disambiguate tabs whose title matches ``name``.

    The first other tab carrying the same title is renamed to include
    its parent directory.  Returns the (possibly prefixed) title to use
    for the current tab.
    """
    def _with_parent_dir(file_path, tab_name):
        # Prefix the tab name with the name of its parent directory.
        parent_dir = os.path.split(os.path.abspath(
            os.path.join(file_path, os.pardir)))[1]
        return os.path.join(parent_dir, tab_name)

    for i in range(self.count()):
        other = self.widget(i)
        if other._tab_name != name or other == current:
            continue
        other_path = other.file.path
        if other_path:
            new_name = _with_parent_dir(other_path, name)
            self.setTabText(i, new_name)
            other._tab_name = new_name
        break

    if path:
        return _with_parent_dir(path, name)
    return name
Rename tabs whose title is the same as the name
def _parse_q2r(self, f): natom, dim, epsilon, borns = self._parse_parameters(f) fc_dct = {'fc': self._parse_fc(f, natom, dim), 'dimension': dim, 'dielectric': epsilon, 'born': borns} return fc_dct
Parse q2r output file The format of q2r output is described at the mailing list below: http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html https://www.mail-archive.com/pw_forum@pwscf.org/msg24388.html
def image_scale(xscale=1.0, yscale=1.0, axes="gca"):
    """Scale the extent of the first image on ``axes``.

    Defaults to the current pylab axes when axes == "gca".
    """
    if axes == "gca":
        axes = _pylab.gca()
    xmin, xmax, ymin, ymax = axes.images[0].get_extent()
    image_set_extent([xmin * xscale, xmax * xscale],
                     [ymin * yscale, ymax * yscale], axes)
Scales the image extent.
def compute_ng_stat(gene_graph, pos_ct, alpha=.5):
    """Compute the clustering score for the gene on its neighbor graph.

    Parameters
    ----------
    gene_graph : dict
        Graph of spatially near codons. keys = nodes, edges = key -> value.
    pos_ct : dict
        missense mutation count for each codon
    alpha : float
        smoothing factor: fraction of each count spread onto neighbors

    Returns
    -------
    graph_score : float
        entropy score measuring clustering of missense mutations
    coverage : int
        number of nodes that received non-zero weight
    """
    if not pos_ct:
        # No mutations at all: maximal score, nothing covered.
        return 1.0, 0
    codon_vals = np.zeros(max(gene_graph) + 1)
    for pos, mut_count in pos_ct.items():
        neighbors = list(gene_graph[pos])
        # alpha of the weight goes to neighbors, the rest stays on pos.
        codon_vals[neighbors] += alpha * mut_count
        codon_vals[pos] += (1 - alpha) * mut_count
    p = codon_vals / np.sum(codon_vals)
    return mymath.shannon_entropy(p), np.count_nonzero(p)
Compute the clustering score for the gene on its neighbor graph. Parameters ---------- gene_graph : dict Graph of spatially near codons. keys = nodes, edges = key -> value. pos_ct : dict missense mutation count for each codon alpha : float smoothing factor Returns ------- graph_score : float score measuring the clustering of missense mutations in the graph coverage : int number of nodes that received non-zero weight
def mass1_from_mass2_eta(mass2, eta, force_real=True):
    """Return the primary mass from the secondary mass and symmetric
    mass ratio ``eta``."""
    kwargs = dict(known_is_secondary=True, force_real=force_real)
    return mass_from_knownmass_eta(mass2, eta, **kwargs)
Returns the primary mass from the secondary mass and symmetric mass ratio.
def build_signature_template(key_id, algorithm, headers):
    """Build the Signature template for use with the Authorization header.

    key_id is the mandatory label indicating to the server which secret
    to use; algorithm is one of the supported algorithms; headers is an
    optional list of HTTP header names included in the signing string.
    The caller interpolates the actual signature into the '%s'
    placeholder to get the final Authorization header value.
    """
    params = {'keyId': key_id, 'algorithm': algorithm, 'signature': '%s'}
    if headers:
        params['headers'] = ' '.join(h.lower() for h in headers)
    joined = ','.join('{0}="{1}"'.format(k, v) for k, v in params.items())
    return 'Signature {0}'.format(joined)
Build the Signature template for use with the Authorization header. key_id is the mandatory label indicating to the server which secret to use algorithm is one of the supported algorithms headers is a list of http headers to be included in the signing string. The signature must be interpolated into the template to get the final Authorization header value.
def bestfit_func(self, bestfit_x):
    """Evaluate the fitted polynomial at ``bestfit_x``.

    args:
        bestfit_x: scalar or array_like x value
    return:
        scalar or array_like best-fit y value

    Raises KeyError when do_bestfit has not been run yet.
    """
    x = np.array(bestfit_x)
    if not self.done_bestfit:
        raise KeyError("Do do_bestfit first")
    degree = self.args.get("degree", 1)
    # fit_args holds coefficients from the highest power downwards.
    return sum(coeff * x ** (degree - power)
               for power, coeff in enumerate(self.fit_args))
Returns bestfit_y value args: bestfit_x: scalar, array_like x value return: scalar, array_like bestfit y value
def remove_numbers(text_string):
    """Remove digit tokens from ``text_string`` and return the result.

    Any run of digits, dots, slashes and commas starting at a word
    boundary is stripped, and whitespace is normalized.  Returns '' for
    None or empty input.

    Raises:
        InputError: if a non-string argument is passed
    """
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    cleaned = re.sub(r'\b[\d.\/,]+', "", text_string)
    return " ".join(cleaned.split())
Removes any digit value discovered within text_string and returns the new string as type str. Keyword argument: - text_string: string instance Exceptions raised: - InputError: occurs should a non-string argument be passed
def get_context_and_attention_probs(values: mx.sym.Symbol,
                                    length: mx.sym.Symbol,
                                    logits: mx.sym.Symbol,
                                    dtype: str) -> Tuple[mx.sym.Symbol, mx.sym.Symbol]:
    """Return context vector and attention probabilities via a weighted
    sum over values.

    :param values: Shape: (batch_size, seq_len, encoder_num_hidden).
    :param length: Shape: (batch_size,).
    :param logits: Shape: (batch_size, seq_len, 1).
    :param dtype: data type.
    :return: context: (batch_size, encoder_num_hidden),
             attention_probs: (batch_size, seq_len).
    """
    # Mask positions beyond each sequence's length with a large negative
    # value so they receive ~zero probability after the softmax.
    masked_logits = mx.sym.SequenceMask(data=logits,
                                        axis=1,
                                        use_sequence_length=True,
                                        sequence_length=length,
                                        value=-C.LARGE_VALUES[dtype])
    probs = mx.sym.softmax(masked_logits, axis=1, name='attention_softmax')
    # (batch, 1, num_hidden) <- values^T . probs; reshape drops the
    # singleton axis on both outputs.
    context = mx.sym.batch_dot(lhs=values, rhs=probs, transpose_a=True)
    context = mx.sym.reshape(data=context, shape=(0, 0))
    return context, mx.sym.reshape(data=probs, shape=(0, 0))
Returns context vector and attention probabilities via a weighted sum over values. :param values: Shape: (batch_size, seq_len, encoder_num_hidden). :param length: Shape: (batch_size,). :param logits: Shape: (batch_size, seq_len, 1). :param dtype: data type. :return: context: (batch_size, encoder_num_hidden), attention_probs: (batch_size, seq_len).
def id_to_piece(input, model_file=None, model_proto=None, name=None):
    """Converts vocabulary ids into pieces.

    Args:
      input: An arbitrary tensor of int32.
      model_file: The sentencepiece model file path.
      model_proto: The sentencepiece model serialized proto. Either
        `model_file` or `model_proto` must be set.
      name: The name argument that is passed to the op function.

    Returns:
      A tensor of string with the same shape as input.
    """
    # Thin delegation to the generated sentencepiece processor op.
    return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece(
        input, model_file=model_file, model_proto=model_proto, name=name)
Converts vocabulary id into piece. Args: input: An arbitrary tensor of int32. model_file: The sentencepiece model file path. model_proto: The sentencepiece model serialized proto. Either `model_file` or `model_proto` must be set. name: The name argument that is passed to the op function. Returns: A tensor of string with the same shape as input.
def plot_scales(self, titles=None, fig_kwargs=None, **kwargs):
    """Plot input sensitivity (ARD) for all datasets, to see which input
    dimensions are significant for which dataset.

    :param titles: titles for axes of datasets; defaults to the
        latexified dataset names.
    :param fig_kwargs: dict of keyword arguments for the figure
        constructor (replaces the previous mutable ``{}`` default).
    kwargs go into plot_ARD for each kernel.
    """
    from ..plotting import plotting_library as pl
    if fig_kwargs is None:
        # Avoid the mutable-default-argument pitfall of fig_kwargs={}.
        fig_kwargs = {}
    if titles is None:
        titles = [r'${}$'.format(name) for name in self.names]
    num_models = len(self.bgplvms)
    fig = pl().figure(rows=1, cols=num_models, **fig_kwargs)
    for col in range(num_models):
        canvas = self.bgplvms[col].kern.plot_ARD(title=titles[col],
                                                 figure=fig, col=col + 1,
                                                 **kwargs)
    # NOTE: like the original, returns the canvas of the last dataset.
    return canvas
Plot input sensitivity for all datasets, to see which input dimensions are significant for which dataset. :param titles: titles for axes of datasets kwargs go into plot_ARD for each kernel.
def coefficients(self):
    """All of the coefficient arrays as a 6-tuple/list.

    Concatenation of the real and complex coefficient blocks:
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
    beta_complex_real, beta_complex_imag).

    Raises:
        ValueError: For invalid dimensions for the coefficients.
    """
    params = self.get_parameter_vector(include_frozen=True)
    blocks = self.get_all_coefficients(params)
    if len(blocks) != 6:
        raise ValueError("there must be 6 coefficient blocks")
    if any(len(block.shape) != 1 for block in blocks):
        raise ValueError("coefficient blocks must be 1D")
    # The two real blocks must agree in length...
    if len(blocks[0]) != len(blocks[1]):
        raise ValueError("coefficient blocks must have the same shape")
    # ...and so must all four complex blocks.
    complex_len = len(blocks[2])
    if any(len(block) != complex_len for block in blocks[3:]):
        raise ValueError("coefficient blocks must have the same shape")
    return blocks
All of the coefficient arrays This property is the concatenation of the results from :func:`terms.Term.get_real_coefficients` and :func:`terms.Term.get_complex_coefficients` but it will always return a tuple of length 6, even if ``alpha_complex_imag`` was omitted from ``get_complex_coefficients``. Returns: (array[j_real], array[j_real], array[j_complex], array[j_complex], array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``, ``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``, and ``beta_complex_imag`` as described above. Raises: ValueError: For invalid dimensions for the coefficients.
def like(self):
    """Like the project; returns self for chaining."""
    url_template = '/{endpoint}/{id}/like'
    self.requester.post(url_template, endpoint=self.endpoint, id=self.id)
    return self
Like the project
def _last_stack_str(): stack = extract_stack() for s in stack[::-1]: if op.join('vispy', 'gloo', 'buffer.py') not in __file__: break return format_list([s])[0]
Print stack trace from call that didn't originate from here
def get_single(decl_matcher, decls, recursive=True):
    """Return the single declaration matching ``decl_matcher``.

    :param decl_matcher: Python callable taking one argument — a
        reference to a declaration
    :param decls: the search scope, a declaration_t object or a list of
        declaration_t objects
    :param recursive: boolean; when True, also run `decl_matcher` on the
        internal declarations

    Raises declaration_not_found_t when nothing matches and
    multiple_declarations_found_t when more than one declaration does.
    """
    found = matcher.find(decl_matcher, decls, recursive)
    if not found:
        raise runtime_errors.declaration_not_found_t(decl_matcher)
    if len(found) > 1:
        raise runtime_errors.multiple_declarations_found_t(decl_matcher)
    return found[0]
Returns a reference to declaration, that match `decl_matcher` defined criteria. If a unique declaration could not be found, an appropriate exception will be raised. :param decl_matcher: Python callable object, that takes one argument - reference to a declaration :param decls: the search scope, :class:declaration_t object or :class:declaration_t objects list t :param recursive: boolean, if True, the method will run `decl_matcher` on the internal declarations too
def get_static(self, _, file_name=None):
    """Get static content for UI.

    Serves gzipped css/js selected by file-name suffix; any other (or
    missing) file name yields NOT_FOUND.
    """
    # Guard against file_name=None: previously file_name[-2:] raised a
    # TypeError instead of producing a 404.
    suffix = file_name[-2:] if file_name else None
    content_type = {
        'ss': 'text/css',   # matches the '.css' suffix
        'js': 'application/javascript',
    }.get(suffix)
    if not content_type:
        raise HttpError(HTTPStatus.NOT_FOUND, 42)
    return HttpResponse(self.load_static(file_name), headers={
        'Content-Type': content_type,
        'Content-Encoding': 'gzip',
        'Cache-Control': 'public, max-age=300',
    })
Get static content for UI.
def decode_str(s, free=False):
    """Decode a SymbolicStr into a unicode string.

    :param s: the FFI string struct to decode
    :param free: when True, release the underlying native string after
        decoding (even if decoding fails)
    """
    try:
        if s.len == 0:
            return u""
        raw = ffi.unpack(s.data, s.len)
        return raw.decode("utf-8", "replace")
    finally:
        if free:
            lib.semaphore_str_free(ffi.addressof(s))
Decodes a SymbolicStr
def select_from_clusters(cluster_dict, measure_vect):
    """Re-key each cluster by its best source.

    cluster_dict : dict(int:[int,...])
        clusters: each key is a source index mapped to the list of other
        sources in the cluster
    measure_vect : np.ndarray((nsrc,), float)
        measure used to select the best source in each cluster

    returns dict(int:[int,...]) keyed by the best source of each cluster
    """
    return dict(
        select_from_cluster(idx_key, idx_list, measure_vect)
        for idx_key, idx_list in cluster_dict.items()
    )
Select a single source from each cluster and make it the new cluster key cluster_dict : dict(int:[int,]) A dictionary of clusters. Each cluster is a source index and the list of other source in the cluster. measure_vect : np.narray((nsrc),float) vector of the measure used to select the best source in the cluster returns dict(int:[int,...]) New dictionary of clusters keyed by the best source in each cluster
def get_cluster_plan(self):
    """Fetch the current cluster plan (topics and partitions) from
    Zookeeper."""
    _log.info('Fetching current cluster-topology from Zookeeper...')
    cluster_layout = self.get_topics(fetch_partition_state=False)
    partitions = []
    for topic_id, topic_info in six.iteritems(cluster_layout):
        for p_id, p_data in six.iteritems(topic_info['partitions']):
            partitions.append({
                'topic': topic_id,
                'partition': int(p_id),
                'replicas': p_data['replicas'],
            })
    return {'version': 1, 'partitions': partitions}
Fetch cluster plan from zookeeper.
def fill_buffer(heap_data, i_chan):
    """Blocking function to populate timestamp and visibility data in the
    heap. This is run in an executor.

    :param heap_data: dict-like heap whose timestamp fields are set and
        whose 'correlator_output_data'/'VIS' array is filled in place
    :param i_chan: channel index, encoded into the imaginary part
    """
    now = datetime.datetime.utcnow()
    timestamp = now.timestamp()
    whole_seconds = int(timestamp)
    # Fractional seconds scaled onto the full unsigned 32-bit range.
    fraction = int((timestamp - whole_seconds) * (2**32 - 1))
    midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
    since_midnight = now - midnight
    seconds_of_day = since_midnight.seconds + 1e-6 * since_midnight.microseconds
    heap_data['visibility_timestamp_count'] = whole_seconds
    heap_data['visibility_timestamp_fraction'] = fraction
    heap_data['correlator_output_data']['VIS'][:][:] = \
        seconds_of_day + i_chan * 1j
Blocking function to populate data in the heap. This is run in an executor.
def all_to_public(self):
    """Set all members, types and executables in this module public,
    unless the module itself carries the 'private' modifier.

    Public names are recorded lower-cased in ``self.publics``.
    """
    if "private" in self.modifiers:
        return

    def _publish(attribute):
        # Record each key from the named collection as public.
        for key in self.collection(attribute):
            # Compare lower-cased: publics is keyed lower-case, so the
            # previous case-sensitive `key not in self.publics` check
            # could clobber entries that were already present.
            if key.lower() not in self.publics:
                self.publics[key.lower()] = 1

    _publish("members")
    _publish("types")
    _publish("executables")
Sets all members, types and executables in this module as public as long as it doesn't already have the 'private' modifier.
def returns(schema):
    """Create a decorator validating a function's return value.

    Example::

        @accepts(a=int, b=int)
        @returns(int)
        def f(a, b):
            return a + b

    :param schema: The schema the return value must validate against.
    """
    validate = parse(schema).validate

    @decorator
    def validating(func, *args, **kwargs):
        result = func(*args, **kwargs)
        # Validate only; never adapt (mutate) the returned value.
        validate(result, adapt=False)
        return result

    return validating
Create a decorator for validating function return value. Example:: @accepts(a=int, b=int) @returns(int) def f(a, b): return a + b :param schema: The schema for adapting a given parameter.
def load_json(json_object):
    """Load JSON from a file name or an open file object.

    :param json_object: path to a JSON file, or a file-like object
    :return: decoded content, or None on failure (a warning is logged)
    """
    content = None
    is_path = isinstance(json_object, str) and os.path.exists(json_object)
    if is_path:
        with open_(json_object) as handle:
            try:
                content = json.load(handle)
            except Exception:
                debug.log("Warning: Content of '%s' file is not json."%handle.name)
    elif hasattr(json_object, 'read'):
        try:
            content = json.load(json_object)
        except Exception:
            debug.log("Warning: Content of '%s' file is not json."%json_object.name)
    else:
        debug.log("%s\nWarning: Object type invalid!"%json_object)
    return content
Load json from file or file name
def get_signature_request(self, signature_request_id, ux_version=None):
    """Get a signature request by its ID.

    Args:
        signature_request_id (str): The id of the SignatureRequest to
            retrieve
        ux_version (int): UX version, either 1 (default) or 2.

    Returns:
        A SignatureRequest object
    """
    request = self._get_request()
    parameters = {'ux_version': ux_version} if ux_version is not None else None
    url = self.SIGNATURE_REQUEST_INFO_URL + signature_request_id
    return request.get(url, parameters=parameters)
Get a signature request by its ID Args: signature_request_id (str): The id of the SignatureRequest to retrieve ux_version (int): UX version, either 1 (default) or 2. Returns: A SignatureRequest object
def find(self, groupid):
    """Return all indices of the particles belonging to ``groupid``."""
    start = self.offset[groupid]
    stop = start + self.length[groupid]
    return self.indices[start:stop]
Return all indices of the particles belonging to ``groupid``.
def gen_part_from_line(lines: Iterable[str],
                       part_index: int,
                       splitter: str = None) -> Generator[str, None, None]:
    """Split each line with ``splitter`` and yield the part at
    ``part_index``.

    Args:
        lines: iterable of strings
        part_index: index of the part to yield
        splitter: separator passed to ``str.split`` (None = whitespace)

    Yields:
        the selected part of each line
    """
    yield from (line.split(splitter)[part_index] for line in lines)
Splits lines with ``splitter`` and yields a specified part by index. Args: lines: iterable of strings part_index: index of part to yield splitter: string to split the lines on Yields: the specified part for each line
def switchCurrentView(self, viewType):
    """Swap the current tab's view for one of the given ``viewType``.

    Returns the existing view if it already has that type, a freshly
    created view after replacing the tab, or None when the current view
    refuses to close.
    """
    # No tabs yet: just add a fresh view of the requested type.
    if not self.count():
        return self.addView(viewType)

    view = self.currentView()
    # Already showing the requested type — nothing to do.
    if type(view) == viewType:
        return view

    # Suppress signals/repaints while the tab is being swapped out.
    self.blockSignals(True)
    self.setUpdatesEnabled(False)

    index = self.indexOf(view)
    if not view.close():
        # NOTE(review): signals stay blocked and updates stay disabled on
        # this early return — looks like a leak of the blocked state;
        # confirm whether callers recover from this.
        return None

    # Re-query the index after closing; closing the tab may have shifted
    # positions (this intentionally overwrites the indexOf() value above).
    index = self.currentIndex()

    new_view = viewType.createInstance(self.viewWidget(), self.viewWidget())

    # Insert the replacement view at the same position and restore
    # signal/update handling before selecting it.
    self.insertTab(index, new_view, new_view.windowTitle())
    self.blockSignals(False)
    self.setUpdatesEnabled(True)
    self.setCurrentIndex(index)

    return new_view
Swaps the current tab view for the given view type. :param viewType | <subclass of XView> :return <XView> || None