positive (string lengths 100 to 30.3k) | anchor (string lengths 1 to 15k)
def getMibSymbol(self): """Returns MIB variable symbolic identification. Returns ------- str MIB module name str MIB variable symbolic name : :py:class:`~pysnmp.proto.rfc1902.ObjectName` class instance representing MIB variable instance index. Raises ------ SmiError If MIB variable conversion has not been performed. Examples -------- >>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0') >>> objectIdentity.resolveWithMib(mibViewController) >>> objectIdentity.getMibSymbol() ('SNMPv2-MIB', 'sysDescr', (0,)) >>> """ if self._state & self.ST_CLEAN: return self._modName, self._symName, self._indices else: raise SmiError( '%s object not fully initialized' % self.__class__.__name__)
Returns MIB variable symbolic identification. Returns ------- str MIB module name str MIB variable symbolic name : :py:class:`~pysnmp.proto.rfc1902.ObjectName` class instance representing MIB variable instance index. Raises ------ SmiError If MIB variable conversion has not been performed. Examples -------- >>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0') >>> objectIdentity.resolveWithMib(mibViewController) >>> objectIdentity.getMibSymbol() ('SNMPv2-MIB', 'sysDescr', (0,)) >>>
def fvga(a, i, g, n): """ Return the future value of an annuity with a growth rate, i.e. the future value of a growing stream of periodic investments. a = Periodic Investment (1000) i = interest rate as decimal (.0675) g = the growth rate (.05) n = the number of compound periods (20) Example: fvga(1000, .0675, .05, 20) """ return a * ((((1 + i) ** n) - (((1 + g) ** n)))/(i - g))
Return the future value of an annuity with a growth rate, i.e. the future value of a growing stream of periodic investments. a = Periodic Investment (1000) i = interest rate as decimal (.0675) g = the growth rate (.05) n = the number of compound periods (20) Example: fvga(1000, .0675, .05, 20)
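A quick way to sanity-check the formula above is to run it and compare the g = 0 case against the ordinary future-value-of-an-annuity formula. Minimal stand-alone sketch, restating fvga so it runs on its own:

def fvga(a, i, g, n):
    # future value of a growing annuity, as defined above
    return a * (((1 + i) ** n - (1 + g) ** n) / (i - g))

# 1000 invested per period for 20 periods at 6.75%, contributions growing 5% per period
print(round(fvga(1000, .0675, .05, 20), 2))

# with g = 0 the expression reduces to the ordinary annuity formula a * ((1 + i) ** n - 1) / i
assert abs(fvga(1000, .0675, 0, 20) - 1000 * ((1.0675 ** 20 - 1) / .0675)) < 1e-6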
def idle_task(self): '''handle missing parameters''' self.check_new_target_system() sysid = self.get_sysid() self.pstate[sysid].vehicle_name = self.vehicle_name self.pstate[sysid].fetch_check(self.master)
handle missing parameters
def dict_subset(dict_, keys, default=util_const.NoParam): r""" Args: dict_ (dict): keys (list): Returns: dict: subset dictionary Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1} >>> keys = ['K', 'dcvs_clip_max'] >>> d = tuple([]) >>> subdict_ = dict_subset(dict_, keys) >>> result = ut.repr4(subdict_, sorted_=True, newlines=False) >>> print(result) {'K': 3, 'dcvs_clip_max': 0.2} """ if default is util_const.NoParam: items = dict_take(dict_, keys) else: items = dict_take(dict_, keys, default) subdict_ = OrderedDict(list(zip(keys, items))) #item_sublist = [(key, dict_[key]) for key in keys] ##subdict_ = type(dict_)(item_sublist) # maintain old dict format #subdict_ = OrderedDict(item_sublist) return subdict_
r""" Args: dict_ (dict): keys (list): Returns: dict: subset dictionary Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1} >>> keys = ['K', 'dcvs_clip_max'] >>> d = tuple([]) >>> subdict_ = dict_subset(dict_, keys) >>> result = ut.repr4(subdict_, sorted_=True, newlines=False) >>> print(result) {'K': 3, 'dcvs_clip_max': 0.2}
def fix_germline_samplename(in_file, sample_name, data): """Replace germline sample names, originally from normal BAM file. """ out_file = "%s-fixnames%s" % utils.splitext_plus(in_file) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: sample_file = "%s-samples.txt" % utils.splitext_plus(tx_out_file)[0] with open(sample_file, "w") as out_handle: out_handle.write("%s\n" % sample_name) cmd = ("bcftools reheader -s {sample_file} {in_file} -o {tx_out_file}") do.run(cmd.format(**locals()), "Fix germline samplename: %s" % sample_name) return vcfutils.bgzip_and_index(out_file, data["config"])
Replace germline sample names, originally from normal BAM file.
def group_by(self, *args): """ This method lets you specify the grouping fields explicitly. The `args` must be names of grouping fields or calculated fields that this queryset was created with. """ for name in args: assert name in self._fields or name in self._calculated_fields, \ 'Cannot group by `%s` since it is not included in the query' % name qs = copy(self) qs._grouping_fields = args return qs
This method lets you specify the grouping fields explicitly. The `args` must be names of grouping fields or calculated fields that this queryset was created with.
def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) """ raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small") return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image)
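Hedged usage sketch, assuming this is the module-level face_encodings from the face_recognition package; the image filename is purely illustrative:

import face_recognition  # assumes the face_recognition package is installed

image = face_recognition.load_image_file("group_photo.jpg")  # illustrative path
encodings = face_recognition.face_encodings(image, num_jitters=1)

print("found %d face(s)" % len(encodings))
if encodings:
    print(encodings[0].shape)  # each encoding is a 128-dimensional numpy array: (128,)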
async def xack(self, name: str, group: str, stream_id: str) -> int: """ [NOTICE] Not officially released yet XACK is the command that allows a consumer to mark a pending message as correctly processed. :param name: name of the stream :param group: name of the consumer group :param stream_id: id of the entry the consumer wants to mark :return: number of entries marked """ return await self.execute_command('XACK', name, group, stream_id)
[NOTICE] Not officially released yet XACK is the command that allows a consumer to mark a pending message as correctly processed. :param name: name of the stream :param group: name of the consumer group :param stream_id: id of the entry the consumer wants to mark :return: number of entries marked
def _set_mpls_adjust_bandwidth_lsp(self, v, load=False): """ Setter method for mpls_adjust_bandwidth_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_adjust_bandwidth_lsp (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_adjust_bandwidth_lsp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_adjust_bandwidth_lsp() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=mpls_adjust_bandwidth_lsp.mpls_adjust_bandwidth_lsp, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp", rest_name="mpls-adjust-bandwidth-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mpls_adjust_bandwidth_lsp must be of a type compatible with rpc""", 'defined-type': "rpc", 'generated-type': """YANGDynClass(base=mpls_adjust_bandwidth_lsp.mpls_adjust_bandwidth_lsp, is_leaf=True, yang_name="mpls-adjust-bandwidth-lsp", rest_name="mpls-adjust-bandwidth-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'mplsAdjustBandwidth'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""", }) self.__mpls_adjust_bandwidth_lsp = t if hasattr(self, '_set'): self._set()
Setter method for mpls_adjust_bandwidth_lsp, mapped from YANG variable /brocade_mpls_rpc/mpls_adjust_bandwidth_lsp (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_adjust_bandwidth_lsp is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_adjust_bandwidth_lsp() directly.
def update(self): """Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one. """ if self.updated: return 0 res = super(BipartiteGraph, self).update() self.updated = True return res
Update reviewers' anomalous scores and products' summaries. Returns: maximum absolute difference between old summary and new one, and old anomalous score and new one.
def je(self): r"""Execute operations, returns a string ( '' if the result is None, join=''). This works like :attr:`j` except it returns an empty string if the execution result is None. Examples: >>> echo(None).je '' """ text = self._process() return self.make_string(text,join_str='',return_if_none='')
r"""Execute operations, returns a string ( '' if the result is None, join=''). This works like :attr:`j` except it returns an empty string if the execution result is None. Examples: >>> echo(None).je ''
def verify(path): """Verify that `path` is a zip file with Phasics TIFF files""" valid = False try: zf = zipfile.ZipFile(path) except (zipfile.BadZipfile, IsADirectoryError): pass else: names = sorted(zf.namelist()) names = [nn for nn in names if nn.endswith(".tif")] names = [nn for nn in names if nn.startswith("SID PHA")] for name in names: with zf.open(name) as pt: fd = io.BytesIO(pt.read()) if SingleTifPhasics.verify(fd): valid = True break zf.close() return valid
Verify that `path` is a zip file with Phasics TIFF files
def astype(self, dtype): """Return a copy of this element with new ``dtype``. Parameters ---------- dtype : Scalar data type of the returned space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. Data types with non-trivial shapes are not allowed. Returns ------- newelem : `DiscretizedSpaceElement` Version of this element with given data type. """ return self.space.astype(dtype).element(self.tensor.astype(dtype))
Return a copy of this element with new ``dtype``. Parameters ---------- dtype : Scalar data type of the returned space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. Data types with non-trivial shapes are not allowed. Returns ------- newelem : `DiscretizedSpaceElement` Version of this element with given data type.
def load_subcommand(subparsers): """Load this subcommand """ parser_analyze = subparsers.add_parser('analyze', help='Analyze uwsgi log to get report') parser_analyze.add_argument('-f', '--filepath', type=argparse.FileType('r'), dest='filepath', help='Path of uwsgi log file', required=True) parser_analyze.add_argument('--output', dest="output", type=argparse.FileType('w'), default=sys.stdout, help='HTML report file path') parser_analyze.add_argument('--min-msecs', dest="min_msecs", type=int, default=200, help='Request serve time lower than this value will not be counted, default: 200') parser_analyze.add_argument('--domain', dest="domain", type=str, required=False, help='Make url in report become a hyper-link by settings a domain') parser_analyze.add_argument('--url-file', dest="url_file", type=argparse.FileType('r'), required=False, help='Customized url rules in regular expression') parser_analyze.add_argument('--limit-url-groups', dest="limit_url_groups", type=int, required=False, default=LIMIT_URL_GROUPS, help='Number of url groups considered, default: 200') parser_analyze.add_argument('--limit-per-url-group', dest="limit_per_url_group", type=int, required=False, default=LIMIT_PER_URL_GROUP, help='Number of urls per group considered, default: 20') parser_analyze.set_defaults(func=analyze)
Load this subcommand
def meta_wrapped(f): """ Add a field label, errors, and a description (if it exists) to a field. """ @wraps(f) def wrapped(self, field, *args, **kwargs): html = "{label}{errors}{original}<small>{description}</small>".format( label=field.label(class_='control-label'), original=f(self, field, *args, **kwargs), errors=render_field_errors(field) or '', description=render_field_description(field) ) return HTMLString(html) return wrapped
Add a field label, errors, and a description (if it exists) to a field.
def _md5_compute(self, f): ''' Computes the checksum of the file ''' md5 = hashlib.md5() block_size = 16384 f.seek(0, 2) remaining = f.tell() f.seek(0) while (remaining > block_size): data = f.read(block_size) remaining = remaining - block_size md5.update(data) if remaining > 0: data = f.read(remaining) md5.update(data) return md5.digest()
Computes the checksum of the file
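The chunked-read logic above can be exercised stand-alone; this sketch restates it as a free function (the method's self argument dropped) and checks it against hashlib on an in-memory file:

import hashlib
import io

def md5_compute(f):
    # same block-wise logic as _md5_compute above, without the self argument
    md5 = hashlib.md5()
    block_size = 16384
    f.seek(0, 2)
    remaining = f.tell()
    f.seek(0)
    while remaining > block_size:
        md5.update(f.read(block_size))
        remaining -= block_size
    if remaining > 0:
        md5.update(f.read(remaining))
    return md5.digest()

buf = io.BytesIO(b"hello world")
assert md5_compute(buf) == hashlib.md5(b"hello world").digest()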
def choice(*es): """ Create a PEG function to match an ordered choice. """ msg = 'Expected one of: {}'.format(', '.join(map(repr, es))) def match_choice(s, grm=None, pos=0): errs = [] for e in es: try: return e(s, grm, pos) except PegreError as ex: errs.append((ex.message, ex.position)) if errs: raise PegreChoiceError(errs, pos) return match_choice
Create a PEG function to match an ordered choice.
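To see the ordered-choice behaviour without the library's PegreError/PegreChoiceError machinery, here is a self-contained sketch with a toy literal expression and a stand-in exception (all names illustrative):

class MatchError(Exception):
    pass

def literal(tok):
    # toy PEG expression: succeeds if the input contains tok at position pos
    def match(s, grm=None, pos=0):
        if s.startswith(tok, pos):
            return tok
        raise MatchError("expected %r at %d" % (tok, pos))
    return match

def choice(*es):
    # try each alternative in order, return the first success
    def match_choice(s, grm=None, pos=0):
        errs = []
        for e in es:
            try:
                return e(s, grm, pos)
            except MatchError as ex:
                errs.append(str(ex))
        raise MatchError("; ".join(errs))
    return match_choice

ab = choice(literal("a"), literal("b"))
print(ab("banana"))  # 'b': the first alternative fails, the second succeeds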
def fcontext_add_or_delete_policy(action, name, filetype=None, sel_type=None, sel_user=None, sel_level=None): ''' .. versionadded:: 2017.7.0 Adds or deletes the SELinux policy for a given filespec and other optional parameters. Returns the result of the call to semanage. Note that you don't have to remove an entry before setting a new one for a given filespec and filetype, as adding one with semanage automatically overwrites a previously configured SELinux context. .. warning:: Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`, or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`. .. deprecated:: 2019.2.0 action The action to perform. Either ``add`` or ``delete``. name filespec of the file or directory. Regex syntax is allowed. file_type The SELinux filetype specification. Use one of [a, f, d, c, b, s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a' (all files). sel_type SELinux context type. There are many. sel_user SELinux user. Use ``semanage login -l`` to determine which ones are available to you. sel_level The MLS range of the SELinux context. CLI Example: .. code-block:: bash salt '*' selinux.fcontext_add_or_delete_policy add my-policy ''' salt.utils.versions.warn_until( 'Sodium', 'The \'selinux.fcontext_add_or_delete_policy\' module has been deprecated. Please use the ' '\'selinux.fcontext_add_policy\' and \'selinux.fcontext_delete_policy\' modules instead. ' 'Support for the \'selinux.fcontext_add_or_delete_policy\' module will be removed in Salt ' '{version}.' ) return _fcontext_add_or_delete_policy(action, name, filetype, sel_type, sel_user, sel_level)
.. versionadded:: 2017.7.0 Adds or deletes the SELinux policy for a given filespec and other optional parameters. Returns the result of the call to semanage. Note that you don't have to remove an entry before setting a new one for a given filespec and filetype, as adding one with semanage automatically overwrites a previously configured SELinux context. .. warning:: Use :mod:`selinux.fcontext_add_policy()<salt.modules.selinux.fcontext_add_policy>`, or :mod:`selinux.fcontext_delete_policy()<salt.modules.selinux.fcontext_delete_policy>`. .. deprecated:: 2019.2.0 action The action to perform. Either ``add`` or ``delete``. name filespec of the file or directory. Regex syntax is allowed. file_type The SELinux filetype specification. Use one of [a, f, d, c, b, s, l, p]. See also ``man semanage-fcontext``. Defaults to 'a' (all files). sel_type SELinux context type. There are many. sel_user SELinux user. Use ``semanage login -l`` to determine which ones are available to you. sel_level The MLS range of the SELinux context. CLI Example: .. code-block:: bash salt '*' selinux.fcontext_add_or_delete_policy add my-policy
def compile_temp(d, key, value): """ Compiles temporary dictionaries for metadata. Adds a new entry to an existing dictionary. :param dict d: :param str key: :param any value: :return dict: """ if not value: d[key] = None elif len(value) == 1: d[key] = value[0] else: d[key] = value return d
Compiles temporary dictionaries for metadata. Adds a new entry to an existing dictionary. :param dict d: :param str key: :param any value: :return dict:
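A small usage sketch showing the three branches (empty value, single-item list, multi-item list); the keys and values are illustrative:

def compile_temp(d, key, value):
    # same logic as above
    if not value:
        d[key] = None
    elif len(value) == 1:
        d[key] = value[0]
    else:
        d[key] = value
    return d

meta = {}
compile_temp(meta, "author", ["Smith"])            # single item is unwrapped
compile_temp(meta, "sites", ["GISP2", "NGRIP"])    # multi-item list is kept
compile_temp(meta, "notes", [])                    # empty value is stored as None
print(meta)  # {'author': 'Smith', 'sites': ['GISP2', 'NGRIP'], 'notes': None}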
def wait(self, num_slaves, timeout=0): """This command blocks the current client until all the previous write commands are successfully transferred and acknowledged by at least the specified number of slaves. If the timeout, specified in milliseconds, is reached, the command returns even if the specified number of slaves were not yet reached. The command will always return the number of slaves that acknowledged the write commands sent before the :meth:`~tredis.RedisClient.wait` command, both in the case where the specified number of slaves are reached, or when the timeout is reached. .. note:: **Time complexity**: ``O(1)`` :param int num_slaves: Number of slaves to acknowledge previous writes :param int timeout: Timeout in milliseconds :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ command = [ b'WAIT', ascii(num_slaves).encode('ascii'), ascii(timeout).encode('ascii') ] return self._execute(command)
This command blocks the current client until all the previous write commands are successfully transferred and acknowledged by at least the specified number of slaves. If the timeout, specified in milliseconds, is reached, the command returns even if the specified number of slaves were not yet reached. The command will always return the number of slaves that acknowledged the write commands sent before the :meth:`~tredis.RedisClient.wait` command, both in the case where the specified number of slaves are reached, or when the timeout is reached. .. note:: **Time complexity**: ``O(1)`` :param int num_slaves: Number of slaves to acknowledge previous writes :param int timeout: Timeout in milliseconds :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
def verify_env( dirs, user, permissive=False, pki_dir='', skip_extra=False, root_dir=ROOT_DIR): ''' Verify that the named directories are in place and that the environment can shake the salt ''' if salt.utils.platform.is_windows(): return win_verify_env(root_dir, dirs, permissive=permissive, skip_extra=skip_extra) import pwd # after confirming not running Windows try: pwnam = pwd.getpwnam(user) uid = pwnam[2] gid = pwnam[3] groups = salt.utils.user.get_gid_list(user, include_default=False) except KeyError: err = ('Failed to prepare the Salt environment for user ' '{0}. The user is not available.\n').format(user) sys.stderr.write(err) sys.exit(salt.defaults.exitcodes.EX_NOUSER) for dir_ in dirs: if not dir_: continue if not os.path.isdir(dir_): try: with salt.utils.files.set_umask(0o022): os.makedirs(dir_) # If starting the process as root, chown the new dirs if os.getuid() == 0: os.chown(dir_, uid, gid) except OSError as err: msg = 'Failed to create directory path "{0}" - {1}\n' sys.stderr.write(msg.format(dir_, err)) sys.exit(err.errno) mode = os.stat(dir_) # If starting the process as root, chown the new dirs if os.getuid() == 0: fmode = os.stat(dir_) if fmode.st_uid != uid or fmode.st_gid != gid: if permissive and fmode.st_gid in groups: # Allow the directory to be owned by any group root # belongs to if we say it's ok to be permissive pass else: # chown the file for the new user os.chown(dir_, uid, gid) for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]: fsubdir = os.path.join(dir_, subdir) if '{0}jobs'.format(os.path.sep) in fsubdir: continue for root, dirs, files in salt.utils.path.os_walk(fsubdir): for name in files: if name.startswith('.'): continue path = os.path.join(root, name) try: fmode = os.stat(path) except (IOError, OSError): pass if fmode.st_uid != uid or fmode.st_gid != gid: if permissive and fmode.st_gid in groups: pass else: # chown the file for the new user os.chown(path, uid, gid) for name in dirs: path = os.path.join(root, name) fmode = os.stat(path) if fmode.st_uid != uid or fmode.st_gid != gid: if permissive and fmode.st_gid in groups: pass else: # chown the file for the new user os.chown(path, uid, gid) # Allow the pki dir to be 700 or 750, but nothing else. # This prevents other users from writing out keys, while # allowing the use-case of 3rd-party software (like django) # to read in what it needs to integrate. # # If the permissions aren't correct, default to the more secure 700. # If acls are enabled, the pki_dir needs to remain readable, this # is still secure because the private keys are still only readable # by the user running the master if dir_ == pki_dir: smode = stat.S_IMODE(mode.st_mode) if smode != 448 and smode != 488: if os.access(dir_, os.W_OK): os.chmod(dir_, 448) else: msg = 'Unable to securely set the permissions of "{0}".' msg = msg.format(dir_) if is_console_configured(): log.critical(msg) else: sys.stderr.write("CRITICAL: {0}\n".format(msg)) if skip_extra is False: # Run the extra verification checks zmq_version()
Verify that the named directories are in place and that the environment can shake the salt
def _friends_bootstrap_radius(args): """Internal method used to compute the radius (half-side-length) for each ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using bootstrapping.""" # Unzipping. points, ftype = args rstate = np.random # Resampling. npoints, ndim = points.shape idxs = rstate.randint(npoints, size=npoints) # resample idx_in = np.unique(idxs) # selected objects sel = np.ones(npoints, dtype='bool') sel[idx_in] = False idx_out = np.where(sel)[0] # "missing" objects if len(idx_out) < 2: # edge case idx_out = np.append(idx_out, [0, 1]) points_in, points_out = points[idx_in], points[idx_out] # Construct KDTree to enable quick nearest-neighbor lookup for # our resampled objects. kdtree = spatial.KDTree(points_in) if ftype == 'balls': # Compute distances from our "missing" points its closest neighbor # among the resampled points using the Euclidean norm # (i.e. "radius" of n-sphere). dists, ids = kdtree.query(points_out, k=1, eps=0, p=2) elif ftype == 'cubes': # Compute distances from our "missing" points its closest neighbor # among the resampled points using the Euclidean norm # (i.e. "half-side-length" of n-cube). dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf) # Conservative upper-bound on radius. dist = max(dists) return dist
Internal method used to compute the radius (half-side-length) for each ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using bootstrapping.
def convert_camel_case_to_snake_case(name): """Convert CamelCase to snake_case.""" s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
Convert CamelCase to snake_case.
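The two-pass regex handles both simple names and acronym runs; a stand-alone check:

import re

def convert_camel_case_to_snake_case(name):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(convert_camel_case_to_snake_case('CamelCase'))         # camel_case
print(convert_camel_case_to_snake_case('HTTPServerError'))   # http_server_error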
def get_redis(**kwargs): """Returns a redis client instance. Parameters ---------- redis_cls : class, optional Defaults to ``redis.StrictRedis``. url : str, optional If given, ``redis_cls.from_url`` is used to instantiate the class. **kwargs Extra parameters to be passed to the ``redis_cls`` class. Returns ------- server Redis client instance. """ redis_cls = kwargs.pop('redis_cls', defaults.REDIS_CLS) url = kwargs.pop('url', None) if url: return redis_cls.from_url(url, **kwargs) else: return redis_cls(**kwargs)
Returns a redis client instance. Parameters ---------- redis_cls : class, optional Defaults to ``redis.StrictRedis``. url : str, optional If given, ``redis_cls.from_url`` is used to instantiate the class. **kwargs Extra parameters to be passed to the ``redis_cls`` class. Returns ------- server Redis client instance.
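Hedged sketch of how this factory is typically called; redis.StrictRedis stands in for defaults.REDIS_CLS and the connection parameters are illustrative. Since redis-py connects lazily, both calls succeed even without a running server:

import redis  # assumes the redis-py package is installed

def get_redis(**kwargs):
    # simplified restatement of the factory above
    redis_cls = kwargs.pop("redis_cls", redis.StrictRedis)
    url = kwargs.pop("url", None)
    if url:
        return redis_cls.from_url(url, **kwargs)
    return redis_cls(**kwargs)

client = get_redis(host="localhost", port=6379, db=0)
client_from_url = get_redis(url="redis://localhost:6379/0")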
def close(self): """Closes out the stream.""" _LOGGER.debug("Closing stream") if not hasattr(self, "footer"): raise SerializationError("Footer not read") super(StreamDecryptor, self).close()
Closes out the stream.
def has(self, querypart_name, value=None): """Returns True if `querypart_name` with `value` is set. For example you can check if you already used a condition by `sql.has('where')`. If you want to check for more information, for example whether that condition also contains an ID, you can do this by `sql.has('where', 'id')`. """ querypart = self._queryparts.get(querypart_name) if not querypart: return False if not querypart.is_set: return False if value: return querypart.has(value) return True
Returns True if `querypart_name` with `value` is set. For example you can check if you already used a condition by `sql.has('where')`. If you want to check for more information, for example whether that condition also contains an ID, you can do this by `sql.has('where', 'id')`.
async def runItemCmdr(item, outp=None, **opts): ''' Create a cmdr for the given item and run the cmd loop. Example: runItemCmdr(foo) ''' cmdr = await getItemCmdr(item, outp=outp, **opts) await cmdr.runCmdLoop()
Create a cmdr for the given item and run the cmd loop. Example: runItemCmdr(foo)
def _make_options(x): """Standardize the options tuple format. The returned tuple should be in the format (('label', value), ('label', value), ...). The input can be * an iterable of (label, value) pairs * an iterable of values, and labels will be generated """ # Check if x is a mapping of labels to values if isinstance(x, Mapping): import warnings warnings.warn("Support for mapping types has been deprecated and will be dropped in a future release.", DeprecationWarning) return tuple((unicode_type(k), v) for k, v in x.items()) # only iterate once through the options. xlist = tuple(x) # Check if x is an iterable of (label, value) pairs if all((isinstance(i, (list, tuple)) and len(i) == 2) for i in xlist): return tuple((unicode_type(k), v) for k, v in xlist) # Otherwise, assume x is an iterable of values return tuple((unicode_type(i), i) for i in xlist)
Standardize the options tuple format. The returned tuple should be in the format (('label', value), ('label', value), ...). The input can be * an iterable of (label, value) pairs * an iterable of values, and labels will be generated
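A self-contained sketch of the normalization described above; str stands in for unicode_type and the deprecation warning for mapping inputs is omitted:

from collections.abc import Mapping

def _make_options(x):
    if isinstance(x, Mapping):
        return tuple((str(k), v) for k, v in x.items())
    xlist = tuple(x)  # only iterate once through the options
    if all(isinstance(i, (list, tuple)) and len(i) == 2 for i in xlist):
        return tuple((str(k), v) for k, v in xlist)
    return tuple((str(i), i) for i in xlist)

print(_make_options(['a', 'b']))               # (('a', 'a'), ('b', 'b'))
print(_make_options([('low', 0), ('hi', 1)]))  # (('low', 0), ('hi', 1))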
def rowCount(self, index=None): """Get number of rows in the header.""" if self.axis == 0: return max(1, self._shape[0]) else: if self.total_rows <= self.rows_loaded: return self.total_rows else: return self.rows_loaded
Get number of rows in the header.
def getStrips(self, maxstrips=None): """Get comic strips.""" if maxstrips: word = u"strip" if maxstrips == 1 else "strips" msg = u'Retrieving %d %s' % (maxstrips, word) else: msg = u'Retrieving all strips' if self.indexes: if len(self.indexes) == 1: msg += u" for index %s" % self.indexes[0] else: msg += u" for indexes %s" % self.indexes # Always call starter() since it might initialize cookies. # See for example Oglaf comic. self.starter() urls = [self.getIndexStripUrl(index) for index in self.indexes] else: urls = [self.getLatestUrl()] if self.adult: msg += u" (including adult content)" out.info(msg) for url in urls: for strip in self.getStripsFor(url, maxstrips): yield strip
Get comic strips.
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False): """Evaluate on voc dataset. Args: pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields. gt_boxlists(list[BoxList]): ground truth boxlist, has labels field. iou_thresh: iou thresh use_07_metric: boolean Returns: dict represents the results """ assert len(gt_boxlists) == len( pred_boxlists ), "Length of gt and pred lists need to be same." prec, rec = calc_detection_voc_prec_rec( pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh ) ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric) return {"ap": ap, "map": np.nanmean(ap)}
Evaluate on voc dataset. Args: pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields. gt_boxlists(list[BoxList]): ground truth boxlist, has labels field. iou_thresh: iou thresh use_07_metric: boolean Returns: dict represents the results
def lock(self): """Lock the device.""" success = self.set_status(CONST.STATUS_LOCKCLOSED_INT) if success: self._json_state['status'] = CONST.STATUS_LOCKCLOSED return success
Lock the device.
def human_and_00(X, y, model_generator, method_name): """ AND (false/false) This tests how well a feature attribution method agrees with human intuition for an AND operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever and cough: +6 points transform = "identity" sort_order = 0 """ return _human_and(X, model_generator, method_name, False, False)
AND (false/false) This tests how well a feature attribution method agrees with human intuition for an AND operation combined with linear effects. This metric deals specifically with the question of credit allocation for the following function when all three inputs are true: if fever: +2 points if cough: +2 points if fever and cough: +6 points transform = "identity" sort_order = 0
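The scoring rule in the docstring can be written down directly; this toy function makes the credit-allocation question concrete (the function name is illustrative):

def points(fever, cough):
    score = 0
    if fever:
        score += 2
    if cough:
        score += 2
    if fever and cough:
        score += 6
    return score

print(points(True, True))    # 10 -> how should the 10 points be split between fever and cough?
print(points(True, False))   # 2
print(points(False, False))  # 0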
def request_get_next(request, default_next): """ get next url from request, in order: POST.next, GET.next, HTTP_REFERER, default_next """ next_url = request.POST.get('next')\ or request.GET.get('next')\ or request.META.get('HTTP_REFERER')\ or default_next return next_url
get next url from request, in order: POST.next, GET.next, HTTP_REFERER, default_next
def _ConvertMethodType(self, methodType): """ Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition """ if methodType: name = methodType.name wsdlName = methodType.wsdlName version = methodType.version params = self._Filter(self._ConvertParamType, methodType.paramTypeInfo) privId = methodType.privId faults = methodType.fault # Figure out return info if methodType.returnTypeInfo: returnTypeInfo = methodType.returnTypeInfo retFlags = self._ConvertAnnotations(returnTypeInfo.annotation) methodRetType = returnTypeInfo.type else: retFlags = 0 methodRetType = "void" if wsdlName.endswith("_Task"): # TODO: Need a separate task return type for task, instead of # hardcoding vim.Task as return type retType = "vim.Task" else: retType = methodRetType retInfo = (retFlags, retType, methodRetType) method = (name, wsdlName, version, params, retInfo, privId, faults) else: method = None return method
Convert vmodl.reflect.DynamicTypeManager.MethodTypeInfo to pyVmomi method definition
def get_env_variable(var_name, default=None): """Get the environment variable or raise exception.""" try: return os.environ[var_name] except KeyError: if default is not None: return default else: error_msg = 'The environment variable {} was missing, abort...'\ .format(var_name) raise EnvironmentError(error_msg)
Get the environment variable or raise exception.
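Usage sketch; the variable names are illustrative:

import os

def get_env_variable(var_name, default=None):
    try:
        return os.environ[var_name]
    except KeyError:
        if default is not None:
            return default
        raise EnvironmentError(
            'The environment variable {} was missing, abort...'.format(var_name))

os.environ['API_TOKEN'] = 'secret'               # illustrative variable
print(get_env_variable('API_TOKEN'))             # secret
print(get_env_variable('MISSING', 'fallback'))   # fallback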
def if_pandas_df_convert_to_numpy(obj): """Return a Numpy array from a Pandas dataframe. Iterating over a DataFrame has weird side effects, such as the first row being the column names. Converting to Numpy is safer. """ if pd is not None and isinstance(obj, pd.DataFrame): return obj.values else: return obj
Return a Numpy array from a Pandas dataframe. Iterating over a DataFrame has weird side effects, such as the first row being the column names. Converting to Numpy is safer.
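Stand-alone check of the conversion, assuming pandas and numpy are installed (the guard for pd being None is kept from the original):

import numpy as np
import pandas as pd  # assumes pandas is available

def if_pandas_df_convert_to_numpy(obj):
    if pd is not None and isinstance(obj, pd.DataFrame):
        return obj.values
    return obj

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
print(type(if_pandas_df_convert_to_numpy(df)))         # <class 'numpy.ndarray'>
print(type(if_pandas_df_convert_to_numpy(np.eye(2))))  # non-DataFrame input passes through unchanged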
def getTargetNamespace(self): """return targetNamespace """ parent = self targetNamespace = 'targetNamespace' tns = self.attributes.get(targetNamespace) while not tns and parent and parent._parent is not None: parent = parent._parent() tns = parent.attributes.get(targetNamespace) return tns or ''
return targetNamespace
def flash(path_to_python=None, paths_to_microbits=None, path_to_runtime=None, python_script=None, minify=False): """ Given a path to or source of a Python file will attempt to create a hex file and then flash it onto the referenced BBC micro:bit. If the path_to_python & python_script are unspecified it will simply flash the unmodified MicroPython runtime onto the device. If used, the python_script argument should be a bytes object representing a UTF-8 encoded string. For example:: script = "from microbit import *\\ndisplay.scroll('Hello, World!')" uflash.flash(python_script=script.encode('utf-8')) If paths_to_microbits is unspecified it will attempt to find the device's path on the filesystem automatically. If the path_to_runtime is unspecified it will use the built in version of the MicroPython runtime. This feature is useful if a custom build of MicroPython is available. If the automatic discovery fails, then it will raise an IOError. """ # Check for the correct version of Python. if not ((sys.version_info[0] == 3 and sys.version_info[1] >= 3) or (sys.version_info[0] == 2 and sys.version_info[1] >= 7)): raise RuntimeError('Will only run on Python 2.7, or 3.3 and later.') # Grab the Python script (if needed). python_hex = '' if path_to_python: if not path_to_python.endswith('.py'): raise ValueError('Python files must end in ".py".') with open(path_to_python, 'rb') as python_script: python_hex = hexlify(python_script.read(), minify) elif python_script: python_hex = hexlify(python_script, minify) runtime = _RUNTIME # Load the hex for the runtime. if path_to_runtime: with open(path_to_runtime) as runtime_file: runtime = runtime_file.read() # Generate the resulting hex file. micropython_hex = embed_hex(runtime, python_hex) # Find the micro:bit. if not paths_to_microbits: found_microbit = find_microbit() if found_microbit: paths_to_microbits = [found_microbit] # Attempt to write the hex file to the micro:bit. if paths_to_microbits: for path in paths_to_microbits: hex_path = os.path.join(path, 'micropython.hex') print('Flashing Python to: {}'.format(hex_path)) save_hex(micropython_hex, hex_path) else: raise IOError('Unable to find micro:bit. Is it plugged in?')
Given a path to or source of a Python file will attempt to create a hex file and then flash it onto the referenced BBC micro:bit. If the path_to_python & python_script are unspecified it will simply flash the unmodified MicroPython runtime onto the device. If used, the python_script argument should be a bytes object representing a UTF-8 encoded string. For example:: script = "from microbit import *\\ndisplay.scroll('Hello, World!')" uflash.flash(python_script=script.encode('utf-8')) If paths_to_microbits is unspecified it will attempt to find the device's path on the filesystem automatically. If the path_to_runtime is unspecified it will use the built in version of the MicroPython runtime. This feature is useful if a custom build of MicroPython is available. If the automatic discovery fails, then it will raise an IOError.
def create_geometry(self, input_geometry, dip, upper_depth, lower_depth, mesh_spacing=1.0): ''' If geometry is defined as a numpy array then create instance of nhlib.geo.line.Line class, otherwise if already instance of class accept class :param input_geometry: Trace (line) of the fault source as either i) instance of nhlib.geo.line.Line class ii) numpy.ndarray [Longitude, Latitude] :param float dip: Dip of fault surface (in degrees) :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km) :param float mesh_spacing: Spacing of the fault mesh (km) {default = 1.0} ''' assert((dip > 0.) and (dip <= 90.)) self.dip = dip self._check_seismogenic_depths(upper_depth, lower_depth) if not isinstance(input_geometry, Line): if not isinstance(input_geometry, np.ndarray): raise ValueError('Unrecognised or unsupported geometry ' 'definition') else: self.fault_trace = Line([Point(row[0], row[1]) for row in input_geometry]) else: self.fault_trace = input_geometry # Build fault surface self.geometry = SimpleFaultSurface.from_fault_data(self.fault_trace, self.upper_depth, self.lower_depth, self.dip, mesh_spacing)
If geometry is defined as a numpy array then create instance of nhlib.geo.line.Line class, otherwise if already instance of class accept class :param input_geometry: Trace (line) of the fault source as either i) instance of nhlib.geo.line.Line class ii) numpy.ndarray [Longitude, Latitude] :param float dip: Dip of fault surface (in degrees) :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km) :param float mesh_spacing: Spacing of the fault mesh (km) {default = 1.0}
def upgrade_tools_all(call=None): ''' To upgrade VMware Tools on all virtual machines present in the specified provider .. note:: If the virtual machine is running Windows OS, this function will attempt to suppress the automatic reboot caused by a VMware Tools upgrade. CLI Example: .. code-block:: bash salt-cloud -f upgrade_tools_all my-vmware-config ''' if call != 'function': raise SaltCloudSystemExit( 'The upgrade_tools_all function must be called with ' '-f or --function.' ) ret = {} vm_properties = ["name"] vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties) for vm in vm_list: ret[vm['name']] = _upg_tools_helper(vm['object']) return ret
To upgrade VMware Tools on all virtual machines present in the specified provider .. note:: If the virtual machine is running Windows OS, this function will attempt to suppress the automatic reboot caused by a VMware Tools upgrade. CLI Example: .. code-block:: bash salt-cloud -f upgrade_tools_all my-vmware-config
def extract_causal_relations(self): """Extract causal relations as Statements.""" # Get the extractions that are labeled as directed and causal relations = [e for e in self.doc.extractions if 'DirectedRelation' in e['labels'] and 'Causal' in e['labels']] # For each relation, we try to extract an INDRA Statement and # save it if its valid for relation in relations: stmt = self.get_causal_relation(relation) if stmt is not None: self.statements.append(stmt)
Extract causal relations as Statements.
def fromJSON(value): """loads the GP object from a JSON string """ j = json.loads(value) v = GPString() if "defaultValue" in j: v.value = j['defaultValue'] else: v.value = j['value'] if 'paramName' in j: v.paramName = j['paramName'] elif 'name' in j: v.paramName = j['name'] return v
loads the GP object from a JSON string
def _estimate_progress(self): """ estimates the current progress that is then used in _receive_signal :return: current progress in percent """ estimate = True # ==== get the current subscript and the time it takes to execute it ===== current_subscript = self._current_subscript_stage['current_subscript'] # ==== get the number of subscripts ===== num_subscripts = len(self.scripts) # ==== get number of iterations and loop index ====================== if self.iterator_type == 'loop': num_iterations = self.settings['num_loops'] elif self.iterator_type == 'sweep': sweep_range = self.settings['sweep_range'] if self.settings['stepping_mode'] == 'value_step': num_iterations = int((sweep_range['max_value'] - sweep_range['min_value']) / sweep_range['N/value_step']) + 1 # len(np.linspace(sweep_range['min_value'], sweep_range['max_value'], # (sweep_range['max_value'] - sweep_range['min_value']) / # sweep_range['N/value_step'] + 1, endpoint=True).tolist()) elif self.settings['stepping_mode'] == 'N': num_iterations = sweep_range['N/value_step'] else: raise KeyError('unknown key' + self.settings['stepping_mode']) else: print('unknown iterator type in Iterator receive signal - can\'t estimate ramining time') estimate = False if estimate: # get number of loops (completed + 1) loop_index = self.loop_index if num_subscripts > 1: # estimate the progress based on the duration the individual subscripts loop_execution_time = 0. # time for a single loop execution in s sub_progress_time = 0. # progress of current loop iteration in s # ==== get typical duration of current subscript ====================== if current_subscript is not None: current_subscript_exec_duration = self._current_subscript_stage['subscript_exec_duration'][ current_subscript.name].total_seconds() else: current_subscript_exec_duration = 0.0 current_subscript_elapsed_time = (datetime.datetime.now() - current_subscript.start_time).total_seconds() # estimate the duration of the current subscript if the script hasn't been executed once fully and subscript_exec_duration is 0 if current_subscript_exec_duration == 0.0: remaining_time = current_subscript.remaining_time.total_seconds() current_subscript_exec_duration = remaining_time + current_subscript_elapsed_time # ==== get typical duration of one loop iteration ====================== remaining_scripts = 0 # script that remain to be executed for the first time for subscript_name, duration in self._current_subscript_stage['subscript_exec_duration'].items(): if duration.total_seconds() == 0.0: remaining_scripts += 1 loop_execution_time += duration.total_seconds() # add the times of the subscripts that have been executed in the current loop # ignore the current subscript, because that will be taken care of later if self._current_subscript_stage['subscript_exec_count'][subscript_name] == loop_index \ and subscript_name is not current_subscript.name: # this subscript has already been executed in this iteration sub_progress_time += duration.total_seconds() # add the proportional duration of the current subscript given by the subscript progress sub_progress_time += current_subscript_elapsed_time # if there are scripts that have not been executed yet # assume that all the scripts that have not been executed yet take as long as the average of the other scripts if remaining_scripts == num_subscripts: # none of the subscript has been finished. 
assume that all the scripts take as long as the first loop_execution_time = num_subscripts * current_subscript_exec_duration elif remaining_scripts > 1: loop_execution_time = 1. * num_subscripts / (num_subscripts - remaining_scripts) elif remaining_scripts == 1: # there is only one script left which is the current script loop_execution_time += current_subscript_exec_duration if loop_execution_time > 0: progress_subscript = 100. * sub_progress_time / loop_execution_time else: progress_subscript = 1. * progress_subscript / num_subscripts # print(' === script iterator progress estimation loop_index = {:d}/{:d}, progress_subscript = {:f}'.format(loop_index, number_of_iterations, progress_subscript)) progress = 100. * (loop_index - 1. + 0.01 * progress_subscript) / num_iterations else: # if can't estimate the remaining time set to half progress = 50 return progress
estimates the current progress that is then used in _receive_signal :return: current progress in percent
def _on_timeout(): """Invoked periodically to ensure that metrics that have been collected are submitted to InfluxDB. :rtype: tornado.concurrent.Future or None """ global _buffer_size LOGGER.debug('No metrics submitted in the last %.2f seconds', _timeout_interval / 1000.0) _buffer_size = _pending_measurements() if _buffer_size: return _trigger_batch_write() _start_timeout()
Invoked periodically to ensure that metrics that have been collected are submitted to InfluxDB. :rtype: tornado.concurrent.Future or None
def process_messages(self): """ Read from the incoming_message_mailbox and report to the storage backend based on the first message found there. Returns: None """ try: msg = self.msgbackend.pop(self.incoming_message_mailbox) self.handle_incoming_message(msg) except queue.Empty: logger.debug("Worker message queue currently empty.")
Read from the incoming_message_mailbox and report to the storage backend based on the first message found there. Returns: None
def button_clicked(self, button): """Action when button was clicked. Parameters ---------- button : instance of QPushButton which button was pressed """ if button is self.idx_ok: fn = Path(self.filename) xp_format = self.xp_format.get_value() if self.all_types.get_value(): evt_type = self.event_types else: evt_type = [ x.text() for x in self.idx_evt_type.selectedItems()] if 'CSV' == xp_format: self.parent.notes.annot.export_events(fn, evt_type) elif 'Brain Vision' == xp_format: events = [] for et in evt_type: events.extend(self.parent.notes.annot.get_events(name=et)) if not events: self.parent.statusBar.showMessage('No events found.') return events = sorted(events, key=lambda x: x['start']) dataset = self.parent.info.dataset data = ChanTime() data.start_time = dataset.header['start_time'] data.s_freq = int(dataset.header['s_freq']) with fn.with_suffix('.vmrk').open('w') as f: lg.info('Writing to ' + str(fn) + '.vmrk') f.write(_write_vmrk(data, fn, events)) self.accept() if button is self.idx_cancel: self.reject()
Action when button was clicked. Parameters ---------- button : instance of QPushButton which button was pressed
def get(self, timeout=None): """Return status""" status = self.status if status >= COMPLETED: return status else: self.wait(timeout) return self.status
Return status
def abort_expired_batches(self, request_timeout_ms, cluster): """Abort the batches that have been sitting in RecordAccumulator for more than the configured request_timeout due to metadata being unavailable. Arguments: request_timeout_ms (int): milliseconds to timeout cluster (ClusterMetadata): current metadata for kafka cluster Returns: list of ProducerBatch that were expired """ expired_batches = [] to_remove = [] count = 0 for tp in list(self._batches.keys()): assert tp in self._tp_locks, 'TopicPartition not in locks dict' # We only check if the batch should be expired if the partition # does not have a batch in flight. This is to avoid the later # batches get expired when an earlier batch is still in progress. # This protection only takes effect when user sets # max.in.flight.request.per.connection=1. Otherwise the expiration # order is not guranteed. if tp in self.muted: continue with self._tp_locks[tp]: # iterate over the batches and expire them if they have stayed # in accumulator for more than request_timeout_ms dq = self._batches[tp] for batch in dq: is_full = bool(bool(batch != dq[-1]) or batch.records.is_full()) # check if the batch is expired if batch.maybe_expire(request_timeout_ms, self.config['retry_backoff_ms'], self.config['linger_ms'], is_full): expired_batches.append(batch) to_remove.append(batch) count += 1 self.deallocate(batch) else: # Stop at the first batch that has not expired. break # Python does not allow us to mutate the dq during iteration # Assuming expired batches are infrequent, this is better than # creating a new copy of the deque for iteration on every loop if to_remove: for batch in to_remove: dq.remove(batch) to_remove = [] if expired_batches: log.warning("Expired %d batches in accumulator", count) # trace return expired_batches
Abort the batches that have been sitting in RecordAccumulator for more than the configured request_timeout due to metadata being unavailable. Arguments: request_timeout_ms (int): milliseconds to timeout cluster (ClusterMetadata): current metadata for kafka cluster Returns: list of ProducerBatch that were expired
def get_tick(self, name): """Check the config variables to see if there is a configurable tick. Sensor Graph has a built-in 10 second tick that is sent every 10 seconds to allow for triggering timed events. There is a second 'user' tick that is generated internally by the sensorgraph compiler and used for fast operations and finally there are several field configurable ticks that can be used for setting up configurable timers. This is done by setting a config variable on the controller with the desired tick interval, which is then interpreted by this function. The appropriate config_id to use is listed in `known_constants.py` Returns: int: 0 if the tick is disabled, otherwise the number of seconds between each tick """ name_map = { 'fast': config_fast_tick_secs, 'user1': config_tick1_secs, 'user2': config_tick2_secs } config = name_map.get(name) if config is None: raise ArgumentError("Unknown tick requested", name=name) slot = SlotIdentifier.FromString('controller') try: var = self.get_config(slot, config) return var[1] except ArgumentError: return 0
Check the config variables to see if there is a configurable tick. Sensor Graph has a built-in 10 second tick that is sent every 10 seconds to allow for triggering timed events. There is a second 'user' tick that is generated internally by the sensorgraph compiler and used for fast operations and finally there are several field configurable ticks that can be used for setting up configurable timers. This is done by setting a config variable on the controller with the desired tick interval, which is then interpreted by this function. The appropriate config_id to use is listed in `known_constants.py` Returns: int: 0 if the tick is disabled, otherwise the number of seconds between each tick
def _on_interface_opened(self, success, result, failure_reason, context, next_characteristic=None): """Callback function called when the notification related to an interface has been enabled. It is executed in the baBLE working thread: should not be blocking. Args: success (bool): A bool indicating that the operation is successful or not result (dict): Information (if successful) failure_reason (any): An object indicating the reason why the operation is not successful (else None) context (dict): The connection context next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable notification. """ if not success: self.connections.finish_operation(context['connection_id'], False, failure_reason) return if next_characteristic is not None: self.bable.set_notification( enabled=True, connection_handle=context['connection_handle'], characteristic=next_characteristic, on_notification_set=[self._on_interface_opened, context], on_notification_received=self._on_notification_received, sync=False ) else: self.connections.finish_operation(context['connection_id'], True, None)
Callback function called when the notification related to an interface has been enabled. It is executed in the baBLE working thread: should not be blocking. Args: success (bool): A bool indicating that the operation is successful or not result (dict): Information (if successful) failure_reason (any): An object indicating the reason why the operation is not successful (else None) context (dict): The connection context next_characteristic (bable_interface.Characteristic): If not None, indicate another characteristic to enable notification.
def is_replication_enabled(host=None, core_name=None): ''' SLAVE CALL Check for errors, and determine if a slave is replicating or not. host : str (None) The solr host to query. __opts__['host'] is default. core_name : str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.is_replication_enabled music ''' ret = _get_return_dict() success = True # since only slaves can call this let's check the config: if _is_master() and host is None: errors = ['Only "slave" minions can run "is_replication_enabled"'] return ret.update({'success': False, 'errors': errors}) # define a convenience method so we don't duplicate code def _checks(ret, success, resp, core): if response['success']: slave = resp['data']['details']['slave'] # we need to initialize this to false in case there is an error # on the master and we can't get this info. enabled = 'false' master_url = slave['masterUrl'] # check for errors on the slave if 'ERROR' in slave: success = False err = "{0}: {1} - {2}".format(core, slave['ERROR'], master_url) resp['errors'].append(err) # if there is an error return everything data = slave if core is None else {core: {'data': slave}} else: enabled = slave['masterDetails']['master'][ 'replicationEnabled'] # if replication is turned off on the master, or polling is # disabled we need to return false. These may not be errors, # but the purpose of this call is to check to see if the slaves # can replicate. if enabled == 'false': resp['warnings'].append("Replication is disabled on master.") success = False if slave['isPollingDisabled'] == 'true': success = False resp['warning'].append("Polling is disabled") # update the return ret = _update_return_dict(ret, success, data, resp['errors'], resp['warnings']) return (ret, success) if _get_none_or_value(core_name) is None and _check_for_cores(): for name in __opts__['solr.cores']: response = _replication_request('details', host=host, core_name=name) ret, success = _checks(ret, success, response, name) else: response = _replication_request('details', host=host, core_name=core_name) ret, success = _checks(ret, success, response, core_name) return ret
SLAVE CALL Check for errors, and determine if a slave is replicating or not. host : str (None) The solr host to query. __opts__['host'] is default. core_name : str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.is_replication_enabled music
def build(self, title, text, img_url): """ :param title: Title of the card :param text: Description of the card :param img_url: Image of the card """ super(ImageCard, self).build() self.title = Title(id=self.id + "-title", text=title, classname="card-title", size=3, parent=self) self.block = Panel(id=self.id + "-block", classname="card-block", parent=self) self.image = Image(id=self.id + "-image", img_url=img_url, classname="card-image-top img-fluid", parent=self.block) self.text = Paragraph(id=self.id + "-text", text=text, classname="card-text", parent=self.block)
:param title: Title of the card :param text: Description of the card :param img_url: Image of the card
def evals_get(self, service_staff_id, start_date, end_date, session): '''taobao.wangwang.eservice.evals.get Get evaluation details. Queries a user's detailed evaluation records by user id. The main account id can query the evaluations of sub-accounts within the shop, group administrators can query the evaluations of accounts in their group, and non-administrator sub-accounts can only query their own evaluations.''' request = TOPRequest('taobao.wangwang.eservice.evals.get') request['service_staff_id'] = service_staff_id request['start_date'] = start_date request['end_date'] = end_date self.create(self.execute(request, session)) return self.staff_eval_details
taobao.wangwang.eservice.evals.get Get evaluation details. Queries a user's detailed evaluation records by user id. The main account id can query the evaluations of sub-accounts within the shop, group administrators can query the evaluations of accounts in their group, and non-administrator sub-accounts can only query their own evaluations.
def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method): """Perform CR sweeps on a target vector. Internal function called by CR. Performs habituated or concurrent relaxation sweeps on target vector. Stops when either (i) very fast convergence, CF < 0.1*thetacr, are observed, or at least a given number of sweeps have been performed and the relative change in CF < 0.1. Parameters ---------- A : csr_matrix B : array like Target near null space mode Findex : array like List of F indices in current splitting Cindex : array like List of C indices in current splitting nu : int minimum number of relaxation sweeps to do thetacr Desired convergence factor Returns ------- rho : float Convergence factor of last iteration e : array like Smoothed error vector """ n = A.shape[0] # problem size numax = nu z = np.zeros((n,)) e = deepcopy(B[:, 0]) e[Cindex] = 0.0 enorm = norm(e) rhok = 1 it = 0 while True: if method == 'habituated': gauss_seidel(A, e, z, iterations=1) e[Cindex] = 0.0 elif method == 'concurrent': gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1) else: raise NotImplementedError('method not recognized: need habituated ' 'or concurrent') enorm_old = enorm enorm = norm(e) rhok_old = rhok rhok = enorm / enorm_old it += 1 # criteria 1 -- fast convergence if rhok < 0.1 * thetacr: break # criteria 2 -- at least nu iters, small relative change in CF (<0.1) elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu): break return rhok, e
Perform CR sweeps on a target vector. Internal function called by CR. Performs habituated or concurrent relaxation sweeps on target vector. Stops when either (i) very fast convergence, CF < 0.1*thetacr, are observed, or at least a given number of sweeps have been performed and the relative change in CF < 0.1. Parameters ---------- A : csr_matrix B : array like Target near null space mode Findex : array like List of F indices in current splitting Cindex : array like List of C indices in current splitting nu : int minimum number of relaxation sweeps to do thetacr Desired convergence factor Returns ------- rho : float Convergence factor of last iteration e : array like Smoothed error vector
def _load_torrents_directory(self): """ Load torrents directory If it does not exist yet, this request will cause the system to create one """ r = self._req_lixian_get_id(torrent=True) self._downloads_directory = self._load_directory(r['cid'])
Load torrents directory If it does not exist yet, this request will cause the system to create one
def get_by_id(self, id): """Return user info by user id.""" with contextlib.closing(self.database.cursor()) as cursor: cursor.execute('SELECT id, name FROM users WHERE id=?', (id,)) return cursor.fetchone()
Return user info by user id.
def _recode_for_categories(codes, old_categories, new_categories): """ Convert a set of codes to a new set of categories Parameters ---------- codes : array old_categories, new_categories : Index Returns ------- new_codes : array Examples -------- >>> old_cat = pd.Index(['b', 'a', 'c']) >>> new_cat = pd.Index(['a', 'b']) >>> codes = np.array([0, 1, 1, 2]) >>> _recode_for_categories(codes, old_cat, new_cat) array([ 1, 0, 0, -1]) """ from pandas.core.algorithms import take_1d if len(old_categories) == 0: # All null anyway, so just retain the nulls return codes.copy() elif new_categories.equals(old_categories): # Same categories, so no need to actually recode return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=-1) return new_codes
Convert a set of codes to a new set of categories Parameters ---------- codes : array old_categories, new_categories : Index Returns ------- new_codes : array Examples -------- >>> old_cat = pd.Index(['b', 'a', 'c']) >>> new_cat = pd.Index(['a', 'b']) >>> codes = np.array([0, 1, 1, 2]) >>> _recode_for_categories(codes, old_cat, new_cat) array([ 1, 0, 0, -1])
def inc_n(self, n, exception=None): # type: (int, Optional[ParseError]) -> bool """ Increments the parser by n characters if the end of the input has not been reached. """ return self._src.inc_n(n=n, exception=exception)
Increments the parser by n characters if the end of the input has not been reached.
def register_provider(cls, provider): """Register method to keep list of providers.""" def decorator(subclass): """Register as decorator function.""" cls._providers[provider] = subclass subclass.name = provider return subclass return decorator
Register method to keep list of providers.
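Because the method is a classmethod that returns a decorator, the registration pattern can be sketched end to end with a hypothetical base class (all names below are illustrative, not from the source):

class Provider:
    _providers = {}

    @classmethod
    def register_provider(cls, provider):
        def decorator(subclass):
            # Record the subclass under the given provider key and tag it with its name.
            cls._providers[provider] = subclass
            subclass.name = provider
            return subclass
        return decorator

@Provider.register_provider('github')
class GitHubProvider(Provider):
    pass

assert Provider._providers['github'] is GitHubProvider
assert GitHubProvider.name == 'github'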
def get_renderer(self, with_layout=True): """ Get the default renderer """ if with_layout and self.is_lti(): return self._default_renderer_lti elif with_layout: return self._default_renderer else: return self._default_renderer_nolayout
Get the default renderer
def parse(self): '''parse is the base function for parsing the recipe, whether it be a Dockerfile or Singularity recipe. The recipe is read in as lines, and saved to a list if needed for the future. If the client has it, the recipe type specific _parse function is called. Instructions for making a client subparser: It should have a main function _parse that parses a list of lines from some recipe text file into the appropriate sections, e.g., self.fromHeader self.environ self.labels self.install self.files self.test self.entrypoint ''' self.cmd = None self.comments = [] self.entrypoint = None self.environ = [] self.files = [] self.install = [] self.labels = [] self.ports = [] self.test = None self.volumes = [] if self.recipe: # Read in the raw lines of the file self.lines = read_file(self.recipe) # If properly instantiated by Docker or Singularity Recipe, parse if hasattr(self, '_parse'): self._parse()
parse is the base function for parsing the recipe, whether it be a Dockerfile or Singularity recipe. The recipe is read in as lines, and saved to a list if needed for the future. If the client has it, the recipe type specific _parse function is called. Instructions for making a client subparser: It should have a main function _parse that parses a list of lines from some recipe text file into the appropriate sections, e.g., self.fromHeader self.environ self.labels self.install self.files self.test self.entrypoint
def guestfs_conn_mount_ro(disk_path, disk_root, retries=5, wait=1): """ Open a GuestFS handle with `disk_path` and try mounting the root filesystem. `disk_root` is a hint where it should be looked and will only be used if GuestFS will not be able to deduce it independently. Note that mounting a live guest, can lead to filesystem inconsistencies, causing the mount operation to fail. As we use readonly mode, this is safe, but the operation itself can still fail. Therefore, this method will watch for mount failures and retry 5 times before throwing an exception. Args: disk_path(str): Path to the disk. disk_root(str): Hint what is the root device with the OS filesystem. retries(int): Number of retries for :func:`~guestfs.GuestFS.mount_ro` operation. Note that on each retry a new GuestFS handle will be used. wait(int): Time to wait between retries. Yields: guestfs.GuestFS: An open GuestFS handle. Raises: :exc:`GuestFSError`: On any guestfs operation error, including exceeding retries for the :func:`~guestfs.GuestFS.mount_ro` operation. """ for attempt in range(retries): with guestfs_conn_ro(disk_path) as conn: rootfs = find_rootfs(conn, disk_root) try: conn.mount_ro(rootfs, '/') except RuntimeError as err: LOGGER.debug(err) if attempt < retries - 1: LOGGER.debug( ( 'failed mounting %s:%s using guestfs, ' 'attempt %s/%s' ), disk_path, rootfs, attempt + 1, retries ) time.sleep(wait) continue else: raise GuestFSError( 'failed mounting {0}:{1} using guestfs'.format( disk_path, rootfs ) ) yield conn try: conn.umount(rootfs) except RuntimeError as err: LOGGER.debug(err) raise GuestFSError( ('failed unmounting {0}:{1} using ' 'guestfs').format(disk_path, rootfs) ) break
Open a GuestFS handle with `disk_path` and try mounting the root filesystem. `disk_root` is a hint where it should be looked and will only be used if GuestFS will not be able to deduce it independently. Note that mounting a live guest, can lead to filesystem inconsistencies, causing the mount operation to fail. As we use readonly mode, this is safe, but the operation itself can still fail. Therefore, this method will watch for mount failures and retry 5 times before throwing an exception. Args: disk_path(str): Path to the disk. disk_root(str): Hint what is the root device with the OS filesystem. retries(int): Number of retries for :func:`~guestfs.GuestFS.mount_ro` operation. Note that on each retry a new GuestFS handle will be used. wait(int): Time to wait between retries. Yields: guestfs.GuestFS: An open GuestFS handle. Raises: :exc:`GuestFSError`: On any guestfs operation error, including exceeding retries for the :func:`~guestfs.GuestFS.mount_ro` operation.
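Since the function yields an open handle, usage would look roughly like the following (paths are illustrative, and the generator is presumably wrapped as a context manager in the source module):

with guestfs_conn_mount_ro('/var/lib/images/guest.qcow2', '/dev/sda1') as conn:
    # Any read-only libguestfs call can be issued against the mounted root,
    # e.g. reading a file from the guest filesystem.
    release_info = conn.cat('/etc/os-release')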
def compress_folder_dump(path, target): ''' Compress folder dump to tar.gz file ''' import tarfile if not path or not os.path.isdir(path): raise SystemExit(_error_codes.get(105)) name_out_file = (target + 'dump-' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')) tar = tarfile.open(name_out_file + '.tar.gz', 'w:gz') tar.add(path, arcname='dump') tar.close() return (name_out_file, name_out_file + '.tar.gz')
Compress folder dump to tar.gz file
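A quick usage sketch (paths are illustrative); note that the archive name is built by appending 'dump-<timestamp>' directly to target, so target is expected to end with a path separator:

base_name, tarball_path = compress_folder_dump('/var/backups/dump', '/var/backups/')
# tarball_path -> '/var/backups/dump-2019-01-01-12-00-00.tar.gz' (timestamp varies)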
def setTags(self, tags): """Set the tags for current photo to list tags. (flickr.photos.settags) """ method = 'flickr.photos.setTags' tags = uniq(tags) _dopost(method, auth=True, photo_id=self.id, tags=tags) self._load_properties()
Set the tags for current photo to list tags. (flickr.photos.settags)
def _make_request(self, bbox, meta_info, timestamps): """ Make OGC request to create input for cloud detector classifier :param bbox: Bounding box :param meta_info: Meta-info dictionary of input eopatch :return: Requested data """ service_type = ServiceType(meta_info['service_type']) # Raise error if resolutions are not specified if self.cm_size_x is None and self.cm_size_y is None: raise ValueError("Specify size_x and size_y for data request") # If WCS request, make sure both resolutions are set if service_type == ServiceType.WCS: if self.cm_size_y is None: self.cm_size_y = self.cm_size_x elif self.cm_size_x is None: self.cm_size_x = self.cm_size_y custom_url_params = {CustomUrlParam.SHOWLOGO: False, CustomUrlParam.TRANSPARENT: False, CustomUrlParam.EVALSCRIPT: self.model_evalscript} request = {ServiceType.WMS: self._get_wms_request, ServiceType.WCS: self._get_wcs_request}[service_type](bbox, meta_info['time_interval'], self.cm_size_x, self.cm_size_y, meta_info['maxcc'], meta_info['time_difference'], custom_url_params) request_dates = request.get_dates() download_frames = get_common_timestamps(request_dates, timestamps) request_return = request.get_data(raise_download_errors=False, data_filter=download_frames) bad_data = [idx for idx, value in enumerate(request_return) if value is None] for idx in reversed(sorted(bad_data)): LOGGER.warning('Data from %s could not be downloaded for %s!', str(request_dates[idx]), self.data_feature) del request_return[idx] del request_dates[idx] return np.asarray(request_return), request_dates
Make OGC request to create input for cloud detector classifier :param bbox: Bounding box :param meta_info: Meta-info dictionary of input eopatch :return: Requested data
def select_entry(self, *arguments): """ Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`). """ matches = self.smart_search(*arguments) if len(matches) > 1: logger.info("More than one match, prompting for choice ..") labels = [entry.name for entry in matches] return matches[labels.index(prompt_for_choice(labels))] else: logger.info("Matched one entry: %s", matches[0].name) return matches[0]
Select a password from the available choices. :param arguments: Refer to :func:`smart_search()`. :returns: The name of a password (a string) or :data:`None` (when no password matched the given `arguments`).
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file): """ Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata """ proc_counts = timing_data['proc_counts'] if len(proc_counts) > 2: plt.figure(figsize=(10, 8), dpi=150) plt.title(title) plt.xlabel("Number of processors") plt.ylabel(ylabel) for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']): case_data = timing_data[case] means = case_data['means'] mins = case_data['mins'] maxs = case_data['maxs'] plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5) plt.plot(proc_counts, means, 'o-', color=case_color, label=case) plt.legend(loc='best') else: plt.figure(figsize=(5, 3)) plt.axis('off') plt.text(0.4, 0.8, "ERROR:") plt.text(0.0, 0.6, "Not enough data points to draw scaling plot") plt.text(0.0, 0.44, "To generate this data rerun BATS with the") plt.text(0.0, 0.36, "performance option enabled.") if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata
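The expected shape of timing_data follows from the plotting code: a 'proc_counts' list plus 'bench' and 'model' entries, each carrying 'means', 'mins' and 'maxs' series of matching length. A hedged construction (values and paths are made up for illustration):

timing_data = {
    'proc_counts': [1, 2, 4, 8],
    'bench': {'means': [100, 55, 30, 18], 'mins': [95, 52, 28, 17], 'maxs': [105, 58, 33, 20]},
    'model': {'means': [110, 60, 33, 20], 'mins': [104, 57, 31, 18], 'maxs': [116, 64, 35, 22]},
}
img_elem = generate_scaling_plot(timing_data, 'Strong scaling', 'Runtime (s)',
                                 'Mean runtime with min/max envelope', '/tmp/scaling.png')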
def copy_scubadir_file(self, name, source): '''Copies source into the scubadir Returns the container-path of the copied file ''' dest = os.path.join(self.__scubadir_hostpath, name) assert not os.path.exists(dest) shutil.copy2(source, dest) return os.path.join(self.__scubadir_contpath, name)
Copies source into the scubadir Returns the container-path of the copied file
def make_response(self, image, size, mode, filename=None, *args, **kwargs): """ :param image: image as bytes :param size: requested maximum width/height size :param mode: one of 'scale', 'fit' or 'crop' :param filename: filename """ try: fmt = get_format(image) except IOError: # not a known image file raise NotFound() self.content_type = "image/png" if fmt == "PNG" else "image/jpeg" ext = "." + str(fmt.lower()) if not filename: filename = "image" if not filename.lower().endswith(ext): filename += ext self.filename = filename if size: image = resize(image, size, size, mode=mode) if mode == CROP: assert get_size(image) == (size, size) else: image = image.read() return make_response(image)
:param image: image as bytes :param size: requested maximum width/height size :param mode: one of 'scale', 'fit' or 'crop' :param filename: filename
def _index_list_of_values(d, k): """Returns d[k] or [d[k]] if the value is not a list""" v = d[k] if isinstance(v, list): return v return [v]
Returns d[k] or [d[k]] if the value is not a list
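Its wrapping behaviour in doctest form:

>>> _index_list_of_values({'a': [1, 2]}, 'a')
[1, 2]
>>> _index_list_of_values({'a': 1}, 'a')
[1]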
def sanitize_qualifiers(repos=None, followers=None, language=None): ''' Build the qualifiers string, e.g. repos:+42 followers:+1000 language: becomes params = {'q': 'tom repos:>42 followers:>1000'} ''' qualifiers = '' if repos: qualifiers += 'repos:{0} '.format(repos) qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers) qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers) if followers: qualifiers += 'followers:{0} '.format(followers) qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers) qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers) try: if language in ALLOWED_LANGUAGES and not language == '': qualifiers += 'language:{0} '.format(language) elif language == '': qualifiers += '' else: raise AllowedLanguagesException except AllowedLanguagesException as e: print(e) return qualifiers
Build the qualifiers string, e.g. repos:+42 followers:+1000 language: becomes params = {'q': 'tom repos:>42 followers:>1000'}
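The '+'/'-' prefixes are rewritten into GitHub's '>' and '<' range syntax by the two substitutions above; the effect in isolation (standalone snippet, values illustrative):

import re

qualifiers = 'repos:+42 followers:-1000 '
qualifiers = re.sub(r"([+])([=a-zA-Z0-9]+)", r">\2", qualifiers)
qualifiers = re.sub(r"([-])([=a-zA-Z0-9]+)", r"<\2", qualifiers)
print(qualifiers)  # 'repos:>42 followers:<1000 '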
def apply_ufunc( func: Callable, *args: Any, input_core_dims: Optional[Sequence[Sequence]] = None, output_core_dims: Optional[Sequence[Sequence]] = ((),), exclude_dims: AbstractSet = frozenset(), vectorize: bool = False, join: str = 'exact', dataset_join: str = 'exact', dataset_fill_value: object = _NO_FILL_VALUE, keep_attrs: bool = False, kwargs: Mapping = None, dask: str = 'forbidden', output_dtypes: Optional[Sequence] = None, output_sizes: Optional[Mapping[Any, int]] = None ) -> Any: """Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : Sequence[Sequence], optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : List[tuple], optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. 
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining variables of Dataset objects with mismatched data variables. - 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs: boolean, Optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. dask: 'forbidden', 'allowed' or 'parallelized', optional How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array. If used, the ``output_dtypes`` argument must also be provided. Multiple output arguments are not yet supported. output_dtypes : list of dtypes, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2) ... 
return xr.apply_ufunc(func, a, b) You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset`` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) <xarray.DataArray (x: 3)> array([1.414214, 2.828427, 4.242641]) Coordinates: * x (x) float64 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(4, 5) 5.0 >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) <xarray.DataArray (x: 3)> array([1., 2., 3.]) Coordinates: * x (x) float64 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension:: def mean(obj, dim): # note: apply always moves core dimensions to the end return apply_ufunc(np.mean, obj, input_core_dims=[[dim]], kwargs={'axis': -1}) Inner product over a specific dimension (like ``xr.dot``):: def _inner(x, y): result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) return result[..., 0, 0] def inner_product(a, b, dim): return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) Stack objects along a new dimension (like ``xr.concat``):: def stack(objects, dim, new_coord): # note: this version does not stack coordinates func = lambda *x: np.stack(x, axis=-1) result = apply_ufunc(func, *objects, output_core_dims=[[dim]], join='outer', dataset_fill_value=np.nan) result[dim] = new_coord return result If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors:: import scipy.stats def earth_mover_distance(first_samples, second_samples, dim='ensemble'): return apply_ufunc(scipy.stats.wasserstein_distance, first_samples, second_samples, input_core_dims=[[dim], [dim]], vectorize=True) Most of NumPy's builtin functions already broadcast their inputs appropriately for use in `apply`. You may find helper functions such as numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also works well with numba's vectorize and guvectorize. Further explanation with examples are provided in the xarray documentation [3]. See also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize References ---------- .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation """ # noqa: E501 # don't error on that URL one line up from .groupby import GroupBy from .dataarray import DataArray from .variable import Variable if input_core_dims is None: input_core_dims = ((),) * (len(args)) elif len(input_core_dims) != len(args): raise ValueError( 'input_core_dims must be None or a tuple with the length same to ' 'the number of arguments. 
Given input_core_dims: {}, ' 'number of args: {}.'.format(input_core_dims, len(args))) if kwargs is None: kwargs = {} signature = _UFuncSignature(input_core_dims, output_core_dims) if exclude_dims and not exclude_dims <= signature.all_core_dims: raise ValueError('each dimension in `exclude_dims` must also be a ' 'core dimension in the function signature') if kwargs: func = functools.partial(func, **kwargs) if vectorize: if signature.all_core_dims: # we need the signature argument if LooseVersion(np.__version__) < '1.12': # pragma: no cover raise NotImplementedError( 'numpy 1.12 or newer required when using vectorize=True ' 'in xarray.apply_ufunc with non-scalar output core ' 'dimensions.') func = np.vectorize(func, otypes=output_dtypes, signature=signature.to_gufunc_string()) else: func = np.vectorize(func, otypes=output_dtypes) variables_vfunc = functools.partial(apply_variable_ufunc, func, signature=signature, exclude_dims=exclude_dims, keep_attrs=keep_attrs, dask=dask, output_dtypes=output_dtypes, output_sizes=output_sizes) if any(isinstance(a, GroupBy) for a in args): this_apply = functools.partial(apply_ufunc, func, input_core_dims=input_core_dims, output_core_dims=output_core_dims, exclude_dims=exclude_dims, join=join, dataset_join=dataset_join, dataset_fill_value=dataset_fill_value, keep_attrs=keep_attrs, dask=dask) return apply_groupby_func(this_apply, *args) elif any(is_dict_like(a) for a in args): return apply_dataset_vfunc(variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, dataset_join=dataset_join, fill_value=dataset_fill_value, keep_attrs=keep_attrs) elif any(isinstance(a, DataArray) for a in args): return apply_dataarray_vfunc(variables_vfunc, *args, signature=signature, join=join, exclude_dims=exclude_dims, keep_attrs=keep_attrs) elif any(isinstance(a, Variable) for a in args): return variables_vfunc(*args) else: return apply_array_ufunc(func, *args, dask=dask)
Apply a vectorized function for unlabeled arrays on xarray objects. The function will be mapped over the data variable(s) of the input arguments using xarray's standard rules for labeled computation, including alignment, broadcasting, looping over GroupBy/Dataset variables, and merging of coordinates. Parameters ---------- func : callable Function to call like ``func(*args, **kwargs)`` on unlabeled arrays (``.data``) that returns an array or tuple of arrays. If multiple arguments with non-matching dimensions are supplied, this function is expected to vectorize (broadcast) over axes of positional arguments in the style of NumPy universal functions [1]_ (if this is not the case, set ``vectorize=True``). If this function returns multiple outputs, you must set ``output_core_dims`` as well. *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars Mix of labeled and/or unlabeled arrays to which to apply the function. input_core_dims : Sequence[Sequence], optional List of the same length as ``args`` giving the list of core dimensions on each input argument that should not be broadcast. By default, we assume there are no core dimensions on any input arguments. For example, ``input_core_dims=[[], ['time']]`` indicates that all dimensions on the first argument and all dimensions other than 'time' on the second argument should be broadcast. Core dimensions are automatically moved to the last axes of input variables before applying ``func``, which facilitates using NumPy style generalized ufuncs [2]_. output_core_dims : List[tuple], optional List of the same length as the number of output arguments from ``func``, giving the list of core dimensions on each output that were not broadcast on the inputs. By default, we assume that ``func`` outputs exactly one array, with axes corresponding to each broadcast dimension. Core dimensions are assumed to appear as the last dimensions of each output in the provided order. exclude_dims : set, optional Core dimensions on the inputs to exclude from alignment and broadcasting entirely. Any input coordinates along these dimensions will be dropped. Each excluded dimension must also appear in ``input_core_dims`` for at least one argument. Only dimensions listed here are allowed to change size between input and output objects. vectorize : bool, optional If True, then assume ``func`` only takes arrays defined over core dimensions as input and vectorize it automatically with :py:func:`numpy.vectorize`. This option exists for convenience, but is almost always slower than supplying a pre-vectorized function. Using this option requires NumPy version 1.12 or newer. join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining the indexes of the passed objects along each dimension, and the variables of Dataset objects with mismatched data variables: - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': raise `ValueError` instead of aligning when indexes to be aligned are not equal dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining variables of Dataset objects with mismatched data variables. 
- 'outer': take variables from both Dataset objects - 'inner': take only overlapped variables - 'left': take only variables from the first object - 'right': take only variables from the last object - 'exact': data variables on all Dataset objects must match exactly dataset_fill_value : optional Value used in place of missing variables on Dataset inputs when the datasets do not share the exact same ``data_vars``. Required if ``dataset_join not in {'inner', 'exact'}``, otherwise ignored. keep_attrs: boolean, Optional Whether to copy attributes from the first argument to the output. kwargs: dict, optional Optional keyword arguments passed directly on to call ``func``. dask: 'forbidden', 'allowed' or 'parallelized', optional How to handle applying to objects containing lazy data in the form of dask arrays: - 'forbidden' (default): raise an error if a dask array is encountered. - 'allowed': pass dask arrays directly on to ``func``. - 'parallelized': automatically parallelize ``func`` if any of the inputs are a dask array. If used, the ``output_dtypes`` argument must also be provided. Multiple output arguments are not yet supported. output_dtypes : list of dtypes, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional Optional mapping from dimension names to sizes for outputs. Only used if dask='parallelized' and new dimensions (not found on inputs) appear on outputs. Returns ------- Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or numpy.ndarray, the first type on that list to appear on an input. Examples -------- Calculate the vector magnitude of two arguments: >>> def magnitude(a, b): ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2) ... return xr.apply_ufunc(func, a, b) You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset`` objects, with automatically preserved dimensions and coordinates, e.g., >>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])]) >>> magnitude(array, -array) <xarray.DataArray (x: 3)> array([1.414214, 2.828427, 4.242641]) Coordinates: * x (x) float64 0.1 0.2 0.3 Plain scalars, numpy arrays and a mix of these with xarray objects is also supported: >>> magnitude(4, 5) 5.0 >>> magnitude(3, np.array([0, 4])) array([3., 5.]) >>> magnitude(array, 0) <xarray.DataArray (x: 3)> array([1., 2., 3.]) Coordinates: * x (x) float64 0.1 0.2 0.3 Other examples of how you could use ``apply_ufunc`` to write functions to (very nearly) replicate existing xarray functionality: Compute the mean (``.mean``) over one dimension:: def mean(obj, dim): # note: apply always moves core dimensions to the end return apply_ufunc(np.mean, obj, input_core_dims=[[dim]], kwargs={'axis': -1}) Inner product over a specific dimension (like ``xr.dot``):: def _inner(x, y): result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis]) return result[..., 0, 0] def inner_product(a, b, dim): return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]]) Stack objects along a new dimension (like ``xr.concat``):: def stack(objects, dim, new_coord): # note: this version does not stack coordinates func = lambda *x: np.stack(x, axis=-1) result = apply_ufunc(func, *objects, output_core_dims=[[dim]], join='outer', dataset_fill_value=np.nan) result[dim] = new_coord return result If your function is not vectorized but can be applied only to core dimensions, you can use ``vectorize=True`` to turn into a vectorized function. This wraps :py:func:`numpy.vectorize`, so the operation isn't terribly fast. 
Here we'll use it to calculate the distance between empirical samples from two probability distributions, using a scipy function that needs to be applied to vectors:: import scipy.stats def earth_mover_distance(first_samples, second_samples, dim='ensemble'): return apply_ufunc(scipy.stats.wasserstein_distance, first_samples, second_samples, input_core_dims=[[dim], [dim]], vectorize=True) Most of NumPy's builtin functions already broadcast their inputs appropriately for use in `apply`. You may find helper functions such as numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also works well with numba's vectorize and guvectorize. Further explanation with examples are provided in the xarray documentation [3]. See also -------- numpy.broadcast_arrays numba.vectorize numba.guvectorize References ---------- .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
def choice_default_invalidator(self, obj): """Invalidate cached items when the Choice changes.""" invalid = [('Question', obj.question_id, True)] for pk in obj.voters.values_list('pk', flat=True): invalid.append(('User', pk, False)) return invalid
Invalidate cached items when the Choice changes.
def _parse_files(self, msp_pth, chunk, db_type, celery_obj=False): """Parse the MSP files and insert into database Args: msp_pth (str): path to msp file or directory [required] db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required] chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required] celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks [default False] """ if os.path.isdir(msp_pth): c = 0 for folder, subs, files in sorted(os.walk(msp_pth)): for msp_file in sorted(files): msp_file_pth = os.path.join(folder, msp_file) if os.path.isdir(msp_file_pth) or not msp_file_pth.lower().endswith(('txt', 'msp')): continue print('MSP FILE PATH', msp_file_pth) self.num_lines = line_count(msp_file_pth) # each file is processed separately but we want to still process in chunks so we save the number # of spectra currently being processed with the c variable with open(msp_file_pth, "r") as f: c = self._parse_lines(f, chunk, db_type, celery_obj, c) else: self.num_lines = line_count(msp_pth) with open(msp_pth, "r") as f: self._parse_lines(f, chunk, db_type, celery_obj) self.insert_data(remove_data=True, db_type=db_type)
Parse the MSP files and insert into database Args: msp_pth (str): path to msp file or directory [required] db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required] chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required] celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks [default False]
def tar_file(files, tarname): '''Compress a file or directory into a tar file.''' if isinstance(files, basestring): files = [files] o = tarfile.open(tarname, 'w:gz') for file in files: o.add(file) o.close()
Compress a file or directory into a tar file.
def is_verbose(): """ Only safe to call within a click context. """ ctx = click.get_current_context() state = ctx.ensure_object(CommandState) return state.is_verbose()
Only safe to call within a click context.
def begin(self, user_url, anonymous=False): """Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}. """ disco = Discovery(self.session, user_url, self.session_key_prefix) try: service = disco.getNextService(self._discover) except fetchers.HTTPFetchingError as why: raise DiscoveryFailure('Error fetching XRDS document: %s' % (why.why, ), None) if service is None: raise DiscoveryFailure('No usable OpenID services found for %s' % (user_url, ), None) else: return self.beginWithoutDiscovery(service, anonymous)
Start the OpenID authentication process. See steps 1-2 in the overview at the top of this file. @param user_url: Identity URL given by the user. This method performs a textual transformation of the URL to try and make sure it is normalized. For example, a user_url of example.com will be normalized to http://example.com/ normalizing and resolving any redirects the server might issue. @type user_url: unicode @param anonymous: Whether to make an anonymous request of the OpenID provider. Such a request does not ask for an authorization assertion for an OpenID identifier, but may be used with extensions to pass other data. e.g. "I don't care who you are, but I'd like to know your time zone." @type anonymous: bool @returns: An object containing the discovered information will be returned, with a method for building a redirect URL to the server, as described in step 3 of the overview. This object may also be used to add extension arguments to the request, using its L{addExtensionArg<openid.consumer.consumer.AuthRequest.addExtensionArg>} method. @returntype: L{AuthRequest<openid.consumer.consumer.AuthRequest>} @raises openid.consumer.discover.DiscoveryFailure: when I fail to find an OpenID server for this URL. If the C{yadis} package is available, L{openid.consumer.discover.DiscoveryFailure} is an alias for C{yadis.discover.DiscoveryFailure}.
def attributes(self, **kwargs): # pragma: no cover """Retrieve the attribute configuration object. Retrieves a mapping that identifies the custom directory attributes configured for the Directory SyncService instance, and the mapping of the custom attributes to standard directory attributes. Args: **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``directory_attributes.py`` example. """ path = "/directory-sync-service/v1/attributes" r = self._httpclient.request( method="GET", path=path, url=self.url, **kwargs ) return r
Retrieve the attribute configuration object. Retrieves a mapping that identifies the custom directory attributes configured for the Directory SyncService instance, and the mapping of the custom attributes to standard directory attributes. Args: **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``directory_attributes.py`` example.
def mapillary_tag_exists(self): ''' Check existence of required Mapillary tags ''' description_tag = "Image ImageDescription" if description_tag not in self.tags: return False for requirement in ["MAPSequenceUUID", "MAPSettingsUserKey", "MAPCaptureTime", "MAPLongitude", "MAPLatitude"]: if requirement not in self.tags[description_tag].values or json.loads(self.tags[description_tag].values)[requirement] in ["", None, " "]: return False return True
Check existence of required Mapillary tags
def get_groups(self, env, token): """Get groups for the given token. :param env: The current WSGI environment dictionary. :param token: Token to validate and return a group string for. :returns: None if the token is invalid or a string containing a comma separated list of groups the authenticated user is a member of. The first group in the list is also considered a unique identifier for that user. """ groups = None memcache_client = cache_from_env(env) if memcache_client: memcache_key = '%s/auth/%s' % (self.reseller_prefix, token) cached_auth_data = memcache_client.get(memcache_key) if cached_auth_data: expires, groups = cached_auth_data if expires < time(): groups = None s3_auth_details = env.get('swift3.auth_details') if s3_auth_details: if not self.s3_support: self.logger.warning('S3 support is disabled in swauth.') return None if self.swauth_remote: # TODO(gholt): Support S3-style authorization with # swauth_remote mode self.logger.warning('S3-style authorization not supported yet ' 'with swauth_remote mode.') return None try: account, user = s3_auth_details['access_key'].split(':', 1) signature_from_user = s3_auth_details['signature'] msg = s3_auth_details['string_to_sign'] except Exception: self.logger.debug( 'Swauth cannot parse swift3.auth_details value %r' % (s3_auth_details, )) return None path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user)) resp = self.make_pre_authed_request( env, 'GET', path).get_response(self.app) if resp.status_int // 100 != 2: return None if 'x-object-meta-account-id' in resp.headers: account_id = resp.headers['x-object-meta-account-id'] else: path = quote('/v1/%s/%s' % (self.auth_account, account)) resp2 = self.make_pre_authed_request( env, 'HEAD', path).get_response(self.app) if resp2.status_int // 100 != 2: return None account_id = resp2.headers['x-container-meta-account-id'] path = env['PATH_INFO'] env['PATH_INFO'] = path.replace("%s:%s" % (account, user), account_id, 1) detail = json.loads(resp.body) if detail: creds = detail.get('auth') try: auth_encoder, creds_dict = \ swauth.authtypes.validate_creds(creds) except ValueError as e: self.logger.error('%s' % e.args[0]) return None password = creds_dict['hash'] # https://bugs.python.org/issue5285 if isinstance(password, six.text_type): password = password.encode('utf-8') if isinstance(msg, six.text_type): msg = msg.encode('utf-8') valid_signature = base64.encodestring(hmac.new( password, msg, sha1).digest()).strip() if signature_from_user != valid_signature: return None groups = [g['name'] for g in detail['groups']] if '.admin' in groups: groups.remove('.admin') groups.append(account_id) groups = ','.join(groups) return groups if not groups: if self.swauth_remote: with Timeout(self.swauth_remote_timeout): conn = http_connect(self.swauth_remote_parsed.hostname, self.swauth_remote_parsed.port, 'GET', '%s/v2/.token/%s' % (self.swauth_remote_parsed.path, quote(token)), ssl=(self.swauth_remote_parsed.scheme == 'https')) resp = conn.getresponse() resp.read() conn.close() if resp.status // 100 != 2: return None expires_from_now = float(resp.getheader('x-auth-ttl')) groups = resp.getheader('x-auth-groups') if memcache_client: memcache_client.set( memcache_key, (time() + expires_from_now, groups), time=expires_from_now) else: object_name = self._get_concealed_token(token) path = quote('/v1/%s/.token_%s/%s' % (self.auth_account, object_name[-1], object_name)) resp = self.make_pre_authed_request( env, 'GET', path).get_response(self.app) if resp.status_int // 100 != 2: return None detail = 
json.loads(resp.body) if detail['expires'] < time(): self.make_pre_authed_request( env, 'DELETE', path).get_response(self.app) return None groups = [g['name'] for g in detail['groups']] if '.admin' in groups: groups.remove('.admin') groups.append(detail['account_id']) groups = ','.join(groups) if memcache_client: memcache_client.set( memcache_key, (detail['expires'], groups), time=float(detail['expires'] - time())) return groups
Get groups for the given token. :param env: The current WSGI environment dictionary. :param token: Token to validate and return a group string for. :returns: None if the token is invalid or a string containing a comma separated list of groups the authenticated user is a member of. The first group in the list is also considered a unique identifier for that user.
def segment(f, output, target_duration, mpegts): """Segment command.""" try: target_duration = int(target_duration) except ValueError: exit('Error: Invalid target duration.') try: mpegts = int(mpegts) except ValueError: exit('Error: Invalid MPEGTS value.') WebVTTSegmenter().segment(f, output, target_duration, mpegts)
Segment command.
def _parse_unrecognized_segment(self, fptr): """Looks like a valid marker, but not sure from reading the specs. """ msg = ("Unrecognized codestream marker 0x{marker_id:x} encountered at " "byte offset {offset}.") msg = msg.format(marker_id=self._marker_id, offset=fptr.tell()) warnings.warn(msg, UserWarning) cpos = fptr.tell() read_buffer = fptr.read(2) next_item, = struct.unpack('>H', read_buffer) fptr.seek(cpos) if ((next_item & 0xff00) >> 8) == 255: # No segment associated with this marker, so reset # to two bytes after it. segment = Segment(id='0x{0:x}'.format(self._marker_id), offset=self._offset, length=0) else: segment = self._parse_reserved_segment(fptr) return segment
Looks like a valid marker, but not sure from reading the specs.
def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict): """Find proper scales for axes.""" if hist.ndim == 1: bins0 = hist.bins.astype(float) else: bins0 = hist.bins[0].astype(float) xlim = kwargs.pop("xlim", "auto") ylim = kwargs.pop("ylim", "auto") if xlim == "auto": nice_x = True else: nice_x = False if ylim == "auto": nice_y = True else: nice_y = False # TODO: Unify xlim & ylim parameters with matplotlib # TODO: Apply xscale & yscale parameters vega["scales"] = [ { "name": "xscale", "type": "linear", "range": "width", "nice": nice_x, "zero": None, "domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])], # "domain": {"data": "table", "field": "x"} }, { "name": "yscale", "type": "linear", "range": "height", "nice": nice_y, "zero": True if hist.ndim == 1 else None, "domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])] } ] if hist.ndim >= 2: bins1 = hist.bins[1].astype(float) vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]]
Find proper scales for axes.
def surface_or_abstract(cls, predstr): """Instantiate a Pred from either its surface or abstract symbol.""" if predstr.strip('"').lstrip("'").startswith('_'): return cls.surface(predstr) else: return cls.abstract(predstr)
Instantiate a Pred from either its surface or abstract symbol.
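In other words, a (possibly quoted) symbol that starts with an underscore is routed to the surface constructor and anything else to the abstract one; a hedged sketch, assuming the enclosing class is the Pred named in the docstring and using illustrative predicate symbols:

Pred.surface_or_abstract('_dog_n_1')   # leading underscore -> dispatches to cls.surface
Pred.surface_or_abstract('pron')       # no leading underscore -> dispatches to cls.abstract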
def syscall_noreturn(self, func): ''' Call a syscall method. A syscall method is executed outside of any routines, directly in the scheduler loop, which gives it chances to directly operate the event loop. See :py:method::`vlcp.event.core.Scheduler.syscall`. ''' matcher = self.scheduler.syscall(func) while not matcher: yield matcher = self.scheduler.syscall(func) ev, _ = yield (matcher,) return ev
Call a syscall method. A syscall method is executed outside of any routines, directly in the scheduler loop, which gives it chances to directly operate the event loop. See :py:method::`vlcp.event.core.Scheduler.syscall`.
def included_length(self): """Surveyed length, not including "excluded" shots""" return sum([shot.length for shot in self.shots if shot.is_included])
Surveyed length, not including "excluded" shots
def _ordered_categories(df, categories): """ Make the columns in df categorical Parameters: ----------- categories: dict Of the form {str: list}, where the key the column name and the value is the ordered category list """ for col, cats in categories.items(): df[col] = df[col].astype(CategoricalDtype(cats, ordered=True)) return df
Make the columns in df categorical Parameters: ----------- categories: dict Of the form {str: list}, where the key the column name and the value is the ordered category list
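A short sketch of the categories mapping in action (data made up for illustration; CategoricalDtype is assumed to be imported in the helper's own module, as the code above relies on it):

import pandas as pd

df = pd.DataFrame({'size': ['small', 'large', 'medium']})
df = _ordered_categories(df, {'size': ['small', 'medium', 'large']})
print(df['size'].dtype)   # category, ordered with small < medium < large
print(df['size'].min())   # 'small', since the ordering is now meaningful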
def multipublish(self, topic, messages, block=True, timeout=None, raise_error=True): """Publish an iterable of messages to the given topic. :param topic: the topic to publish to :param messages: iterable of bytestrings to publish :param block: wait for a connection to become available before publishing the message. If block is `False` and no connections are available, :class:`~gnsq.errors.NSQNoConnections` is raised :param timeout: if timeout is a positive number, it blocks at most ``timeout`` seconds before raising :class:`~gnsq.errors.NSQNoConnections` :param raise_error: if ``True``, it blocks until a response is received from the nsqd server, and any error response is raised. Otherwise an :class:`~gevent.event.AsyncResult` is returned """ result = AsyncResult() conn = self._get_connection(block=block, timeout=timeout) try: self._response_queues[conn].append(result) conn.multipublish(topic, messages) finally: self._put_connection(conn) if raise_error: return result.get() return result
Publish an iterable of messages to the given topic. :param topic: the topic to publish to :param messages: iterable of bytestrings to publish :param block: wait for a connection to become available before publishing the message. If block is `False` and no connections are available, :class:`~gnsq.errors.NSQNoConnections` is raised :param timeout: if timeout is a positive number, it blocks at most ``timeout`` seconds before raising :class:`~gnsq.errors.NSQNoConnections` :param raise_error: if ``True``, it blocks until a response is received from the nsqd server, and any error response is raised. Otherwise an :class:`~gevent.event.AsyncResult` is returned
def from_dict(cls, data, read_only=False): '''Recreate a feature collection from a dictionary. The dictionary is of the format dumped by :meth:`to_dict`. Additional information, such as whether the feature collection should be read-only, is not included in this dictionary, and is instead passed as parameters to this function. ''' fc = cls(read_only=read_only) fc._features = {} fc._from_dict_update(data) return fc
Recreate a feature collection from a dictionary. The dictionary is of the format dumped by :meth:`to_dict`. Additional information, such as whether the feature collection should be read-only, is not included in this dictionary, and is instead passed as parameters to this function.
def measure(self, v, rf, off=None): """Create/convert a measure using the frame state set on the measures server instance (via :meth:`do_frame`) :param v: The measure to convert :param rf: The frame reference to convert to :param off: The optional offset for the measure """ if off is None: off = {} keys = ["m0", "m1", "m2"] for key in keys: if key in v: if dq.is_quantity(v[key]): v[key] = v[key].to_dict() return _measures.measure(self, v, rf, off)
Create/convert a measure using the frame state set on the measures server instance (via :meth:`do_frame`) :param v: The measure to convert :param rf: The frame reference to convert to :param off: The optional offset for the measure
def init_blueprint(self, blueprint, path="templates.yaml"): """Initialize a Flask Blueprint, similar to init_app, but without the access to the application config. Keyword Arguments: blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None}) path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'}) """ if self._route is not None: raise TypeError("route cannot be set when using blueprints!") # we need to tuck our reference to this Assistant instance # into the blueprint object and find it later! blueprint.assist = self # BlueprintSetupState.add_url_rule gets called underneath the covers and # concats the rule string, so we should set to an empty string to allow # Blueprint('blueprint_api', __name__, url_prefix="/assist") to result in # exposing the rule at "/assist" and not "/assist/". blueprint.add_url_rule( "", view_func=self._flask_assitant_view_func, methods=["POST"] )
Initialize a Flask Blueprint, similar to init_app, but without the access to the application config. Keyword Arguments: blueprint {Flask Blueprint} -- Flask Blueprint instance to initialize (Default: {None}) path {str} -- path to templates yaml file, relative to Blueprint (Default: {'templates.yaml'})
def populate_from_staging(self, staging_table, from_column_list, output_table): """ generate SQL to insert staging table records into the core table based on column_list (If no column list then insert sequentially) """ self.sql_text += 'INSERT INTO ' + output_table + ' (\n' for c in self.col_list: if c != '': self.sql_text += ' ' + c + ',\n' self.sql_text += ' ' + self.date_updated_col + ') (\n' self.sql_text += ' SELECT \n' for c in from_column_list: if c != '': self.sql_text += ' ' + c + ',\n' self.sql_text += ' SYSDATE \n FROM ' + staging_table self.sql_text += '\n);\n'
generate SQL to insert staging table records into the core table based on column_list (If no column list then insert sequentially)
def disable(cls): """Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values. :return: If streams restored successfully. :rtype: bool """ # Skip if not on Windows. if not IS_WINDOWS: return False # Restore default colors. if hasattr(sys.stderr, '_original_stream'): getattr(sys, 'stderr').color = None if hasattr(sys.stdout, '_original_stream'): getattr(sys, 'stdout').color = None # Restore original streams. changed = False if hasattr(sys.stderr, '_original_stream'): changed = True sys.stderr = getattr(sys.stderr, '_original_stream') if hasattr(sys.stdout, '_original_stream'): changed = True sys.stdout = getattr(sys.stdout, '_original_stream') return changed
Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values. :return: If streams restored successfully. :rtype: bool
def _at_dump_functions(self, calculator, rule, scope, block): """ Implements @dump_functions """ sys.stderr.write("%s\n" % repr(rule.namespace._functions))
Implements @dump_functions
def parse_file(fname): """Parse a python file into an AST. This is a very thin wrapper around ast.parse TODO: Handle encodings other than the default for Python 2 (issue #26) """ try: with fopen(fname) as f: fstr = f.read() except IOError: if fname != 'stdin': raise sys.stdout.write('\nReading from stdin:\n\n') fstr = sys.stdin.read() fstr = fstr.replace('\r\n', '\n').replace('\r', '\n') if not fstr.endswith('\n'): fstr += '\n' return ast.parse(fstr, filename=fname)
Parse a python file into an AST. This is a very thin wrapper around ast.parse TODO: Handle encodings other than the default for Python 2 (issue #26)
def vpc_peering_connection_present(name, requester_vpc_id=None, requester_vpc_name=None, peer_vpc_id=None, peer_vpc_name=None, conn_name=None, peer_owner_id=None, peer_region=None, region=None, key=None, keyid=None, profile=None): ''' name Name of the state requester_vpc_id ID of the requesting VPC. Exclusive with requester_vpc_name. requester_vpc_name Name tag of the requesting VPC. Exclusive with requester_vpc_id. peer_vpc_id ID of the VPC to create a VPC peering connection with. This can be a VPC in another account. Exclusive with peer_vpc_name. peer_vpc_name Name tag of the VPC to create a VPC peering connection with. This can only be a VPC in the same account, else resolving it into a vpc ID will fail. Exclusive with peer_vpc_id. conn_name The name to use for this VPC peering connection. peer_owner_id ID of the owner of the peer VPC. Defaults to your account ID, so a value is required if peering with a VPC in a different account. peer_region Region of peer VPC. For inter-region vpc peering connections. Not required for intra-region peering connections. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.11.0 Example: .. code-block:: yaml ensure peering twixt local vpc and the other guys: boto_vpc.vpc_peering_connection_present: - requester_vpc_name: my_local_vpc - peer_vpc_name: some_other_guys_vpc - conn_name: peering_from_here_to_there - peer_owner_id: 012345654321 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if __salt__['boto_vpc.is_peering_connection_pending'](conn_name=conn_name, region=region, key=key, keyid=keyid, profile=profile): if __salt__['boto_vpc.peering_connection_pending_from_vpc'](conn_name=conn_name, vpc_id=requester_vpc_id, vpc_name=requester_vpc_name, region=region, key=key, keyid=keyid, profile=profile): ret['comment'] = ('VPC peering {0} already requested - pending ' 'acceptance by {1}'.format(conn_name, peer_owner_id or peer_vpc_name or peer_vpc_id)) log.info(ret['comment']) return ret return accept_vpc_peering_connection(name=name, conn_name=conn_name, region=region, key=key, keyid=keyid, profile=profile) return request_vpc_peering_connection(name=name, requester_vpc_id=requester_vpc_id, requester_vpc_name=requester_vpc_name, peer_vpc_id=peer_vpc_id, peer_vpc_name=peer_vpc_name, conn_name=conn_name, peer_owner_id=peer_owner_id, peer_region=peer_region, region=region, key=key, keyid=keyid, profile=profile)
name
    Name of the state

requester_vpc_id
    ID of the requesting VPC. Exclusive with requester_vpc_name.

requester_vpc_name
    Name tag of the requesting VPC. Exclusive with requester_vpc_id.

peer_vpc_id
    ID of the VPC to create the VPC peering connection with. This can be a
    VPC in another account. Exclusive with peer_vpc_name.

peer_vpc_name
    Name tag of the VPC to create the VPC peering connection with. This can
    only be a VPC in the same account, else resolving it into a vpc ID will
    fail. Exclusive with peer_vpc_id.

conn_name
    The name to use for this VPC peering connection.

peer_owner_id
    ID of the owner of the peer VPC. Defaults to your account ID, so a value
    is required if peering with a VPC in a different account.

peer_region
    Region of peer VPC. For inter-region vpc peering connections. Not
    required for intra-region peering connections.

region
    Region to connect to.

key
    Secret key to be used.

keyid
    Access key to be used.

profile
    A dict with region, key and keyid, or a pillar key (string) that
    contains a dict with region, key and keyid.

.. versionadded:: 2016.11.0

Example:

.. code-block:: yaml

    ensure peering twixt local vpc and the other guys:
      boto_vpc.vpc_peering_connection_present:
        - requester_vpc_name: my_local_vpc
        - peer_vpc_name: some_other_guys_vpc
        - conn_name: peering_from_here_to_there
        - peer_owner_id: 012345654321
def statement(self, days=60):
    """Download the :py:class:`ofxparse.Statement` given the time range

    :param days: Number of days to look back at
    :type days: integer
    :rtype: :py:class:`ofxparse.Statement`
    """
    parsed = self.download_parsed(days=days)
    return parsed.account.statement
Download the :py:class:`ofxparse.Statement` given the time range

:param days: Number of days to look back at
:type days: integer
:rtype: :py:class:`ofxparse.Statement`
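A rough usage sketch, assuming an already-configured ofxclient-style account object; the setup and the variable name `account` are illustrative, not from the source.

# 'account' is assumed to be a configured ofxclient Account instance.
stmt = account.statement(days=30)       # ofxparse.Statement for the last 30 days
print(stmt.balance, stmt.currency)      # summary fields exposed by ofxparse
for txn in stmt.transactions:           # individual transactions in the range
    print(txn.date, txn.amount, txn.payee)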
def p_ansible_sentence(self, t): """ansible_sentence : ANSIBLE VAR LPAREN features RPAREN""" t[0] = ansible(t[2], t[4], line=t.lineno(1))
ansible_sentence : ANSIBLE VAR LPAREN features RPAREN
def gpsFromUTC(year, month, day, hour, min, sec, leapSecs=14):
    """converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay

    a good reference is: http://www.oc.nps.navy.mil/~jclynch/timsys.html

    This is based on the following facts (see reference above):

    GPS time is basically measured in (atomic) seconds since
    January 6, 1980, 00:00:00.0 (the GPS Epoch)

    The GPS week starts on Saturday midnight (Sunday morning), and runs
    for 604800 seconds.

    GPS time is currently ahead of UTC by the accumulated number of leap
    seconds (this routine assumes 14 by default; see above reference). While
    GPS SVs transmit this difference and the date when another leap second
    takes effect, future leap seconds cannot be predicted. This routine is
    precise until the next leap second is introduced and has to be updated
    after that.

    SOW = Seconds of Week
    SOD = Seconds of Day

    Note: Python represents time in integer seconds, fractions are lost!!!
    """
    secFract = sec % 1
    epochTuple = gpsEpoch + (-1, -1, 0)
    t0 = time.mktime(epochTuple)
    t = time.mktime((year, month, day, hour, min, sec, -1, -1, 0))
    # Note: time.mktime strictly works in localtime and to yield UTC, it should be
    # corrected with time.timezone
    # However, since we use the difference, this correction is unnecessary.
    # Warning: trouble if daylight savings flag is set to -1 or 1 !!!
    t = t + leapSecs
    tdiff = t - t0
    gpsSOW = (tdiff % secsInWeek) + secFract
    gpsWeek = int(math.floor(tdiff/secsInWeek))
    gpsDay = int(math.floor(gpsSOW/secsInDay))
    gpsSOD = (gpsSOW % secsInDay)
    return (gpsWeek, gpsSOW, gpsDay, gpsSOD)
converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay

a good reference is: http://www.oc.nps.navy.mil/~jclynch/timsys.html

This is based on the following facts (see reference above):

GPS time is basically measured in (atomic) seconds since
January 6, 1980, 00:00:00.0 (the GPS Epoch)

The GPS week starts on Saturday midnight (Sunday morning), and runs
for 604800 seconds.

GPS time is currently ahead of UTC by the accumulated number of leap
seconds (this routine assumes 14 by default; see above reference). While
GPS SVs transmit this difference and the date when another leap second
takes effect, future leap seconds cannot be predicted. This routine is
precise until the next leap second is introduced and has to be updated
after that.

SOW = Seconds of Week
SOD = Seconds of Day

Note: Python represents time in integer seconds, fractions are lost!!!
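A quick sanity check of the conversion, assuming the module-level constants gpsEpoch, secsInWeek and secsInDay referenced above are defined, with gpsEpoch = (1980, 1, 6, 0, 0, 0):

# The GPS epoch itself, with no leap-second offset, is week 0, second 0.
print(gpsFromUTC(1980, 1, 6, 0, 0, 0, leapSecs=0))   # -> (0, 0.0, 0, 0.0)

# Exactly one day later is still week 0, but GPS day 1, 86400 s into the week.
print(gpsFromUTC(1980, 1, 7, 0, 0, 0, leapSecs=0))   # -> (0, 86400.0, 1, 0.0)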