positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def expanduser(path):
    """
    Args:
        path (pathlike): A path to expand
    Returns:
        `fsnative`

    Like :func:`python:os.path.expanduser` but supports unicode home
    directories under Windows + Python 2 and always returns a `fsnative`.
    """
    # Normalize the input to the native filesystem string type first.
    path = path2fsn(path)

    if path == "~":
        # Bare "~" -> current user's home directory (may be None upstream).
        return _get_userdir()
    elif path.startswith("~" + sep) or (
            altsep is not None and path.startswith("~" + altsep)):
        # "~/rest" (or "~\rest" on platforms with an altsep): expand for the
        # current user, keeping the separator and remainder intact.
        userdir = _get_userdir()
        if userdir is None:
            # Unknown home dir: return the path unexpanded, like os.path.expanduser.
            return path
        return userdir + path[1:]
    elif path.startswith("~"):
        # "~username[/rest]" form: split the username off at the first separator.
        sep_index = path.find(sep)
        if altsep is not None:
            alt_index = path.find(altsep)
            # Prefer whichever separator occurs first.
            # NOTE(review): when sep is absent (sep_index == -1) an altsep hit is
            # never taken because `alt_index < -1` is always false — confirm this
            # is intended for paths like "~user\rest" with only an altsep.
            if alt_index != -1 and alt_index < sep_index:
                sep_index = alt_index
        if sep_index == -1:
            user = path[1:]
            rest = ""
        else:
            user = path[1:sep_index]
            rest = path[sep_index:]
        userdir = _get_userdir(user)
        if userdir is not None:
            return userdir + rest
        else:
            # Unknown user: leave the path as-is.
            return path
    else:
        # No tilde prefix: nothing to expand.
        return path
Args: path (pathlike): A path to expand Returns: `fsnative` Like :func:`python:os.path.expanduser` but supports unicode home directories under Windows + Python 2 and always returns a `fsnative`.
def multiply(self, p, e):
    """Multiply a point by an integer.

    Args:
        p: a curve point as an (x, y) pair, or ``self._infinity``.
        e: integer multiplier; reduced modulo the curve order first.

    Returns:
        The product point as ``self.Point(x, y)``, ``self._infinity`` for the
        degenerate cases, or ``False`` if the input point fails to parse.
    """
    # Reduce the scalar into the group order.
    e %= self.order()
    # 0 * P and n * infinity are both the point at infinity.
    if p == self._infinity or e == 0:
        return self._infinity
    pubkey = create_string_buffer(64)
    # Build the 65-byte uncompressed SEC encoding: 0x04 || X(32) || Y(32).
    public_pair_bytes = b'\4' + to_bytes_32(p[0]) + to_bytes_32(p[1])
    r = libsecp256k1.secp256k1_ec_pubkey_parse(
        libsecp256k1.ctx, pubkey, public_pair_bytes, len(public_pair_bytes))
    if not r:
        # NOTE(review): this failure path returns False while the others return
        # a point — callers must handle both; confirm this asymmetry is intended.
        return False
    # In-place scalar multiplication of the parsed public key.
    r = libsecp256k1.secp256k1_ec_pubkey_tweak_mul(libsecp256k1.ctx, pubkey, to_bytes_32(e))
    if not r:
        # tweak_mul fails for an invalid scalar; treat the result as infinity.
        return self._infinity
    pubkey_serialized = create_string_buffer(65)
    pubkey_size = c_size_t(65)
    libsecp256k1.secp256k1_ec_pubkey_serialize(
        libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size), pubkey,
        SECP256K1_EC_UNCOMPRESSED)
    # Skip the 0x04 prefix byte and split the 32-byte coordinates.
    x = from_bytes_32(pubkey_serialized[1:33])
    y = from_bytes_32(pubkey_serialized[33:])
    return self.Point(x, y)
Multiply a point by an integer.
def restart(name):
    '''
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    '''
    cmd = '/usr/sbin/svcadm restart {0}'.format(name)
    retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
    if retcode:
        # svcadm reported a failure
        return False
    # svcadm restart neither clears a maintenance state nor confirms the
    # service came back 'online', so delegate the final check to start().
    return start(name)
Restart the named service CLI Example: .. code-block:: bash salt '*' service.restart <service name>
def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace."""
    location_maps = []
    # Build one {normalized path: location} map for sources, one for instances.
    for element_group in (designspace.sources, designspace.instances):
        mapping = {_normpath(item.path): item.location for item in element_group}
        location_maps.append(mapping)
    return location_maps
Map font filenames to their locations in a designspace.
def issues(self, kind, email):
    """ Filter unique issues for given activity type and email """
    # Set comprehension deduplicates issue identifiers directly.
    unique_issues = {
        unicode(activity.issue)
        for activity in self.activities()
        if activity.kind == kind and activity.user['email'] == email
    }
    return list(unique_issues)
Filter unique issues for given activity type and email
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
    """Print ASCII diagram in Jupyter."""
    if not cycle:
        p.text(self.to_text_diagram())
        return
    # Cycles should be impossible here; emit a placeholder just in case.
    p.text('Circuit(...)')
Print ASCII diagram in Jupyter.
def get_task_var(self, key, default=None):
    """
    Fetch the value of a task variable related to connection configuration,
    or, if delegate_to is active, fetch the same variable via HostVars for
    the delegated-to machine.

    When running with delegate_to, Ansible tasks have variables associated
    with the original machine, not the delegated-to machine, therefore it
    does not make sense to extract connection-related configuration for the
    delegated-to machine from them.
    """
    task_vars = self._task_vars
    if not task_vars:
        return default

    if self.delegate_to_hostname is None:
        # Direct lookup against the task's own variables.
        if key in task_vars:
            return task_vars[key]
        return default

    # Delegated execution: look the key up in the delegated host's vars.
    delegated = task_vars['ansible_delegated_vars']
    if self.delegate_to_hostname in delegated:
        host_vars = delegated[self.delegate_to_hostname]
        if key in host_vars:
            return host_vars[key]
    return default
Fetch the value of a task variable related to connection configuration, or, if delegate_to is active, fetch the same variable via HostVars for the delegated-to machine. When running with delegate_to, Ansible tasks have variables associated with the original machine, not the delegated-to machine, therefore it does not make sense to extract connection-related configuration for the delegated-to machine from them.
def _parse_hparams(hparams):
    """Split hparams, based on key prefixes.

    Args:
        hparams: hyperparameters

    Returns:
        Tuple of hparams for respectively: agent, optimizer, runner,
        replay_buffer.
    """
    prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"]
    ret = []

    for prefix in prefixes:
        ret_dict = {}
        for key in hparams.values():
            # BUG FIX: was `if prefix in key`, which matched the prefix anywhere
            # in the name while the slice below assumes it is at the start.
            if key.startswith(prefix):
                par_name = key[len(prefix):]
                ret_dict[par_name] = hparams.get(key)
        ret.append(ret_dict)

    return ret
Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectively: agent, optimizer, runner, replay_buffer.
def _client_send(self, msg):
    """Sends an Rpc message through the connection.

    Args:
        msg: string, the message to send.

    Raises:
        Error: a socket error occurred during the send.
    """
    try:
        # Messages are newline-delimited UTF-8 on the wire.
        self._client.write(msg.encode("utf8") + b'\n')
        self._client.flush()
        self.log.debug('Snippet sent %s.', msg)
    except socket.error as e:
        # Wrap the low-level socket failure in the module's Error type so the
        # caller sees which device and which RPC message were involved.
        raise Error(
            self._ad,
            'Encountered socket error "%s" sending RPC message "%s"' % (e, msg))
Sends an Rpc message through the connection. Args: msg: string, the message to send. Raises: Error: a socket error occurred during the send.
def ensemble_res_1to1(ensemble, pst,facecolor='0.5',logger=None,filename=None,
                      skip_groups=[],base_ensemble=None,**kwargs):
    """helper function to plot ensemble 1-to-1 plots showing the simulated range

    Parameters
    ----------
    ensemble : varies
        the ensemble argument can be a pandas.DataFrame or derived type or a str, which
        is treated as a filename.  Optionally, ensemble can be a list of these types or
        a dict, in which case, the keys are treated as facecolor str (e.g., 'b', 'y', etc).
    pst : pyemu.Pst
        pst instance
    facecolor : str
        the histogram facecolor.  Only applies if ensemble is a single thing
    filename : str
        the name of the pdf to create. If None, return figs without saving.  Default is None.
    base_ensemble : varies
        an optional ensemble argument for the observations + noise ensemble.
        This will be plotted as a transparent red bar on the 1to1 plot.

    """
    # NOTE(review): skip_groups=[] is a mutable default argument; it is only
    # iterated here so it is harmless, but a tuple default would be safer.
    if logger is None:
        logger=Logger('Default_Loggger.log',echo=False)
    logger.log("plot res_1to1")
    obs = pst.observation_data
    # Normalize the ensemble argument(s) into a {facecolor: ensemble} dict.
    ensembles = _process_ensemble_arg(ensemble,facecolor,logger)
    if base_ensemble is not None:
        # obs+noise ensemble is always drawn in red ("r").
        base_ensemble = _process_ensemble_arg(base_ensemble,"r",logger)
    if "grouper" in kwargs:
        raise NotImplementedError()
    else:
        # One subplot pair per observation group.
        grouper = obs.groupby(obs.obgnme).groups
    for skip_group in skip_groups:
        grouper.pop(skip_group)
    fig = plt.figure(figsize=figsize)
    if "fig_title" in kwargs:
        plt.figtext(0.5,0.5,kwargs["fig_title"])
    else:
        plt.figtext(0.5, 0.5, "pyemu.Pst.plot(kind='1to1')\nfrom pest control file '{0}'\n at {1}"
                    .format(pst.filename, str(datetime.now())), ha="center")
    #if plot_hexbin:
    #    pdfname = pst.filename.replace(".pst", ".1to1.hexbin.pdf")
    #else:
    #    pdfname = pst.filename.replace(".pst", ".1to1.pdf")
    figs = []
    ax_count = 0
    for g, names in grouper.items():
        logger.log("plotting 1to1 for {0}".format(g))
        obs_g = obs.loc[names, :]
        logger.statement("using control file obsvals to calculate residuals")
        # By default, drop zero-weight observations from the plot.
        if "include_zero" not in kwargs or kwargs["include_zero"] is False:
            obs_g = obs_g.loc[obs_g.weight > 0, :]
        if obs_g.shape[0] == 0:
            logger.statement("no non-zero obs for group '{0}'".format(g))
            logger.log("plotting 1to1 for {0}".format(g))
            continue
        # Start a new page of axes when the current page is full.
        if ax_count % (nr * nc) == 0:
            if ax_count > 0:
                plt.tight_layout()
                #pdf.savefig()
                #plt.close(fig)
                figs.append(fig)
                fig = plt.figure(figsize=figsize)
            axes = get_page_axes()
            ax_count = 0
        ax = axes[ax_count]
        # Axis limits come from the obs values, or the obs+noise ensemble range.
        if base_ensemble is None:
            mx = obs_g.obsval.max()
            mn = obs_g.obsval.min()
        else:
            mn = base_ensemble["r"].loc[:,names].min().min()
            mx = base_ensemble["r"].loc[:, names].max().max()
        #if obs_g.shape[0] == 1:
        mx *= 1.1
        mn *= 0.9
        #ax.axis('square')
        if base_ensemble is not None:
            # Shade the obs+noise range as a transparent band, sorted by obsval.
            obs_gg = obs_g.sort_values(by="obsval")
            for c, en in base_ensemble.items():
                en_g = en.loc[:, obs_gg.obsnme]
                ex = en_g.max()
                en = en_g.min()
                #[ax.plot([ov, ov], [een, eex], color=c,alpha=0.3) for ov, een, eex in zip(obs_g.obsval.values, en.values, ex.values)]
                ax.fill_between(obs_gg.obsval,en,ex,facecolor=c,alpha=0.2)
        #ax.scatter([obs_g.sim], [obs_g.obsval], marker='.', s=10, color='b')
        # Draw each ensemble's simulated min-max range as a vertical bar per obs.
        for c,en in ensembles.items():
            en_g = en.loc[:,obs_g.obsnme]
            ex = en_g.max()
            en = en_g.min()
            [ax.plot([ov,ov],[een,eex],color=c) for ov,een,eex in zip(obs_g.obsval.values,en.values,ex.values)]
        # 1-to-1 reference line.
        ax.plot([mn,mx],[mn,mx],'k--',lw=1.0)
        xlim = (mn,mx)
        ax.set_xlim(mn,mx)
        ax.set_ylim(mn,mx)
        # Switch to scientific notation for large magnitudes.
        if mx > 1.0e5:
            ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))
            ax.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))
        ax.grid()
        ax.set_xlabel("observed",labelpad=0.1)
        ax.set_ylabel("simulated",labelpad=0.1)
        ax.set_title("{0}) group:{1}, {2} observations".
                     format(abet[ax_count], g, obs_g.shape[0]), loc="left")
        ax_count += 1
        # Second axis of the pair: residuals (simulated - observed).
        ax = axes[ax_count]
        #ax.scatter(obs_g.obsval, obs_g.res, marker='.', s=10, color='b')
        if base_ensemble is not None:
            obs_gg = obs_g.sort_values(by="obsval")
            for c, en in base_ensemble.items():
                en_g = en.loc[:, obs_gg.obsnme].subtract(obs_gg.obsval)
                ex = en_g.max()
                en = en_g.min()
                #[ax.plot([ov, ov], [een, eex], color=c,alpha=0.3) for ov, een, eex in zip(obs_g.obsval.values, en.values, ex.values)]
                ax.fill_between(obs_gg.obsval,en,ex,facecolor=c,alpha=0.2)
        for c,en in ensembles.items():
            en_g = en.loc[:,obs_g.obsnme].subtract(obs_g.obsval,axis=1)
            ex = en_g.max()
            en = en_g.min()
            [ax.plot([ov,ov],[een,eex],color=c) for ov,een,eex in zip(obs_g.obsval.values,en.values,ex.values)]
        # if base_ensemble is not None:
        #     if base_ensemble is not None:
        #         for c, en in base_ensemble.items():
        #             en_g = en.loc[:, obs_g.obsnme].subtract(obs_g.obsval,axis=1)
        #             ex = en_g.max()
        #             en = en_g.min()
        #             [ax.plot([ov, ov], [een, eex], color=c, alpha=0.3) for ov, een, eex in
        #              zip(obs_g.obsval.values, en.values, ex.values)]
        # Symmetric y-limits around zero for the residual panel.
        ylim = ax.get_ylim()
        mx = max(np.abs(ylim[0]), np.abs(ylim[1]))
        if obs_g.shape[0] == 1:
            mx *= 1.1
        ax.set_ylim(-mx, mx)
        #show a zero residuals line
        ax.plot(xlim, [0,0], 'k--', lw=1.0)
        ax.set_xlim(xlim)
        ax.set_ylabel("residual",labelpad=0.1)
        ax.set_xlabel("observed",labelpad=0.1)
        ax.set_title("{0}) group:{1}, {2} observations".
                     format(abet[ax_count], g, obs_g.shape[0]), loc="left")
        ax.grid()
        if ax.get_xlim()[1] > 1.0e5:
            ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%1.0e'))
        ax_count += 1
        logger.log("plotting 1to1 for {0}".format(g))
    # Blank out any unused axes on the final page.
    for a in range(ax_count, nr * nc):
        axes[a].set_axis_off()
        axes[a].set_yticks([])
        axes[a].set_xticks([])
    plt.tight_layout()
    #pdf.savefig()
    #plt.close(fig)
    figs.append(fig)
    if filename is not None:
        plt.tight_layout()
        with PdfPages(filename) as pdf:
            for fig in figs:
                pdf.savefig(fig)
                plt.close(fig)
        logger.log("plot res_1to1")
    else:
        logger.log("plot res_1to1")
    return figs
helper function to plot ensemble 1-to-1 plots showing the simulated range Parameters ---------- ensemble : varies the ensemble argument can be a pandas.DataFrame or derived type or a str, which is treated as a filename. Optionally, ensemble can be a list of these types or a dict, in which case, the keys are treated as facecolor str (e.g., 'b', 'y', etc). pst : pyemu.Pst pst instance facecolor : str the histogram facecolor. Only applies if ensemble is a single thing filename : str the name of the pdf to create. If None, return figs without saving. Default is None. base_ensemble : varies an optional ensemble argument for the observations + noise ensemble. This will be plotted as a transparent red bar on the 1to1 plot.
def vm_state(vm_=None, **kwargs):
    '''
    Return list of all the vms and their state.

    If you pass a VM name in as an argument then it will return info
    for just the named VM, otherwise it will return all VMs.

    :param vm_: name of the domain
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.vm_state <domain>
    '''
    def _info(dom):
        '''
        Compute domain state
        '''
        # info()[0] is the libvirt state code; map it to a readable name.
        raw = dom.info()
        return VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')

    conn = __get_conn(**kwargs)
    if vm_:
        info = {vm_: _info(_get_domain(conn, vm_))}
    else:
        info = {domain.name(): _info(domain)
                for domain in _get_domain(conn, iterable=True)}
    conn.close()
    return info
Return list of all the vms and their state. If you pass a VM name in as an argument then it will return info for just the named VM, otherwise it will return all VMs. :param vm_: name of the domain :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.vm_state <domain>
def _setup_parser(self, filename=None):
    """
    Configure the ConfigParser instance the way we want it.

    Args:
        filename (str) or None

    Returns:
        configparser.ConfigParser
    """
    assert isinstance(filename, str) or filename is None

    # If we are not overriding the config filename
    if not filename:
        filename = MACKUP_CONFIG_FILE

    # BUG FIX: SafeConfigParser was deprecated and removed in Python 3.12;
    # ConfigParser is its drop-in replacement.
    parser = configparser.ConfigParser(allow_no_value=True)
    # The double os.path.join(os.path.join(...)) was redundant; one join suffices.
    parser.read(os.path.join(os.environ['HOME'], filename))
    return parser
Configure the ConfigParser instance the way we want it. Args: filename (str) or None Returns: SafeConfigParser
def NEW_DEBUG_FRAME(self, requestHeader):
    """
    Initialize a debug frame with requestHeader.

    Frame count is updated and will be attached to respond header.
    The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data]
    Some of them may be None.
    """
    if not self.DEBUG_FLAG:  # pragma no branch (Flag always set in tests)
        return
    frame = [requestHeader, None, None, None]
    # Grow the ring buffer until it is full, then recycle slot 0.
    if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1:  # pragma no branch (Should be covered)
        self._frameBuffer.append(frame)
    else:
        self._frameBuffer[0] = frame  # pragma no cover (Should be covered)
    self._frameCount = len(self._frameBuffer) - 1
Initialize a debug frame with requestHeader Frame count is updated and will be attached to respond header The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data] Some of them may be None
def _execute(self, command,  # type: str
             args,  # type: List[str]
             env_vars=None,  # type: EnvVars
             shim=None  # type: OptStr
             ):
    # type: (...) -> Tuple[int, bytes, bytes]
    """Execute a pip command with the given arguments."""
    full_args = [command] + args
    logger.debug("calling pip %s", ' '.join(full_args))
    # Delegate to the wrapped pip and return its (rc, stdout, stderr) triple.
    return self._wrapped_pip.main(full_args, env_vars=env_vars, shim=shim)
Execute a pip command with the given arguments.
def _attach_document(self, doc):
    ''' Attach a model to a Bokeh |Document|.

    This private interface should only ever called by the Document
    implementation to set the private ._document field properly

    '''
    current = self._document
    # A model may belong to at most one document at a time.
    if current is not None and current is not doc:
        raise RuntimeError("Models must be owned by only a single document, %r is already in a doc" % (self))
    doc.theme.apply_to_model(self)
    self._document = doc
    self._update_event_callbacks()
Attach a model to a Bokeh |Document|. This private interface should only ever called by the Document implementation to set the private ._document field properly
def pairwise_ellpitical_binary(sources, eps, far=None):
    """
    Do a pairwise comparison of all sources and determine if they have a normalized distance within
    eps.

    Form this into a matrix of shape NxN.


    Parameters
    ----------
    sources : list
        A list of sources (objects with parameters: ra,dec,a,b,pa)

    eps : float
        Normalised distance constraint.

    far : float
        If sources have a dec that differs by more than this amount then they are considered to be not matched.
        This is a short-cut around performing GCD calculations.

    Returns
    -------
    prob : numpy.ndarray
        A 2d array of True/False.

    See Also
    --------
    :func:`AegeanTools.cluster.norm_dist`
    """
    if far is None:
        # Default cutoff: the largest semi-major axis (arcsec -> degrees).
        far = max(a.a/3600 for a in sources)
    l = len(sources)
    distances = np.zeros((l, l), dtype=bool)
    for i in range(l):
        for j in range(i, l):
            if i == j:
                # A source is never paired with itself.
                distances[i, j] = False
                continue
            src1 = sources[i]
            src2 = sources[j]
            # NOTE(review): this break assumes sources are sorted by dec so that
            # all later j are even farther away — confirm callers sort the input.
            if src2.dec - src1.dec > far:
                break
            # Cheap RA test (scaled by cos(dec)) before the expensive norm_dist.
            if abs(src2.ra - src1.ra)*np.cos(np.radians(src1.dec)) > far:
                continue
            # NOTE(review): this stores True when the normalised distance EXCEEDS
            # eps, while the docstring says "within eps" — verify the intended
            # polarity against the callers of this function.
            distances[i, j] = norm_dist(src1, src2) > eps
            # Mirror into the lower triangle to keep the matrix symmetric.
            distances[j, i] = distances[i, j]
    return distances
Do a pairwise comparison of all sources and determine if they have a normalized distance within eps. Form this into a matrix of shape NxN. Parameters ---------- sources : list A list of sources (objects with parameters: ra,dec,a,b,pa) eps : float Normalised distance constraint. far : float If sources have a dec that differs by more than this amount then they are considered to be not matched. This is a short-cut around performing GCD calculations. Returns ------- prob : numpy.ndarray A 2d array of True/False. See Also -------- :func:`AegeanTools.cluster.norm_dist`
def filterflags_general_tags(tags_list, has_any=None, has_all=None,
                             has_none=None, min_num=None, max_num=None,
                             any_startswith=None, any_endswith=None,
                             in_any=None, any_match=None, none_match=None,
                             logic='and', ignore_case=True):
    r"""
    maybe integrate into utool? Seems pretty general

    Args:
        tags_list (list):
        has_any (None): (default = None)
        has_all (None): (default = None)
        min_num (None): (default = None)
        max_num (None): (default = None)

    Notes:
        in_any should probably be ni_any

    TODO: make this function more natural

    CommandLine:
        python -m utool.util_tags --exec-filterflags_general_tags
        python -m utool.util_tags --exec-filterflags_general_tags:0  --helpx
        python -m utool.util_tags --exec-filterflags_general_tags:0
        python -m utool.util_tags --exec-filterflags_general_tags:0  --none_match n
        python -m utool.util_tags --exec-filterflags_general_tags:0  --has_none=n,o
        python -m utool.util_tags --exec-filterflags_general_tags:1
        python -m utool.util_tags --exec-filterflags_general_tags:2

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_tags import *  # NOQA
        >>> import utool as ut
        >>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
        >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
        >>> print('kwargs = %r' % (kwargs,))
        >>> flags = filterflags_general_tags(tags_list, **kwargs)
        >>> print(flags)
        >>> result = ut.compress(tags_list, flags)
        >>> print('result = %r' % (result,))

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_tags import *  # NOQA
        >>> import utool as ut
        >>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
        >>> has_all = 'n'
        >>> min_num = 1
        >>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
        >>> result = ut.compress(tags_list, flags)
        >>> print('result = %r' % (result,))

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_tags import *  # NOQA
        >>> import utool as ut
        >>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
        >>> kwargs = {
        >>>     'any_endswith': 'n',
        >>>     'any_match': None,
        >>>     'any_startswith': 'n',
        >>>     'has_all': None,
        >>>     'has_any': None,
        >>>     'has_none': None,
        >>>     'max_num': 3,
        >>>     'min_num': 1,
        >>>     'none_match': ['P'],
        >>> }
        >>> flags = filterflags_general_tags(tags_list, **kwargs)
        >>> filtered = ut.compress(tags_list, flags)
        >>> result = ('result = %s' % (ut.repr2(filtered),))
        result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
    """
    import numpy as np
    import utool as ut

    def _fix_tags(tags):
        # Normalize a tag collection into a set of text values.
        if ignore_case:
            return set([]) if tags is None else {six.text_type(t.lower()) for t in tags}
        else:
            # BUG FIX: previously built six.text_type() (an empty string) for
            # every tag, collapsing the case-sensitive path to {''}.
            return set([]) if tags is None else {six.text_type(t) for t in tags}

    if logic is None:
        logic = 'and'

    logic_func = {
        'and': np.logical_and,
        'or': np.logical_or,
    }[logic]

    default_func = {
        'and': np.ones,
        'or': np.zeros,
    }[logic]

    tags_list_ = [_fix_tags(tags_) for tags_ in tags_list]
    # BUG FIX: np.bool was removed from numpy; the builtin bool is equivalent.
    flags = default_func(len(tags_list_), dtype=bool)

    # Cardinality constraints.
    if min_num is not None:
        flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)

    if max_num is not None:
        flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)

    # Set-membership constraints.
    if has_any is not None:
        has_any = _fix_tags(set(ut.ensure_iterable(has_any)))
        flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)

    if has_none is not None:
        has_none = _fix_tags(set(ut.ensure_iterable(has_none)))
        flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)

    if has_all is not None:
        has_all = _fix_tags(set(ut.ensure_iterable(has_all)))
        flags_ = [len(has_all.intersection(tags_)) == len(has_all)
                  for tags_ in tags_list_]
        logic_func(flags, flags_, out=flags)

    def _test_item(tags_, fields, op, compare):
        # True iff op(number of tags matching any field, 0) holds.
        t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
        num_passed = sum(t_flags)
        flag = op(num_passed, 0)
        return flag

    def _flag_tags(tags_list, fields, op, compare):
        flags = [_test_item(tags_, fields, op, compare) for tags_ in tags_list_]
        return flags

    def _exec_filter(flags, tags_list, fields, op, compare):
        # Apply one string-comparison filter and fold it into flags in place.
        if fields is not None:
            fields = ut.ensure_iterable(fields)
            if ignore_case:
                fields = [f.lower() for f in fields]
            flags_ = _flag_tags(tags_list, fields, op, compare)
            logic_func(flags, flags_, out=flags)
        return flags

    flags = _exec_filter(
        flags, tags_list, any_startswith, operator.gt, six.text_type.startswith)
    flags = _exec_filter(
        flags, tags_list, in_any, operator.gt, operator.contains)
    flags = _exec_filter(
        flags, tags_list, any_endswith, operator.gt, six.text_type.endswith)
    flags = _exec_filter(
        flags, tags_list, any_match, operator.gt, lambda t, f: re.match(f, t))
    flags = _exec_filter(
        flags, tags_list, none_match, operator.eq, lambda t, f: re.match(f, t))
    return flags
r""" maybe integrate into utool? Seems pretty general Args: tags_list (list): has_any (None): (default = None) has_all (None): (default = None) min_num (None): (default = None) max_num (None): (default = None) Notes: in_any should probably be ni_any TODO: make this function more natural CommandLine: python -m utool.util_tags --exec-filterflags_general_tags python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx python -m utool.util_tags --exec-filterflags_general_tags:0 python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o python -m utool.util_tags --exec-filterflags_general_tags:1 python -m utool.util_tags --exec-filterflags_general_tags:2 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_tags import * # NOQA >>> import utool as ut >>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']] >>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list) >>> print('kwargs = %r' % (kwargs,)) >>> flags = filterflags_general_tags(tags_list, **kwargs) >>> print(flags) >>> result = ut.compress(tags_list, flags) >>> print('result = %r' % (result,)) Example1: >>> # ENABLE_DOCTEST >>> from utool.util_tags import * # NOQA >>> import utool as ut >>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']] >>> has_all = 'n' >>> min_num = 1 >>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num) >>> result = ut.compress(tags_list, flags) >>> print('result = %r' % (result,)) Example2: >>> # ENABLE_DOCTEST >>> from utool.util_tags import * # NOQA >>> import utool as ut >>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']] >>> kwargs = { >>> 'any_endswith': 'n', >>> 'any_match': None, >>> 'any_startswith': 
'n', >>> 'has_all': None, >>> 'has_any': None, >>> 'has_none': None, >>> 'max_num': 3, >>> 'min_num': 1, >>> 'none_match': ['P'], >>> } >>> flags = filterflags_general_tags(tags_list, **kwargs) >>> filtered = ut.compress(tags_list, flags) >>> result = ('result = %s' % (ut.repr2(filtered),)) result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
def solve(self, S, dimK=None): """Compute sparse coding and dictionary update for training data `S`.""" # Use dimK specified in __init__ as default if dimK is None and self.dimK is not None: dimK = self.dimK # Start solve timer self.timer.start(['solve', 'solve_wo_eval']) # Solve CSC problem on S and do dictionary step self.init_vars(S, dimK) self.xstep(S, self.lmbda, dimK) self.dstep() # Stop solve timer self.timer.stop('solve_wo_eval') # Extract and record iteration stats self.manage_itstat() # Increment iteration count self.j += 1 # Stop solve timer self.timer.stop('solve') # Return current dictionary return self.getdict()
Compute sparse coding and dictionary update for training data `S`.
def set_type_by_schema(self, schema_obj, schema_type):
    """
    Set property type by schema object.
    Schema will create, if it doesn't exists in collection

    :param dict schema_obj: raw schema object
    :param str schema_type:
    """
    schema_id = self._get_object_schema_id(schema_obj, schema_type)
    # Lazily register the schema the first time this id is seen.
    if not self.storage.contains(schema_id):
        created = self.storage.create_schema(
            schema_obj, self.name, schema_type, root=self.root)
        # Sanity check: the stored schema must resolve to the same id.
        assert created.schema_id == schema_id
    self._type = schema_id
Set property type by schema object Schema will create, if it doesn't exists in collection :param dict schema_obj: raw schema object :param str schema_type:
def ray_bounds(ray_origins, ray_directions, bounds, buffer_dist=1e-5):
    """
    Given a set of rays and a bounding box for the volume of interest
    where the rays will be passing through, find the bounding boxes
    of the rays as they pass through the volume.

    Parameters
    ------------
    ray_origins:    (m,3) float, ray origin points
    ray_directions: (m,3) float, ray direction vectors
    bounds:         (2,3) bounding box (min, max)
    buffer_dist:    float, distance to pad zero width bounding boxes

    Returns
    ---------
    ray_bounding: (n) set of AABB of rays passing through volume
    """
    ray_origins = np.asanyarray(ray_origins, dtype=np.float64)
    ray_directions = np.asanyarray(ray_directions, dtype=np.float64)

    # bounding box we are testing against
    bounds = np.asanyarray(bounds)

    # find the primary axis of the vector
    axis = np.abs(ray_directions).argmax(axis=1)
    axis_bound = bounds.reshape((2, -1)).T[axis]
    # IMPROVED: vectorized fancy indexing replaces the per-row Python loops
    # that previously built these arrays one element at a time.
    row = np.arange(len(axis))
    axis_ori = ray_origins[row, axis].reshape((-1, 1))
    axis_dir = ray_directions[row, axis].reshape((-1, 1))

    # parametric equation of a line
    # point = direction*t + origin
    # p = dt + o
    # t = (p-o)/d
    t = (axis_bound - axis_ori) / axis_dir

    # prevent the bounding box from including triangles
    # behind the ray origin
    t[t < buffer_dist] = buffer_dist

    # the value of t for both the upper and lower bounds
    t_a = t[:, 0].reshape((-1, 1))
    t_b = t[:, 1].reshape((-1, 1))

    # the cartesian point for where the line hits the plane defined by axis
    on_a = (ray_directions * t_a) + ray_origins
    on_b = (ray_directions * t_b) + ray_origins

    on_plane = np.column_stack(
        (on_a, on_b)).reshape(
        (-1, 2, ray_directions.shape[1]))

    ray_bounding = np.hstack((on_plane.min(axis=1), on_plane.max(axis=1)))

    # pad the bounding box by TOL_BUFFER
    # not sure if this is necessary, but if the ray is axis aligned
    # this function will otherwise return zero volume bounding boxes
    # which may or may not screw up the r-tree intersection queries
    ray_bounding += np.array([-1, -1, -1, 1, 1, 1]) * buffer_dist

    return ray_bounding
Given a set of rays and a bounding box for the volume of interest where the rays will be passing through, find the bounding boxes of the rays as they pass through the volume. Parameters ------------ ray_origins: (m,3) float, ray origin points ray_directions: (m,3) float, ray direction vectors bounds: (2,3) bounding box (min, max) buffer_dist: float, distance to pad zero width bounding boxes Returns --------- ray_bounding: (n) set of AABB of rays passing through volume
def parse_media_range(range):
    """Parse a media-range into its component parts.

    Carves up a media range and returns a tuple of the (type, subtype,
    params) where 'params' is a dictionary of all the parameters for the media
    range.  For example, the media range 'application/*;q=0.5' would get
    parsed into:

       ('application', '*', {'q', '0.5'})

    In addition this function also guarantees that there is a value for 'q'
    in the params dictionary, filling it in with a proper default if
    necessary.

    :rtype: (str,str,dict)
    """
    (type, subtype, params) = parse_mime_type(range)
    # q is case insensitive: always remove a stray 'Q' and use it as the
    # value of 'q' when 'q' itself is absent.
    q_upper = params.pop('Q', None)
    params.setdefault('q', q_upper)
    q = params['q']
    try:
        valid = q is not None and 0 <= float(q) <= 1
    except ValueError:  # from float()
        valid = False
    if not valid:
        params['q'] = '1'
    return (type, subtype, params)
Parse a media-range into its component parts. Carves up a media range and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/*;q=0.5' would get parsed into: ('application', '*', {'q', '0.5'}) In addition this function also guarantees that there is a value for 'q' in the params dictionary, filling it in with a proper default if necessary. :rtype: (str,str,dict)
def _batch_entry(self):
    """Entry point for the batcher thread.

    Runs the batch loop until it raises; the failure is recorded in
    ``self.exc_info`` and the main process is poked with SIGUSR1 so it can
    surface the error.
    """
    try:
        while True:
            self._batch_entry_run()
    # BUG FIX: a bare `except:` is an anti-pattern; BaseException keeps the
    # original semantics (also catching SystemExit/KeyboardInterrupt, which
    # must still be reported to the parent) while being explicit.
    except BaseException:
        self.exc_info = sys.exc_info()
        os.kill(self.pid, signal.SIGUSR1)
Entry point for the batcher thread.
def forward_word_extend_selection(self, e):
    u"""Move forward to the end of the next word. Words are composed
    of letters and digits."""
    reset_count = self.argument_reset
    self.l_buffer.forward_word_extend_selection(reset_count)
    self.finalize()
u"""Move forward to the end of the next word. Words are composed of letters and digits.
def _stream_raw_result(self, response, chunk_size=1, decode=True):
    ''' Stream result for TTY-enabled container and raw binary data'''
    self._raise_for_status(response)
    # Hand chunks straight through to the caller as they arrive.
    yield from response.iter_content(chunk_size, decode)
Stream result for TTY-enabled container and raw binary data
def ReadAllClientGraphSeries(self, client_label, report_type, time_range=None):
    """See db.Database."""
    result = {}
    for (label, series_type, timestamp), series in iteritems(self.client_graph_series):
        # Only series matching the requested label and report type.
        if label != client_label or series_type != report_type:
            continue
        # Optional time filtering.
        if time_range is not None and not time_range.Includes(timestamp):
            continue
        # Copy both key and value so callers can't mutate stored state.
        result[timestamp.Copy()] = series.Copy()
    return result
See db.Database.
def is_valid(self, value  # type: Any
             ):
    # type: (...) -> bool
    """
    Validates the provided value and returns a boolean indicating success or
    failure. Any Exception happening in the validation process will be
    silently caught.

    :param value: the value to validate
    :return: a boolean flag indicating success or failure
    """
    # noinspection PyBroadException
    try:
        outcome = self.main_function(value)
        return result_is_success(outcome)
    except Exception:
        # Any exception during validation counts as failure.
        return False
Validates the provided value and returns a boolean indicating success or failure. Any Exception happening in the validation process will be silently caught. :param value: the value to validate :return: a boolean flag indicating success or failure
def read(self, n):
    """
    Receive exactly *n* bytes from the socket.

    Args:
        n(int): The number of bytes to read.

    Returns:
        bytes: *n* bytes read from the socket.

    Raises:
        EOFError: If the socket was closed (a socket error is treated
            the same as a clean close).
    """
    d = b''
    while n:
        try:
            block = self._socket.recv(n)
        except socket.error:
            # Treat any socket failure like EOF below.
            block = None
        if not block:
            raise EOFError('Socket closed')
        d += block
        n -= len(block)
    return d
Receive *n* bytes from the socket. Args: n(int): The number of bytes to read. Returns: bytes: *n* bytes read from the socket. Raises: EOFError: If the socket was closed.
def fetch_all_records(self):
    r"""
    Returns a generator that yields all of the DNS records for the domain.

    Pagination is handled by ``api.paginate``; each raw record dict is
    wrapped into a `DomainRecord` via ``self._record``.

    :rtype: generator of `DomainRecord`\ s
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    return map(self._record, api.paginate(self.record_url, 'domain_records'))
r""" Returns a generator that yields all of the DNS records for the domain :rtype: generator of `DomainRecord`\ s :raises DOAPIError: if the API endpoint replies with an error
def get_settings(self):
    """Return the account settings for this user.

    Only accessible when authenticated as the user.
    """
    endpoint = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
    return self._imgur._send_request(endpoint)
Returns current settings. Only accessible if authenticated as the user.
async def register(*address_list, cluster=None, loop=None):
    """Start Raft node (server).

    Args:
        address_list: one or more ``'host:port'`` strings to bind nodes to,
            e.g. ``'127.0.0.1:8000'``.
        cluster: iterable of peer ``'host:port'`` strings, or None for a
            standalone node.
        loop: event loop to use; defaults to the running loop.
    """
    loop = loop or asyncio.get_event_loop()

    for address in address_list:
        host, port = address.rsplit(':', 1)
        node = Node(address=(host, int(port)), loop=loop)
        await node.start()

    # Fix: `cluster` defaults to None; iterating it directly raised
    # TypeError whenever the caller omitted it.
    # NOTE(review): peers are registered on the LAST node started above;
    # calling with an empty address_list and a non-empty cluster would
    # still fail (no node) — presumably never done by callers; verify.
    for address in cluster or ():
        host, port = address.rsplit(':', 1)
        port = int(port)
        if (host, port) != (node.host, node.port):
            node.update_cluster((host, port))
Start Raft node (server) Args: address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...] cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
def _get_and_start_work(self):
    """Claim one work unit and start it on the pool.

    Returns ``(async_result, work_unit)``, or ``(None, None)`` when the
    task master has no work available.
    """
    worker_id = nice_identifier()
    work_unit = self.task_master.get_work(worker_id,
                                          available_gb=self.available_gb())
    if work_unit is None:
        return None, None
    # Run the unit in a child process; _finish_callback fires on completion.
    async_result = self.pool.apply_async(
        run_worker,
        (HeadlessWorker, self.task_master.registry.config,
         worker_id,
         work_unit.work_spec_name,
         work_unit.key),
        callback=self._finish_callback)
    return async_result, work_unit
return (async_result, work_unit) or (None, None)
def applymap(self, func, subset=None, **kwargs):
    """
    Apply a function elementwise, updating the HTML
    representation with the result.

    The application is lazy: it is queued on ``self._todo`` and only
    executed when the Styler is rendered.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a scalar
    subset : IndexSlice
        a valid indexer to limit ``data`` to *before* applying the
        function. Consider using a pandas.IndexSlice
    kwargs : dict
        pass along to ``func``

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.where
    """
    self._todo.append((lambda instance: getattr(instance, '_applymap'),
                       (func, subset), kwargs))
    return self
Apply a function elementwise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a scalar and return a scalar subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler See Also -------- Styler.where
def valueFromString(self, value, context=None):
    """
    Re-implements the orb.Column.valueFromString method to look up a
    reference object based on the given value.

    :param value: <str>
    :param context: <orb.Context> || None

    :return: <orb.Model> || None
    """
    reference_model = self.referenceModel()
    return reference_model(value, context=context)
Re-implements the orb.Column.valueFromString method to lookup a reference object based on the given value. :param value: <str> :param context: <orb.Context> || None :return: <orb.Model> || None
def __parse_drac(output):
    '''
    Parse Dell DRAC output into a nested dict.

    A line ending in ':' (with no '=') starts a new section; lines
    containing '=' become key/value pairs within the current section.
    '''
    drac = {}
    section = ''

    for i in output.splitlines():
        if i.strip().endswith(':') and '=' not in i:
            # Section header, e.g. "cfgLanNetworking:" — drop the colon.
            section = i[0:-1]
            drac[section] = {}
        if i.rstrip() and '=' in i:
            if section in drac:
                drac[section].update(dict(
                    [[prop.strip() for prop in i.split('=')]]
                ))
            else:
                # Key/value seen before any header: treat the line itself
                # as a new (empty) section.
                section = i.strip()
                if section not in drac and section:
                    drac[section] = {}

    return drac
Parse Dell DRAC output
def parse_hash(address):
    '''
    str -> bytes

    Decode an address and strip its network prefix, returning the bare
    hash payload.  Tries Cash, Segwit (bech32), then legacy prefixes.
    Returns None implicitly if no known prefix matches.

    There's probably a better way to do this.
    '''
    raw = parse(address)

    # Cash addresses (TypeError is raised when the network defines no
    # cashaddr prefix; silently fall through to the next family).
    try:
        if address.find(riemann.network.CASHADDR_PREFIX) == 0:
            if raw.find(riemann.network.CASHADDR_P2SH) == 0:
                return raw[len(riemann.network.CASHADDR_P2SH):]
            if raw.find(riemann.network.CASHADDR_P2PKH) == 0:
                return raw[len(riemann.network.CASHADDR_P2PKH):]
    except TypeError:
        pass

    # Segwit addresses
    try:
        if address.find(riemann.network.BECH32_HRP) == 0:
            if raw.find(riemann.network.P2WSH_PREFIX) == 0:
                return raw[len(riemann.network.P2WSH_PREFIX):]
            if raw.find(riemann.network.P2WPKH_PREFIX) == 0:
                return raw[len(riemann.network.P2WPKH_PREFIX):]
    except TypeError:
        pass

    # Legacy Addresses
    if raw.find(riemann.network.P2SH_PREFIX) == 0:
        return raw[len(riemann.network.P2SH_PREFIX):]
    if raw.find(riemann.network.P2PKH_PREFIX) == 0:
        return raw[len(riemann.network.P2PKH_PREFIX):]
str -> bytes There's probably a better way to do this.
def delete(self):
    """Remove myself from my :class:`Character`.

    For symmetry with :class:`Thing` and :class:`Place`.  Records the
    edge's removal in the cache and the query store at a fresh
    (branch, turn, tick), drops any cached edge object, and notifies
    listeners on the origin's portal mapping.
    """
    branch, turn, tick = self.engine._nbtt()
    self.engine._edges_cache.store(
        self.character.name,
        self.origin.name,
        self.destination.name,
        0,
        branch,
        turn,
        tick,
        None
    )
    self.engine.query.exist_edge(
        self.character.name,
        self.origin.name,
        self.destination.name,
        branch,
        turn,
        tick,
        False
    )
    try:
        del self.engine._edge_objs[
            (self.graph.name, self.orig, self.dest)
        ]
    except KeyError:
        # Edge object was never cached; nothing to drop.
        pass
    self.character.portal[self.origin.name].send(
        self.character.portal[self.origin.name], key='dest', val=None
    )
Remove myself from my :class:`Character`. For symmetry with :class:`Thing` and :class`Place`.
def _compute(self, arrays, dates, assets, mask):
    """
    Compute our result with numexpr, then re-apply `mask`.

    ANDing with ``mask`` guarantees that positions excluded by the mask
    stay False in the resulting boolean array.
    """
    return super(NumExprFilter, self)._compute(
        arrays,
        dates,
        assets,
        mask,
    ) & mask
Compute our result with numexpr, then re-apply `mask`.
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    shifts = 0
    if singlenode_mode:
        shifts += 1
    elif len(peer_units()) > 0 or is_clustered():
        shifts += 1
    if https():
        shifts += 1
    # Each active layer (haproxy/clustering, TLS proxy) moves the
    # backend down by 10 ports.
    return public_port - shifts * 10
Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. public_port: int: standard public port for given service singlenode_mode: boolean: Shuffle ports when only a single unit is present returns: int: the correct listening port for the API service
def extract_pk_blob_from_pyasn1(pyasn1_struct):
    """
    Extract an ASN.1 encoded public key blob from the given
    :mod:`pyasn1` structure (which must represent a certificate).
    """
    tbs_certificate = pyasn1_struct.getComponentByName("tbsCertificate")
    spki = tbs_certificate.getComponentByName("subjectPublicKeyInfo")
    return pyasn1.codec.der.encoder.encode(spki)
Extract an ASN.1 encoded public key blob from the given :mod:`pyasn1` structure (which must represent a certificate).
def get_components(self, which):
    """
    Get the component name dictionary for the desired object.

    The returned dictionary maps component names on this class to
    component names on the desired object.

    Parameters
    ----------
    which : str
        Can either be ``'pos'`` or ``'vel'`` to get the components for
        the position or velocity object.
    """
    # Mappings registered for the representation class of the requested
    # attribute (may be empty).
    mappings = self.representation_mappings.get(
        getattr(self, which).__class__, [])

    # First pass: resolve each native component name to its new name,
    # via regex mappings (with format groups) or plain renames.
    old_to_new = dict()
    for name in getattr(self, which).components:
        for m in mappings:
            if isinstance(m, RegexRepresentationMapping):
                pattr = re.match(m.repr_name, name)
                old_to_new[name] = m.new_name.format(*pattr.groups())

            elif m.repr_name == name:
                old_to_new[name] = m.new_name

    # Second pass: build an ordered new-name -> old-name mapping,
    # preserving the component order of the underlying object.
    mapping = OrderedDict()
    for name in getattr(self, which).components:
        mapping[old_to_new.get(name, name)] = name

    return mapping
Get the component name dictionary for the desired object. The returned dictionary maps component names on this class to component names on the desired object. Parameters ---------- which : str Can either be ``'pos'`` or ``'vel'`` to get the components for the position or velocity object.
def _connect_database(config):
    """Create simple connection with Mongodb.

    ``config`` carries settings from the .ini file; ``mongo_url`` and
    ``mongodb_name`` override the localhost/test defaults when set.
    """
    settings = config.registry.settings
    mongo_uri = settings.get("mongo_url") or "mongodb://localhost:27017"
    mongodb_name = settings.get("mongodb_name") or "test"
    return mongoengine.connect(mongodb_name, host=mongo_uri)
Create simple connection with Mongodb config comes with settings from .ini file.
def change_location(self, old_location_name, new_location_name,
                    new_parent_name=None, new_er_data=None,
                    new_pmag_data=None, replace_data=False):
    """
    Find actual data object for location with old_location_name.
    Then call Location class change method to update location name and
    data.  Returns the updated location, or False when no location with
    ``old_location_name`` exists.

    NOTE(review): ``new_parent_name`` is accepted but never used here —
    presumably consumed by an overload elsewhere; verify before relying
    on it.
    """
    location = self.find_by_name(old_location_name, self.locations)
    if not location:
        print('-W- {} is not a currently existing location, so it cannot be updated.'.format(old_location_name))
        return False
    location.change_location(new_location_name, new_er_data, new_pmag_data, replace_data)
    return location
Find actual data object for location with old_location_name. Then call Location class change method to update location name and data.
def cdn_request(self, uri, method, *args, **kwargs):
    """
    If the service supports CDN, use this method to access CDN-specific
    URIs.

    Raises NotCDNEnabled when the service has no CDN endpoint at all,
    or when the container exists but is not CDN-enabled.  A NotFound on
    the CDN endpoint is disambiguated by probing the management URL:
    if the container is missing there too, the NotFound propagates.
    """
    if not self.cdn_management_url:
        raise exc.NotCDNEnabled("CDN is not enabled for this service.")
    cdn_uri = "%s%s" % (self.cdn_management_url, uri)
    mthd = self.method_dict.get(method.upper())
    try:
        resp, resp_body = mthd(cdn_uri, *args, **kwargs)
    except exc.NotFound as e:
        # This could be due to either the container does not exist, or that
        # the container exists but is not CDN-enabled.
        try:
            mgt_uri = "%s%s" % (self.management_url, uri)
            resp, resp_body = self.method_head(mgt_uri)
        except exc.NotFound:
            # Container truly doesn't exist: re-raise the original error.
            raise
        # Container exists on the management endpoint, so the CDN 404
        # means it just isn't CDN-enabled.
        raise exc.NotCDNEnabled("This container is not CDN-enabled.")
    return resp, resp_body
If the service supports CDN, use this method to access CDN-specific URIs.
def node_str(node):
    """
    Returns the complete menu entry text for a menu node, or "" for
    invisible menu nodes. Invisible menu nodes are those that lack a
    prompt or that do not have a satisfied prompt condition.

    Example return value: "[*] Bool symbol (BOOL)"

    The symbol name is printed in parentheses to the right of the
    prompt. This is so that symbols can easily be referred to in the
    configuration interface.
    """
    if not node.prompt:
        return ""

    # Even for menu nodes for symbols and choices, it's wrong to check
    # Symbol.visibility / Choice.visibility here. The reason is that a symbol
    # (and a choice, in theory) can be defined in multiple locations, giving it
    # multiple menu nodes, which do not necessarily all have the same prompt
    # visibility. Symbol.visibility / Choice.visibility is calculated as the OR
    # of the visibility of all the prompts.
    prompt, prompt_cond = node.prompt
    if not expr_value(prompt_cond):
        return ""

    if node.item == MENU:
        return "    " + prompt

    if node.item == COMMENT:
        return "    *** {} ***".format(prompt)

    # Symbol or Choice

    sc = node.item

    if sc.type == UNKNOWN:
        # Skip symbols defined without a type (these are obscure and generate
        # a warning)
        return ""

    # {:3} sets the field width to three. Gives nice alignment for empty string
    # values.
    res = "{:3} {}".format(value_str(sc), prompt)

    # Don't print the name for unnamed choices (the normal kind)
    if sc.name is not None:
        res += " ({})".format(sc.name)

    return res
Returns the complete menu entry text for a menu node, or "" for invisible menu nodes. Invisible menu nodes are those that lack a prompt or that do not have a satisfied prompt condition. Example return value: "[*] Bool symbol (BOOL)" The symbol name is printed in parentheses to the right of the prompt. This is so that symbols can easily be referred to in the configuration interface.
def append_child(self, child):
    """
    Append *child* to ``self.children``, the equivalent of
    ``node.children.append(child)``, and set the child's parent
    attribute to this node.  Marks the node as changed.
    """
    self.children.append(child)
    child.parent = self
    self.changed()
Equivalent to 'node.children.append(child)'. This method also sets the child's parent attribute appropriately.
def determine_frame_positions(self):
    """Record the file pointer position of each frame.

    Scans the blob file from the start: each frame begins with a
    little-endian int32 total length, which is used to seek over the
    frame body.  The ``struct.error`` raised at EOF (short read of the
    4-byte header) cleanly terminates the loop via ``ignored``.
    """
    self.rewind_file()
    with ignored(struct.error):
        while True:
            pointer_position = self.blob_file.tell()
            length = struct.unpack('<i', self.blob_file.read(4))[0]
            # Already consumed 4 bytes of the frame; skip the rest.
            self.blob_file.seek(length - 4, 1)
            self.frame_positions.append(pointer_position)
    self.rewind_file()
    log.info("Found {0} frames.".format(len(self.frame_positions)))
Record the file pointer position of each frame
def sentiment(self):
    """
    The sentiment of this sentence, parsed lazily from the underlying
    XML element's ``sentiment`` attribute and cached.

    :getter: Returns the sentiment value of this sentence
    :type: int
    """
    if self._sentiment is None:
        # Cache the parsed value on first access.
        self._sentiment = int(self._element.get('sentiment'))
    return self._sentiment
The sentiment of this sentence :getter: Returns the sentiment value of this sentence :type: int
def from_string(cls, string, version):
    """
    Constructs an ARC record from a string and returns it.

    ``version`` selects which header regex to use (1 or 2); any other
    value raises ValueError.

    TODO: It might be best to merge this with the _read_arc_record
    function rather than reimplement the functionality here.
    """
    header, payload = string.split("\n", 1)
    # There's an extra newline separating the header from the payload.
    # Fix: use startswith so an empty payload no longer raises IndexError.
    if payload.startswith('\n'):
        payload = payload[1:]
    if int(version) == 1:
        arc_header_re = ARC1_HEADER_RE
    elif int(version) == 2:
        arc_header_re = ARC2_HEADER_RE
    else:
        # Fix: previously an unsupported version fell through and raised
        # an opaque NameError on arc_header_re.
        raise ValueError("Unsupported ARC version: %r" % (version,))

    matches = arc_header_re.search(header)
    headers = matches.groupdict()
    arc_header = ARCHeader(**headers)
    return cls(header=arc_header, payload=payload, version=version)
Constructs an ARC record from a string and returns it. TODO: It might be best to merge this with the _read_arc_record function rather than reimplement the functionality here.
async def setnym(ini_path: str) -> int:
    """
    Set configuration. Open pool, trustee anchor, and wallet of anchor
    whose nym to send. Register exit hooks to close pool and trustee
    anchor. Engage trustee anchor to send nym for VON anchor, if it
    differs on the ledger from configuration.

    :param ini_path: path to configuration file
    :return: 0 for OK, 1 for failure
    """
    config = inis2dict(ini_path)
    # The two anchors must live in distinct wallets.
    if config['Trustee Anchor']['name'] == config['VON Anchor']['name']:
        raise ExtantWallet('Wallet names must differ between VON Anchor and Trustee Anchor')
    cfg_van_role = config['VON Anchor'].get('role', None) or None  # nudge empty value from '' to None
    if not ok_role(cfg_van_role):
        raise BadRole('Configured role {} is not valid'.format(cfg_van_role))

    pool_data = NodePoolData(
        config['Node Pool']['name'],
        config['Node Pool'].get('genesis.txn.path', None) or None)

    # Gather per-anchor configuration; `or None` normalizes '' to None
    # throughout, and wallet.create accepts 1/true/yes (case-insensitive).
    an_data = {
        'tan': AnchorData(
            Role.TRUSTEE,
            config['Trustee Anchor']['name'],
            config['Trustee Anchor'].get('seed', None) or None,
            config['Trustee Anchor'].get('did', None) or None,
            config['Trustee Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
            config['Trustee Anchor'].get('wallet.type', None) or None,
            config['Trustee Anchor'].get('wallet.access', None) or None),
        'van': AnchorData(
            Role.get(cfg_van_role),
            config['VON Anchor']['name'],
            config['VON Anchor'].get('seed', None) or None,
            config['VON Anchor'].get('did', None) or None,
            config['VON Anchor'].get('wallet.create', '0').lower() in ['1', 'true', 'yes'],
            config['VON Anchor'].get('wallet.type', None) or None,
            config['VON Anchor'].get('wallet.access', None) or None)
    }

    an_wallet = await _set_wallets(an_data)

    # Ensure the node pool ledger configuration exists before opening it.
    p_mgr = NodePoolManager()
    if pool_data.name not in await p_mgr.list():
        if pool_data.genesis_txn_path:
            await p_mgr.add_config(pool_data.name, pool_data.genesis_txn_path)
        else:
            raise AbsentPool('Node pool {} has no ledger configuration, but {} specifies no genesis txn path'.format(
                pool_data.name, ini_path))

    async with an_wallet['tan'] as w_tan, (
            an_wallet['van']) as w_van, (
            p_mgr.get(pool_data.name)) as pool, (
            TrusteeAnchor(w_tan, pool)) as tan, (
            NominalAnchor(w_van, pool)) as van:

        send_verkey = van.verkey
        try:
            nym_role = await tan.get_nym_role(van.did)
            if an_data['van'].role == nym_role:
                return 0  # ledger is as per configuration
            send_verkey = None  # only owner can touch verkey
            if nym_role != Role.USER:  # only remove role when it is not already None on the ledger
                await tan.send_nym(van.did, send_verkey, van.wallet.name, Role.ROLE_REMOVE)
        except AbsentNym:
            pass  # cryptonym not there yet, fall through

        await tan.send_nym(van.did, send_verkey, van.wallet.name, an_data['van'].role)

    return 0
Set configuration. Open pool, trustee anchor, and wallet of anchor whose nym to send. Register exit hooks to close pool and trustee anchor. Engage trustee anchor to send nym for VON anchor, if it differs on the ledger from configuration. :param ini_path: path to configuration file :return: 0 for OK, 1 for failure
def p_state_action_constraint_section(self, p):
    '''state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI
                                       | STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI'''
    # NOTE(PLY): the docstring above IS the grammar production for this
    # rule — do not edit it without changing the grammar intentionally.
    if len(p) == 6:
        # Non-empty constraint list.
        p[0] = ('constraints', p[3])
    elif len(p) == 5:
        # Empty constraints section.
        p[0] = ('constraints', [])
    self._print_verbose('state-action-constraints')
state_action_constraint_section : STATE_ACTION_CONSTRAINTS LCURLY state_cons_list RCURLY SEMI | STATE_ACTION_CONSTRAINTS LCURLY RCURLY SEMI
def extract_labels(self, f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index].

    Args:
      f: A file object that can be passed into a gzip reader.
      one_hot: Does one hot encoding for the result.
      num_classes: Number of classes for the one hot encoding.

    Returns:
      labels: a 1D uint8 numpy array.

    Raises:
      ValueError: If the bytestream doesn't start with 2049 (the MNIST
        label-file magic number).
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = self._read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                             (magic, f.name))
        num_items = self._read32(bytestream)
        # One byte per label.
        buf = bytestream.read(num_items)
        labels = np.frombuffer(buf, dtype=np.uint8)
        if one_hot:
            return self.dense_to_one_hot(labels, num_classes)
        return labels
Extract the labels into a 1D uint8 numpy array [index]. Args: f: A file object that can be passed into a gzip reader. one_hot: Does one hot encoding for the result. num_classes: Number of classes for the one hot encoding. Returns: labels: a 1D uint8 numpy array. Raises: ValueError: If the bytestream doesn't start with 2049.
def zotero_inline_tags(parser, token):
    """
    Render an inline formset of tags.

    Usage: {% zotero_inline_tags formset[ option] %}
    option = "all" | "media" | "formset"

    With no option (or "all") the full rendering node is used; "media"
    and "formset" select the media-only and formset-only nodes.
    Raises TemplateSyntaxError for any other argument shape.
    """
    args = token.split_contents()
    length = len(args)
    if length == 2:
        rendered_node = RenderedAllNode(args[1])
    elif length == 3 and args[2].lower() == u'all':
        rendered_node = RenderedAllNode(args[1])
    elif length == 3 and args[2].lower() == u'media':
        rendered_node = RenderedMediaNode(args[1])
    elif length == 3 and args[2].lower() == u'formset':
        rendered_node = RenderedFormsetNode(args[1])
    else:
        raise t.TemplateSyntaxError('Incorrect arguments in %s.' % args[0])
    return rendered_node
Render an inline formset of tags. Usage: {% zotero_inline_tags formset[ option] %} option = "all" | "media" | "formset"
def mpi_submit(nslave, worker_args, worker_envs):
    """
    Customized submit script that submits nslave jobs via mpirun.

    Note this can be a lambda function containing additional parameters
    in input.

    Parameters
       nslave: number of slave processes to start up
       worker_args: arguments to launch each job; this usually includes
           the parameters of master_uri and parameters passed into submit
       worker_envs: environment key/values appended as k=v arguments

    NOTE(review): relies on a module-level ``args`` (command, hostfile) —
    presumably the argparse namespace of the launcher script.
    """
    # Fix: build a new list rather than mutating the caller's worker_args
    # in place with +=.
    worker_args = worker_args + ['%s=%s' % (k, str(v)) for k, v in worker_envs.items()]
    if args.hostfile is None:
        cmd = ' '.join(['mpirun -n %d' % nslave] + args.command + worker_args)
    else:
        cmd = ' '.join(['mpirun -n %d --hostfile %s' % (nslave, args.hostfile)] +
                       args.command + worker_args)
    # Fix: use print as a function (valid on Python 2 and 3 for a single
    # argument); the unused `sargs` variable was removed.
    print(cmd)
    subprocess.check_call(cmd, shell=True)
customized submit script, that submit nslave jobs, each must contain args as parameter note this can be a lambda function containing additional parameters in input Parameters nslave number of slave process to start up args arguments to launch each job this usually includes the parameters of master_uri and parameters passed into submit
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
    """
    Uses matplotlib for creating a visualization of the attention matrix.

    :param attention_matrix: The attention matrix, shaped
           (len(target_tokens), source-length); it is transposed before
           plotting so targets run along x and sources along y.
    :param source_tokens: A list of source tokens.
    :param target_tokens: A list of target tokens.
    :param filename: The file to which the attention visualization will be written to.
    """
    try:
        import matplotlib
    except ImportError:
        raise RuntimeError("Please install matplotlib.")
    # Headless backend must be selected before importing pyplot.
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    assert attention_matrix.shape[0] == len(target_tokens)

    plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
    plt.xlabel("target")
    plt.ylabel("source")
    plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
    plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
    plt.gca().set_xticklabels(target_tokens, rotation='vertical')
    plt.gca().set_yticklabels(source_tokens)
    plt.tight_layout()
    plt.savefig(filename)
    logger.info("Saved alignment visualization to " + filename)
Uses matplotlib for creating a visualization of the attention matrix. :param attention_matrix: The attention matrix. :param source_tokens: A list of source tokens. :param target_tokens: A list of target tokens. :param filename: The file to which the attention visualization will be written to.
def base64_bytes(x):
    """Decode base64 input into bytes.

    On Python 2 uses the legacy ``decodestring``; on Python 3 the input
    is first coerced to bytes via ``bytes_encode`` and decoded with
    ``decodebytes``.
    """
    if six.PY2:
        return base64.decodestring(x)
    return base64.decodebytes(bytes_encode(x))
Turn base64 into bytes
def socket_handler(name, logname, host, port):
    """
    A Bark logging handler logging output to a stream (TCP) socket.
    The server listening at the given 'host' and 'port' will be sent a
    pickled dictionary.

    Similar to logging.handlers.SocketHandler.
    """
    raw_handler = logging.handlers.SocketHandler(host, port)
    return wrap_log_handler(raw_handler)
A Bark logging handler logging output to a stream (TCP) socket. The server listening at the given 'host' and 'port' will be sent a pickled dictionary. Similar to logging.handlers.SocketHandler.
def store_vector(self, hash_name, bucket_key, v, data):
    """
    Stores vector and JSON-serializable data in MongoDB with specified key.

    Sparse vectors are stored as [index, value] pairs in COO form;
    dense vectors are flattened and stored as raw bytes plus dtype.
    """
    mongo_key = self._format_mongo_key(hash_name, bucket_key)

    val_dict = {}
    val_dict['lsh'] = mongo_key

    # Depending on type (sparse or not) fill value dict
    if scipy.sparse.issparse(v):
        # Make sure that we are using COO format (easy to handle)
        if not scipy.sparse.isspmatrix_coo(v):
            v = scipy.sparse.coo_matrix(v)

        # Construct list of [index, value] items,
        # one for each non-zero element of the sparse vector
        encoded_values = []

        for k in range(v.data.size):
            row_index = v.row[k]
            value = v.data[k]
            encoded_values.append([int(row_index), value])

        val_dict['sparse'] = 1
        val_dict['nonzeros'] = encoded_values
        val_dict['dim'] = v.shape[0]
    else:
        # Make sure it is a 1d vector
        v = numpy.reshape(v, v.shape[0])
        # Raw buffer plus dtype name is enough to reconstruct the array.
        val_dict['vector'] = v.tostring()
        val_dict['dtype'] = v.dtype.name

    # Add data if set
    if data is not None:
        val_dict['data'] = data

    # Push JSON representation of dict to end of bucket list
    self.mongo_object.insert_one(val_dict)
Stores vector and JSON-serializable data in MongoDB with specified key.
def flush(self):
    """Flush all pending gauges to the driver.

    Serializes pending blocks, writes them (replace vs append depending
    on configuration), records per-namespace statistics, and commits.
    Raises GaugedUseAfterFreeError if the writer was already freed.
    """
    writer = self.writer
    if writer is None:
        raise GaugedUseAfterFreeError
    self.flush_writer_position()
    keys = self.translate_keys()
    blocks = []
    current_block = self.current_block
    statistics = self.statistics
    driver = self.driver
    flags = 0  # for future extensions, e.g. block compression
    for namespace, key, block in self.pending_blocks():
        length = block.byte_length()
        if not length:
            # Nothing buffered for this key; skip it entirely.
            continue
        key_id = keys[(namespace, key)]
        statistics[namespace].byte_count += length
        blocks.append((namespace, current_block, key_id,
                       block.buffer(), flags))
    if self.config.overwrite_blocks:
        driver.replace_blocks(blocks)
    else:
        driver.insert_or_append_blocks(blocks)
        # Persist the writer's internal maps after append-mode writes.
        if not Gauged.writer_flush_maps(writer, True):
            raise MemoryError
    update_namespace = driver.add_namespace_statistics
    for namespace, stats in statistics.iteritems():
        update_namespace(namespace, self.current_block,
                         stats.data_points, stats.byte_count)
    statistics.clear()
    driver.commit()
    self.flush_now = False
Flush all pending gauges
def wrap(self, value):
    '''
    Expects a dictionary with the keys being instances of
    ``KVField.key_type`` and the values being instances of
    ``KVField.value_type``.  After validation, the dictionary is
    transformed into a list of dictionaries with ``k`` and ``v`` fields
    set to the keys and values from the original dictionary.
    '''
    self.validate_wrap(value)
    return [
        {'k': self.key_type.wrap(key), 'v': self.value_type.wrap(val)}
        for key, val in value.items()
    ]
Expects a dictionary with the keys being instances of ``KVField.key_type`` and the values being instances of ``KVField.value_type``. After validation, the dictionary is transformed into a list of dictionaries with ``k`` and ``v`` fields set to the keys and values from the original dictionary.
def remap(input, keys, values, missing='ignore', inplace=False):
    """Given an input array, remap its entries corresponding to 'keys' to 'values'

    equivalent of output = [map.get(i, default=i) for i in input],
    if map were a dictionary of corresponding keys and values

    Parameters
    ----------
    input : ndarray, [...]
        array whose entries are to be remapped
    keys : ndarray, [...]
        source values to be replaced
    values : ndarray, [...]
        replacement values, aligned element-wise with `keys`
    missing : {'raise', 'ignore'}
        if `missing` is 'raise', a KeyError is raised if 'values' contains elements not present in 'keys'
        if `missing` is 'ignore', only elements of 'values' present in 'keys' are remapped
    inplace : bool, optional
        if True, input array is remapped in place
        if false, a copy is returned

    Returns
    -------
    output : ndarray, [...]
        like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values'
    """
    input = np.asarray(input)    # FIXME: currently instances of Index are not allowed
    values = np.asarray(values)
    if missing == 'ignore':
        # Masked lookup: untranslated entries keep their original value.
        idx = indices(keys, input, missing='mask')
        mask = np.logical_not(idx.mask)
        idx = idx.data
    elif missing == 'raise':
        idx = indices(keys, input, missing='raise')
        mask = Ellipsis
    else:
        raise ValueError("'missing' should be either 'ignore' or 'raise'")
    output = input if inplace else input.copy()
    output[mask] = values[idx[mask]]
    return output
Given an input array, remap its entries corresponding to 'keys' to 'values' equivalent of output = [map.get(i, default=i) for i in input], if map were a dictionary of corresponding keys and values Parameters ---------- input : ndarray, [...] array whose entries are to be remapped keys : ndarray, [...] source values to be replaced values : ndarray, [...] replacement values, aligned element-wise with 'keys' missing : {'raise', 'ignore'} if `missing` is 'raise', a KeyError is raised if 'values' contains elements not present in 'keys' if `missing` is 'ignore', only elements of 'values' present in 'keys' are remapped inplace : bool, optional if True, input array is remapped in place if false, a copy is returned Returns ------- output : ndarray, [...] like 'input', but with elements remapped according to the mapping defined by 'keys' and 'values'
def libvlc_video_set_teletext(p_mi, i_page):
    '''Set new teletext page to retrieve.
    @param p_mi: the media player.
    @param i_page: teletext page number requested.
    '''
    # Lazily resolve and cache the ctypes binding for the libvlc symbol.
    f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
        _Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_page)
Set new teletext page to retrieve. @param p_mi: the media player. @param i_page: teletext page number requested.
def duration_expired(start_time, duration_seconds):
    """
    Return True if ``duration_seconds`` have expired since ``start_time``;
    always False when ``duration_seconds`` is None.
    """
    if duration_seconds is None:
        return False
    elapsed = datetime_delta_to_seconds(dt.datetime.now() - start_time)
    return elapsed >= duration_seconds
Return True if ``duration_seconds`` have expired since ``start_time``
def docker_to_uuid(uuid):
    '''
    Get the image uuid from an imported docker image

    Accepts either a native image UUID (returned unchanged), a docker
    image name resolved against installed images, or anything else
    (returns None).

    .. versionadded:: 2019.2.0
    '''
    if _is_uuid(uuid):
        return uuid
    if _is_docker_uuid(uuid):
        # Resolve docker image name -> installed image uuid.
        images = list_installed(verbose=True)
        for image_uuid in images:
            if 'name' not in images[image_uuid]:
                continue
            if images[image_uuid]['name'] == uuid:
                return image_uuid
    return None
Get the image uuid from an imported docker image .. versionadded:: 2019.2.0
def list_insert(lst, new_elements, index_or_name=None, after=True):
    """
    Return a copy of the list with the new element(s) inserted.

    Args:
        lst (list): The original list.
        new_elements (list): The element(s) to insert in the list.
        index_or_name (int or str): The value of the reference element, or
            directly its numeric index. Default: None (=append). If the
            reference cannot be resolved, the elements are appended.
        after (bool): Whether to insert the new elements before or after
            the reference element. Default: True.

    Returns:
        (list) A copy of the original list containing the new element(s).
    """
    if index_or_name is None:
        index = None
    else:
        try:
            index = get_list_index(lst, index_or_name)
        except ValueError:
            # Unknown reference element: fall back to appending.
            index = None

    result = lst[:]
    if index is None:
        # Append (no reference element, or it could not be resolved).
        return result + new_elements
    if after:
        # Fix: a reference element at index 0 was previously always
        # prepended, silently ignoring ``after``; it is honoured now.
        index += 1
    return result[:index] + new_elements + result[index:]
Return a copy of the list with the new element(s) inserted. Args: lst (list): The original list. new_elements ("any" or list of "any"): The element(s) to insert in the list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Default: None (=append). after (bool): Whether to insert the new elements before or after the reference element. Default: True. Returns: (list) A copy of the original list containing the new element(s).
def config(name='ckeditor', custom_config='', **kwargs):
    """Config CKEditor.

    :param name: The target input field's name. If you use Flask-WTF/WTForms,
        it needs to be set to the field's name. Default to ``'ckeditor'``.
    :param custom_config: The additional config, for example
        ``uiColor: '#9AB8F3'``. The proper syntax for each option is
        ``configuration name : configuration value``. You can use comma to
        separate multiple key-value pairs. See the list of available
        configuration settings on `CKEditor documentation
        <https://docs.ckeditor.com/ckeditor4/docs/#!/api/CKEDITOR.config>`_.
    :param kwargs: Mirror arguments to overwritten configuration variables,
        see docs for more details.

    .. versionadded:: 0.3
    """
    # BUG FIX: copy the plugin list so the appends below do not mutate the
    # list object stored in the application config across requests.
    extra_plugins = list(kwargs.get('extra_plugins',
                                    current_app.config['CKEDITOR_EXTRA_PLUGINS']))

    file_uploader = kwargs.get('file_uploader', current_app.config['CKEDITOR_FILE_UPLOADER'])
    file_browser = kwargs.get('file_browser', current_app.config['CKEDITOR_FILE_BROWSER'])
    if file_uploader != '':
        file_uploader = get_url(file_uploader)
    if file_browser != '':
        file_browser = get_url(file_browser)
    # BUG FIX: the original condition parsed as
    # ``file_uploader or (file_browser and 'filebrowser' not in extra_plugins)``
    # due to operator precedence, so a configured uploader appended the
    # plugin on every call.
    if (file_uploader or file_browser) and 'filebrowser' not in extra_plugins:
        extra_plugins.append('filebrowser')

    language = kwargs.get('language', current_app.config['CKEDITOR_LANGUAGE'])
    height = kwargs.get('height', current_app.config['CKEDITOR_HEIGHT'])
    width = kwargs.get('width', current_app.config['CKEDITOR_WIDTH'])
    code_theme = kwargs.get('code_theme', current_app.config['CKEDITOR_CODE_THEME'])

    wrong_key_arg = kwargs.get('codesnippet', None)
    if wrong_key_arg:
        warnings.warn('Argument codesnippet was renamed to enable_codesnippet and will be removed in future.')
    enable_codesnippet = kwargs.get('enable_codesnippet', wrong_key_arg) or \
        current_app.config['CKEDITOR_ENABLE_CODESNIPPET']
    if enable_codesnippet and 'codesnippet' not in extra_plugins:
        extra_plugins.append('codesnippet')

    enable_csrf = kwargs.get('enable_csrf', current_app.config['CKEDITOR_ENABLE_CSRF'])
    if enable_csrf:
        if 'csrf' not in current_app.extensions:
            raise RuntimeError("CSRFProtect is not initialized. It's required to enable CSRF protect, \
                see docs for more details.")
        csrf_header = render_template_string('''
            fileTools_requestHeaders: {
                'X-CSRFToken': '{{ csrf_token() }}',
            },''')
    else:
        csrf_header = ''

    return Markup('''
<script type="text/javascript">
    CKEDITOR.replace( "%s", {
        language: "%s",
        height: %s,
        width: %s,
        codeSnippet_theme: "%s",
        imageUploadUrl: "%s",
        filebrowserUploadUrl: "%s",
        filebrowserBrowseUrl: "%s",
        extraPlugins: "%s",
        %s  // CSRF token header for XHR request
        %s
    });
</script>''' % (
        name, language, height, width, code_theme, file_uploader,
        file_uploader, file_browser, ','.join(extra_plugins),
        csrf_header, custom_config))
Config CKEditor. :param name: The target input field's name. If you use Flask-WTF/WTForms, it needs to be set to the field's name. Default to ``'ckeditor'``. :param custom_config: The additional config, for example ``uiColor: '#9AB8F3'``. The proper syntax for each option is ``configuration name : configuration value``. You can use comma to separate multiple key-value pairs. See the list of available configuration settings on `CKEditor documentation <https://docs.ckeditor.com/ckeditor4/docs/#!/api/CKEDITOR.config>`_. :param kwargs: Mirror arguments to overwritten configuration variables, see docs for more details. .. versionadded:: 0.3
def set_menu(self):
    """Setup the menu that the menu_tb button uses

    :returns: None
    :rtype: None
    :raises: None
    """
    self.menu = QtGui.QMenu(self)
    self.actions = []
    for action_def in self.reftrack.get_additional_actions():
        # Build the QAction with or without an icon, mirroring the
        # definition's flags onto the widget.
        if action_def.icon:
            qaction = QtGui.QAction(action_def.icon, action_def.name, self)
        else:
            qaction = QtGui.QAction(action_def.name, self)
        qaction.setCheckable(action_def.checkable)
        qaction.setChecked(action_def.checked)
        qaction.setEnabled(action_def.enabled)
        qaction.triggered.connect(action_def.action)
        # keep a reference so the actions are not garbage collected
        self.actions.append(qaction)
        self.menu.addAction(qaction)
    self.menu_tb.setMenu(self.menu)
Setup the menu that the menu_tb button uses :returns: None :rtype: None :raises: None
def authAddress(val):
    """
    # The C1 Tag

    extracts the address of the authors as given by WOS. **Warning** the
    mapping of author to address is not very good and is given in multiple
    ways.

    # Parameters

    _val_: `list[str]`

    > The raw data from a WOS file

    # Returns

    `list[str]`

    > A list of addresses
    """
    addresses = []
    for entry in val:
        if entry[0] == '[':
            # Drop the leading "[Author; ...] " prefix; everything after the
            # first "] " is the address itself.
            addresses.append(entry.partition('] ')[2])
        else:
            addresses.append(entry)
    return addresses
# The C1 Tag extracts the address of the authors as given by WOS. **Warning** the mapping of author to address is not very good and is given in multiple ways. # Parameters _val_: `list[str]` > The raw data from a WOS file # Returns `list[str]` > A list of addresses
def new_value(self, name, value):
    """Create new value in data.

    Serializes ``value`` and pushes it to the kernel under ``name``; shows a
    warning dialog instead when the payload is too large, and an error
    dialog on serialization TypeErrors.
    """
    try:
        # We need to enclose values in a list to be able to send
        # them to the kernel in Python 2
        svalue = [cloudpickle.dumps(value, protocol=PICKLE_PROTOCOL)]

        # Needed to prevent memory leaks. See issue 7158
        # BUG FIX: compare the size of the serialized payload; the original
        # checked ``len(svalue)`` which is always 1 (the wrapper list), so
        # the "too big" branch was unreachable.
        if len(svalue[0]) < MAX_SERIALIZED_LENGHT:
            self.shellwidget.set_value(name, svalue)
        else:
            QMessageBox.warning(self, _("Warning"),
                                _("The object you are trying to modify is "
                                  "too big to be sent back to the kernel. "
                                  "Therefore, your modifications won't "
                                  "take place."))
    except TypeError as e:
        QMessageBox.critical(self, _("Error"),
                             "TypeError: %s" % to_text_string(e))
    self.shellwidget.refresh_namespacebrowser()
Create new value in data
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
                                       targets: torch.LongTensor,
                                       weights: torch.FloatTensor,
                                       average: str = "batch",
                                       label_smoothing: float = None) -> torch.FloatTensor:
    """
    Computes the cross entropy loss of a sequence, weighted with respect to
    some user provided weights. Note that the weighting here is not the same as
    in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
    classes; here we are weighting the loss contribution from particular elements
    in the sequence. This allows loss computations for models which use padding.

    Parameters
    ----------
    logits : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
        which contains the unnormalized probability for each class.
    targets : ``torch.LongTensor``, required.
        A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
        index of the true class for each corresponding step.
    weights : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch, sequence_length)
    average: str, optional (default = "batch")
        If "batch", average the loss across the batches. If "token", average
        the loss across each item in the input. If ``None``, return a vector
        of losses per batch element.
    label_smoothing : ``float``, optional (default = None)
        Whether or not to apply label smoothing to the cross-entropy loss.
        For example, with a label smoothing value of 0.2, a 4 class classification
        target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
        the correct label.

    Returns
    -------
    A torch.FloatTensor representing the cross entropy loss.
    If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
    If ``average is None``, the returned loss is a vector of shape (batch_size,).
    """
    if average not in {None, "token", "batch"}:
        # BUG FIX: the original message read "Got average f{average}" -- the
        # ``f`` prefix was inside the quotes, so the bad value was never
        # interpolated into the error message.
        raise ValueError(f"Got average {average}, expected one of "
                         "None, 'token', or 'batch'")

    # shape : (batch * sequence_length, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # shape : (batch * sequence_length, num_classes)
    log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
    # shape : (batch * max_len, 1)
    targets_flat = targets.view(-1, 1).long()

    if label_smoothing is not None and label_smoothing > 0.0:
        num_classes = logits.size(-1)
        smoothing_value = label_smoothing / num_classes
        # Fill all the correct indices with 1 - smoothing value.
        one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
        smoothed_targets = one_hot_targets + smoothing_value
        negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
        negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
    else:
        # Contribution to the negative log likelihood only comes from the exact indices
        # of the targets, as the target distributions are one-hot. Here we use torch.gather
        # to extract the indices of the num_classes dimension which contribute to the loss.
        # shape : (batch * sequence_length, 1)
        negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood * weights.float()

    if average == "batch":
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
    elif average == "token":
        return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
    else:
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        return per_batch_loss
Computes the cross entropy loss of a sequence, weighted with respect to some user provided weights. Note that the weighting here is not the same as in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting classes; here we are weighting the loss contribution from particular elements in the sequence. This allows loss computations for models which use padding. Parameters ---------- logits : ``torch.FloatTensor``, required. A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes) which contains the unnormalized probability for each class. targets : ``torch.LongTensor``, required. A ``torch.LongTensor`` of size (batch, sequence_length) which contains the index of the true class for each corresponding step. weights : ``torch.FloatTensor``, required. A ``torch.FloatTensor`` of size (batch, sequence_length) average: str, optional (default = "batch") If "batch", average the loss across the batches. If "token", average the loss across each item in the input. If ``None``, return a vector of losses per batch element. label_smoothing : ``float``, optional (default = None) Whether or not to apply label smoothing to the cross-entropy loss. For example, with a label smoothing value of 0.2, a 4 class classification target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was the correct label. Returns ------- A torch.FloatTensor representing the cross entropy loss. If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar. If ``average is None``, the returned loss is a vector of shape (batch_size,).
def taql(command, style='Python', tables=[], globals={}, locals={}): """Execute a TaQL command and return a table object. A `TaQL <../../doc/199.html>`_ command is an SQL-like command to do a selection of rows and/or columns in a table. The default style used in a TaQL command is python, which means 0-based indexing, C-ordered arrays, and non-inclusive end in ranges. It is possible to use python variables directly in the command using `$var` where `var` is the name of the variable to use. For example:: t = table('3c343.MS') value = 5.1 t1 = taql('select from $t where COL > $value') In this example the table `$t` is replaced by a sequence number (such as `$1`) and `$value` by its value 5.1. The table object of `t` will be appended to a copy of the `tables` argument such that the sequence number inserted matches the table object in the list. The more advanced user can already use `$n` in the query string and supply the associated table object in the `tables` argument (where `n` represents the (n-1)th `tables` element). The :func:`query` command makes use of this feature. The arguments `globals` and `locals` can be used to pass in a dict containing the possible variables used in the TaQL command. They can be obtained with the python functions locals() and globals(). If `locals` is empty, the local variables in the calling function will be used, so normally one does not need to use these arguments. """ # Substitute possible tables given as $name. 
cmd = command # Copy the tables argument and make sure it is a list tabs = [] for tab in tables: tabs += [tab] try: import casacore.util if len(locals) == 0: # local variables in caller are 3 levels up from getlocals locals = casacore.util.getlocals(3) cmd = casacore.util.substitute(cmd, [(table, '', tabs)], globals, locals) except Exception: pass if style: cmd = 'using style ' + style + ' ' + cmd tab = table(cmd, tabs, _oper=2) result = tab._getcalcresult() # If result is empty, it was a normal TaQL command resulting in a table. # Otherwise it is a record containing calc values. if len(result) == 0: return tab return result['values']
Execute a TaQL command and return a table object. A `TaQL <../../doc/199.html>`_ command is an SQL-like command to do a selection of rows and/or columns in a table. The default style used in a TaQL command is python, which means 0-based indexing, C-ordered arrays, and non-inclusive end in ranges. It is possible to use python variables directly in the command using `$var` where `var` is the name of the variable to use. For example:: t = table('3c343.MS') value = 5.1 t1 = taql('select from $t where COL > $value') In this example the table `$t` is replaced by a sequence number (such as `$1`) and `$value` by its value 5.1. The table object of `t` will be appended to a copy of the `tables` argument such that the sequence number inserted matches the table object in the list. The more advanced user can already use `$n` in the query string and supply the associated table object in the `tables` argument (where `n` represents the (n-1)th `tables` element). The :func:`query` command makes use of this feature. The arguments `globals` and `locals` can be used to pass in a dict containing the possible variables used in the TaQL command. They can be obtained with the python functions locals() and globals(). If `locals` is empty, the local variables in the calling function will be used, so normally one does not need to use these arguments.
def save(self):
    """Write the store dict to a file specified by store_file_path"""
    with open(self.store_file_path, 'w') as out:
        # Stream straight to the file handle instead of building the whole
        # JSON string in memory first.
        json.dump(self.store, out, indent=4)
Write the store dict to a file specified by store_file_path
def send_external(self, http_verb, host, url, http_headers, chunk):
    """
    Used with create_upload_url to send a chunk to the possibly external
    object store.

    :param http_verb: str PUT or POST
    :param host: str host we are sending the chunk to
    :param url: str url to use when sending
    :param http_headers: object headers to send with the request
    :param chunk: content to send
    :return: requests.Response containing the successful result
    """
    target = host + url
    if http_verb == 'PUT':
        return self.http.put(target, data=chunk, headers=http_headers)
    if http_verb == 'POST':
        return self.http.post(target, data=chunk, headers=http_headers)
    raise ValueError("Unsupported http_verb:" + http_verb)
Used with create_upload_url to send a chunk to the possibly external object store. :param http_verb: str PUT or POST :param host: str host we are sending the chunk to :param url: str url to use when sending :param http_headers: object headers to send with the request :param chunk: content to send :return: requests.Response containing the successful result
def watch_docs(c):
    """
    Watch both doc trees & rebuild them if files change.

    This includes e.g. rebuilding the API docs if the source code changes;
    rebuilding the WWW docs if the README changes; etc.

    Reuses the configuration values ``packaging.package`` or ``tests.package``
    (the former winning over the latter if both defined) when determining
    which source directory to scan for API doc updates.
    """
    # TODO: break back down into generic single-site version, then create split
    # tasks as with docs/www above. Probably wants invoke#63.
    # NOTE: 'www'/'docs' refer to the module level sub-collections. meh.

    # Readme & WWW triggers WWW
    www_ctx = Context(config=c.config.clone())
    www_ctx.update(**www.configuration())
    www_handler = make_handler(
        ctx=www_ctx,
        task_=www["build"],
        regexes=[r"\./README.rst", r"\./sites/www"],
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/www/_build"],
    )

    # Code and docs trigger API
    docs_ctx = Context(config=c.config.clone())
    docs_ctx.update(**docs.configuration())
    watch_patterns = [r"\./sites/docs"]
    # packaging.package wins over tests.package when both are defined
    package = None
    for section in ("packaging", "tests"):
        package = c.get(section, {}).get("package", None)
        if package is not None:
            break
    if package:
        watch_patterns.append(r"\./{}/".format(package))
    api_handler = make_handler(
        ctx=docs_ctx,
        task_=docs["build"],
        regexes=watch_patterns,
        ignore_regexes=[r".*/\..*\.swp", r"\./sites/docs/_build"],
    )

    observe(www_handler, api_handler)
Watch both doc trees & rebuild them if files change. This includes e.g. rebuilding the API docs if the source code changes; rebuilding the WWW docs if the README changes; etc. Reuses the configuration values ``packaging.package`` or ``tests.package`` (the former winning over the latter if both defined) when determining which source directory to scan for API doc updates.
def iiif_info_handler(prefix=None, identifier=None,
                      config=None, klass=None, auth=None, **args):
    """Handler for IIIF Image Information requests."""
    if (not auth or degraded_request(identifier) or auth.info_authz()):
        # go ahead with request as made
        if (auth):
            logging.debug("Authorized for image %s" % identifier)
        handler = IIIFHandler(prefix, identifier, config, klass, auth)
        try:
            return handler.image_information_response()
        except IIIFError as error:
            return handler.error_response(error)
    if (auth.info_authn()):
        # authn but not authz -> 401
        abort(401)
    else:
        # redirect to degraded info.json
        degraded_url = (host_port_prefix(config.host, config.port, prefix)
                        + '/' + identifier + '-deg/info.json')
        response = redirect(degraded_url)
        response.headers['Access-control-allow-origin'] = '*'
        return response
Handler for IIIF Image Information requests.
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'fields') and self.fields is not None: _dict['fields'] = [x._to_dict() for x in self.fields] return _dict
Return a json dictionary representing this model.
def delete_pipeline(pipeline_id, region=None, key=None, keyid=None, profile=None):
    '''
    Delete a pipeline, its pipeline definition, and its run history.
    This function is idempotent.

    CLI example:

    .. code-block:: bash

        salt myminion boto_datapipeline.delete_pipeline my_pipeline_id
    '''
    result = {}
    client = _get_client(region, key, keyid, profile)
    try:
        client.delete_pipeline(pipelineId=pipeline_id)
    except (botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError) as exc:
        result['error'] = six.text_type(exc)
    else:
        result['result'] = True
    return result
Delete a pipeline, its pipeline definition, and its run history. This function is idempotent. CLI example: .. code-block:: bash salt myminion boto_datapipeline.delete_pipeline my_pipeline_id
def get_facts(self, date, end_date=None, search_terms="", ongoing_days=0):
    """Return facts for the time span matching the optional filter criteria.

    In search terms comma (",") translates to boolean OR and space (" ")
    to boolean AND.
    Filter is applied to tags, categories, activity names and description

    ongoing_days (int): look into the last `ongoing_days` days
                        for still ongoing activities
    """
    result = []
    if ongoing_days:
        # look for still ongoing activities started in the recent past
        window_start = date - dt.timedelta(days=ongoing_days)
        window_end = date - dt.timedelta(days=1)
        candidates = self._get_facts(window_start, window_end,
                                     search_terms=search_terms)
        result.extend(fact for fact in candidates if not fact.end_time)
    # add facts between date and end_date
    result.extend(self._get_facts(date, end_date, search_terms=search_terms))
    return result
Returns facts for the time span matching the optional filter criteria. In search terms comma (",") translates to boolean OR and space (" ") to boolean AND. Filter is applied to tags, categories, activity names and description ongoing_days (int): look into the last `ongoing_days` days for still ongoing activities
def system_monitor_LineCard_alert_state(self, **kwargs):
    """Auto Generated Code
    """
    # Build config/system-monitor/LineCard/alert/state and hand the tree to
    # the callback (defaults to the instance callback).
    config = ET.Element("config")
    system_monitor = ET.SubElement(
        config, "system-monitor",
        xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    line_card = ET.SubElement(system_monitor, "LineCard")
    alert = ET.SubElement(line_card, "alert")
    state = ET.SubElement(alert, "state")
    state.text = kwargs.pop('state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def _registerHandler(self, handler): """ Registers a handler. :param handler: A handler object. """ self._logger.addHandler(handler) self._handlers.append(handler)
Registers a handler. :param handler: A handler object.
def from_jsonld(cls, data, __reference__=None, __source__=None):
    """Instantiate a JSON-LD class from data.

    Returns ``data`` unchanged if it is already an instance of ``cls``;
    raises ValueError for anything that is not a dict. May dispatch to a
    subclass registered for the data's ``@type`` before constructing.
    """
    if isinstance(data, cls):
        return data
    if not isinstance(data, dict):
        raise ValueError(data)
    if '@type' in data:
        # Dispatch to the class registered for this @type, unless we are
        # already that class (avoids infinite recursion).
        type_ = tuple(sorted(data['@type']))
        if type_ in cls.__type_registry__ and getattr(
                cls, '_jsonld_type', None) != type_:
            new_cls = cls.__type_registry__[type_]
            if cls != new_cls:
                return new_cls.from_jsonld(data)
    if cls._jsonld_translate:
        # NOTE(review): translation compacts against a translation context
        # and then strips @context so the class context is applied below.
        data = ld.compact(data, {'@context': cls._jsonld_translate})
        data.pop('@context', None)
    data.setdefault('@context', cls._jsonld_context)
    if data['@context'] != cls._jsonld_context:
        # Re-compact into the class's own context before reading fields.
        compacted = ld.compact(data, {'@context': cls._jsonld_context})
    else:
        compacted = data
    # assert compacted['@type'] == cls._jsonld_type, '@type must be equal'
    # TODO update self(not cls)._jsonld_context with data['@context']
    fields = cls._jsonld_fields
    if __reference__:
        # Construct inside the reference context manager so attribute
        # references resolve against __reference__.
        with with_reference(__reference__):
            self = cls(
                **{
                    k.lstrip('_'): v
                    for k, v in compacted.items() if k in fields
                }
            )
    else:
        self = cls(
            **{
                k.lstrip('_'): v
                for k, v in compacted.items() if k in fields
            }
        )
    if __source__:
        setattr(self, '__source__', __source__)
    return self
Instantiate a JSON-LD class from data.
def purge(self, name=None):
    """
    Disconnect from the given database and remove from local cache

    :param name: The name of the connection
    :type name: str

    :rtype: None
    """
    self.disconnect(name)
    # pop() replaces the membership-test-plus-delete double lookup and is a
    # no-op when the connection is not cached.
    self._connections.pop(name, None)
Disconnect from the given database and remove from local cache :param name: The name of the connection :type name: str :rtype: None
def MakeZip(self, input_dir, output_file):
    """Creates a ZIP archive of the files in the input directory.

    Args:
      input_dir: the name of the input directory.
      output_file: the name of the output ZIP archive without extension.
    """
    logging.info("Generating zip template file at %s", output_file)
    archive_base = os.path.splitext(output_file)[0]
    # TODO(user):pytype: incorrect make_archive() definition in typeshed.
    # pytype: disable=wrong-arg-types
    shutil.make_archive(
        archive_base, "zip", base_dir=".", root_dir=input_dir, verbose=True)
Creates a ZIP archive of the files in the input directory. Args: input_dir: the name of the input directory. output_file: the name of the output ZIP archive without extension.
def probe_git():
    """Return a git repository instance if it exists."""
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        # Not fatal: just nudge the user towards version control.
        LOGGER.warning(
            "We highly recommend keeping your model in a git repository."
            " It allows you to track changes and to easily collaborate with"
            " others via online platforms such as https://github.com.\n")
        return None
    if repo.is_dirty():
        # Refuse to run against uncommitted changes.
        LOGGER.critical(
            "Please git commit or git stash all changes before running"
            " the memote suite.")
        sys.exit(1)
    return repo
Return a git repository instance if it exists.
def get_upstream_artifacts_full_paths_per_task_id(context):
    """List the downloaded upstream artifacts.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict, dict: lists of the paths to upstream artifacts, sorted by task_id.
            First dict represents the existing upstream artifacts. The second
            one maps the optional artifacts that couldn't be downloaded

    Raises:
        scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
    """
    upstream_artifacts = context.task['payload']['upstreamArtifacts']
    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)

    found_paths_per_task_id = {}
    failed_paths_per_task_id = {}
    for definition in upstream_artifacts:
        task_id = definition['taskId']
        for path in definition['paths']:
            try:
                full_path = get_and_check_single_upstream_artifact_full_path(context, task_id, path)
                add_enumerable_item_to_dict(
                    dict_=found_paths_per_task_id, key=task_id, item=full_path
                )
            except ScriptWorkerTaskException:
                # Missing mandatory artifacts abort the task; missing
                # optional ones are only recorded.
                if path not in optional_artifacts_per_task_id.get(task_id, []):
                    raise
                log.warning('Optional artifact "{}" of task "{}" not found'.format(path, task_id))
                add_enumerable_item_to_dict(
                    dict_=failed_paths_per_task_id, key=task_id, item=path
                )

    return found_paths_per_task_id, failed_paths_per_task_id
List the downloaded upstream artifacts. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict, dict: lists of the paths to upstream artifacts, sorted by task_id. First dict represents the existing upstream artifacts. The second one maps the optional artifacts that couldn't be downloaded Raises: scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
def get_annotationdefault(self):
    """
    The AnnotationDefault attribute, only present upon fields in an
    annotation.

    reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
    """  # noqa
    buff = self.get_attribute("AnnotationDefault")
    if buff is None:
        # attribute absent on this member
        return None
    with unpack(buff) as up:
        (type_index, ) = up.unpack_struct(_H)
    return type_index
The AnnotationDefault attribute, only present upon fields in an annotation. reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
def store_array_elements(self, array, start_idx, data): """ Stores either a single element or a range of elements in the array. :param array: Reference to the array. :param start_idx: Starting index for the store. :param data: Either a single value or a list of values. """ # we process data as a list of elements # => if there is only a single element, wrap it in a list data = data if isinstance(data, list) else [data] # concretize start index concrete_start_idxes = self.concretize_store_idx(start_idx) if len(concrete_start_idxes) == 1: # only one start index # => concrete store concrete_start_idx = concrete_start_idxes[0] for i, value in enumerate(data): self._store_array_element_on_heap(array=array, idx=concrete_start_idx+i, value=value, value_type=array.element_type) # if the index was symbolic before concretization, this # constraint it to concrete start idx self.state.solver.add(concrete_start_idx == start_idx) else: # multiple start indexes # => symbolic store start_idx_options = [] for concrete_start_idx in concrete_start_idxes: start_idx_options.append(concrete_start_idx == start_idx) # we store elements condtioned with the start index: # => if concrete_start_idx == start_idx # then store the value # else keep the current value for i, value in enumerate(data): self._store_array_element_on_heap(array=array, idx=concrete_start_idx+i, value=value, value_type=array.element_type, store_condition=start_idx_options[-1]) # constraint start_idx, s.t. it evals to one of the concretized indexes constraint_on_start_idx = self.state.solver.Or(*start_idx_options) self.state.add_constraints(constraint_on_start_idx)
Stores either a single element or a range of elements in the array. :param array: Reference to the array. :param start_idx: Starting index for the store. :param data: Either a single value or a list of values.
def get_airport_weather(self, iata, page=1, limit=100):
    """Retrieve the weather at an airport

    Given the IATA code of an airport, this method returns the weather
    information.

    Args:
        iata (str): The IATA code for an airport, e.g. HYD
        page (int): Optional page number; for users who are on a plan with
            flightradar24 they can pass in higher page numbers to get more
            data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts with the data; one dict for each row of data from
        flightradar24

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        #optional login
        f.login(myemail,mypassword)
        f.get_airport_weather('HYD')
        f.get_airport_weather('HYD',page=1,limit=10)
    """
    url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
    weather = self._fr24.get_airport_weather(url)
    visibility = weather['sky']['visibility']
    miles = visibility['mi']
    # Mirror the mile figure as kilometres when a usable value is present
    # (the feed sometimes reports the string "None").
    if miles not in (None, "None"):
        visibility['km'] = float(miles) * 1.6094
    return weather
Retrieve the weather at an airport Given the IATA code of an airport, this method returns the weather information. Args: iata (str): The IATA code for an airport, e.g. HYD page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_airport_weather('HYD') f.get_airport_weather('HYD',page=1,limit=10)
def apply_norm(x, norm_type, depth, epsilon, layer_collection=None):
  """Apply the requested normalization to a tensor.

  Args:
    x: input tensor.
    norm_type: one of "layer", "group", "batch", "noam", "l2" or "none".
    depth: number of filters/channels, forwarded to the normalizers.
    epsilon: numerical-stability constant forwarded to the normalizers.
    layer_collection: optional KFAC LayerCollection; only supported with
      norm_type "layer".

  Returns:
    The normalized tensor (``x`` unchanged for "none").

  Raises:
    ValueError: if ``norm_type`` is not one of the supported values.
  """
  if layer_collection is not None:
    assert norm_type == "layer"
  if norm_type == "layer":
    return layer_norm(
        x, filters=depth, epsilon=epsilon, layer_collection=layer_collection)
  if norm_type == "group":
    return group_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "batch":
    return layers().BatchNormalization(epsilon=epsilon)(x)
  if norm_type == "noam":
    return noam_norm(x, epsilon)
  if norm_type == "l2":
    return l2_norm(x, filters=depth, epsilon=epsilon)
  if norm_type == "none":
    return x
  # BUG FIX: the old message advertised a nonexistent 'lr' option and
  # omitted the supported 'group', 'l2' and 'none' options.
  raise ValueError("Parameter normalizer_fn must be one of: 'layer', "
                   "'group', 'batch', 'noam', 'l2', 'none'.")
Apply Normalization.
def getAuthenticatedRole(store):
    """
    Get the base 'Authenticated' role for this store, which is the role that
    is given to every user who is explicitly identified by a non-anonymous
    username.
    """
    def _findOrCreate():
        def _joinEveryone(role):
            # A freshly created Authenticated role is always a member of
            # the Everyone role.
            role.becomeMemberOf(getEveryoneRole(store))
            return role
        return store.findOrCreate(Role, _joinEveryone,
                                  externalID=u'Authenticated')
    return store.transact(_findOrCreate)
Get the base 'Authenticated' role for this store, which is the role that is given to every user who is explicitly identified by a non-anonymous username.
def get_resource_children(raml_resource):
    """ Get children of :raml_resource:.

    :param raml_resource: Instance of ramlfications.raml.ResourceNode.
    """
    parent_path = raml_resource.path
    children = []
    for candidate in raml_resource.root.resources:
        # A child is any resource whose parent path matches ours.
        if candidate.parent and candidate.parent.path == parent_path:
            children.append(candidate)
    return children
Get children of :raml_resource:. :param raml_resource: Instance of ramlfications.raml.ResourceNode.
def Struct_plot(lS, lax=None, proj='all', element=None, dP=None,
                dI=None, dBs=None, dBv=None,
                dVect=None, dIHor=None, dBsHor=None,
                dBvHor=None, Lim=None, Nstep=None, dLeg=None,
                indices=False, draw=True, fs=None, wintit=None, tit=None,
                Test=True):
    """ Plot the projections of a list of Struct subclass instances

    D. VEZINET, Aug. 2014

    Inputs :
        V           A Ves instance
        Nstep       An int (the number of points for evaluation of theta
                    by np.linspace)
        axP         A plt.Axes instance (if given) on which to plot the
                    poloidal projection, otherwise ('None') a new
                    figure/axes is created
        axT         A plt.Axes instance (if given) on which to plot the
                    toroidal projection, otherwise ('None') a new
                    figure/axes is created
        Tdict       A dictionary specifying the style of the polygon plot
        dLeg        A dictionary specifying the style of the legend box
                    (if None => no legend)

    Outputs :
        axP          The plt.Axes instance on which the poloidal plot was
                     performed
        axT          The plt.Axes instance on which the toroidal plot was
                     performed
    """
    proj = proj.lower()
    if Test:
        # Input validation: projection keyword, axes container, and that
        # lS is a ToFuObject or a list of them.
        msg = "Arg proj must be in ['cross','hor','all','3d'] !"
        assert proj in ['cross','hor','all','3d'], msg
        lax, C0, C1, C2 = _check_Lax(lax,n=2)
        assert type(draw) is bool, "Arg draw must be a bool !"
        C0 = issubclass(lS.__class__, utils.ToFuObject)
        C1 = (isinstance(lS,list)
              and all([issubclass(ss.__class__, utils.ToFuObject)
                       for ss in lS]))
        msg = "Arg lves must be a Struct subclass or a list of such !"
        assert C0 or C1, msg

    # NOTE(review): C0 and the normalized lax are only assigned inside the
    # Test branch above — calling with Test=False looks like it would raise
    # a NameError here; confirm intended usage.
    if C0:
        # Normalize the single-instance case to a one-element list.
        lS = [lS]
    nS = len(lS)
    if wintit is None:
        wintit = _wintit
    # Keyword args shared by every per-instance plot call below.
    kwa = dict(fs=fs, wintit=wintit, Test=Test)
    if proj=='3d':
        # Temporary matplotlib issue
        dLeg = None

    # Plot each Struct instance onto the shared axes.
    for ii in range(0,nS):
        # Build the per-projection plotting style dictionaries.
        dplot = _Struct_plot_format(lS[ii], proj=proj, Elt=element,
                                    dP=dP, dI=dI, dBs=dBs, dBv=dBv,
                                    dVect=dVect, dIHor=dIHor,
                                    dBsHor=dBsHor, dBvHor=dBvHor,
                                    Lim=Lim, Nstep=Nstep)
        for k in dplot.keys():
            dplot[k].update(kwa)

        if proj=='3d':
            lax[0] = _Plot_3D_plt_Ves(lS[ii], ax=lax[0], LegDict=None,
                                      draw=False, **dplot[proj])
        else:
            if proj=='cross':
                lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0],
                                             indices=indices,
                                             LegDict=None, draw=False,
                                             **dplot[proj])
            elif proj=='hor':
                lax[0] = _Plot_HorProj_Ves(lS[ii], ax=lax[0],
                                           indices=indices,
                                           LegDict=None, draw=False,
                                           **dplot[proj])
            elif proj=='all':
                # 'all' needs two axes: create the default pair if either
                # is missing.
                if lax[0] is None or lax[1] is None:
                    lax = list(_def.Plot_LOSProj_DefAxes(
                        'All', fs=fs, wintit=wintit,
                        Type=lS[ii].Id.Type))
                lax[0] = _Plot_CrossProj_Ves(lS[ii], ax=lax[0],
                                             LegDict=None,
                                             indices=indices,
                                             draw=False, **dplot['cross'])
                lax[1] = _Plot_HorProj_Ves(lS[ii], ax=lax[1],
                                           LegDict=None,
                                           indices=indices,
                                           draw=False, **dplot['hor'])

    # recompute the ax.dataLim
    lax[0].relim()
    if proj=='all':
        lax[1].relim()
    # update ax.viewLim using the new dataLim
    lax[0].autoscale_view()
    if proj=='all':
        lax[1].autoscale_view()

    if tit is not None:
        lax[0].figure.suptitle(tit)

    if not dLeg is None:
        lax[0].legend(**dLeg)
    if draw:
        # Refresh limits once more and push the result to the canvas.
        lax[0].relim()
        lax[0].autoscale_view()
        if len(lax)==2 and lax[1] is not None:
            lax[1].relim()
            lax[1].autoscale_view()
        lax[0].figure.canvas.draw()
    # Return both axes for 'all', a single Axes otherwise.
    lax = lax if proj=='all' else lax[0]
    return lax
Plot the projections of a list of Struct subclass instances D. VEZINET, Aug. 2014 Inputs : V A Ves instance Nstep An int (the number of points for evaluation of theta by np.linspace) axP A plt.Axes instance (if given) on which to plot the poloidal projection, otherwise ('None') a new figure/axes is created axT A plt.Axes instance (if given) on which to plot the toroidal projection, otherwise ('None') a new figure/axes is created Tdict A dictionary specifying the style of the polygon plot dLeg A dictionary specifying the style of the legend box (if None => no legend) Outputs : axP The plt.Axes instance on which the poloidal plot was performed axT The plt.Axes instance on which the toroidal plot was performed
def refresh_timer_cb(self, timer, flags):
    """Timer callback driving the refresh loop.

    Normally only called internally, from the GUI thread.

    Parameters
    ----------
    timer : a Ginga GUI timer
        A GUI-based Ginga timer
    flags : dict-like
        A set of flags controlling the timer
    """
    now = time.time()

    if flags.get('done', False):
        # Refresh loop has been stopped; do not reschedule.
        return

    # Record this tick's deadline and advance bookkeeping for the next one.
    due = self.rf_deadline
    self.rf_deadline += self.rf_rate
    self.rf_timer_count += 1

    drift = abs(now - due)
    self.rf_delta_total += drift
    correction = 0.0

    if now > due:
        # Late tick: accumulate stats and nudge the next delay earlier.
        self.rf_late_total += drift
        self.rf_late_count += 1
        correction = -(self.rf_late_total / self.rf_late_count) / 2.0

        self.rf_skip_total += drift
        if self.rf_skip_total < self.rf_rate:
            self.rf_draw_count += 1
            # TODO: can we optimize whence?
            self.redraw_now(whence=0)
        else:
            # Behind by a full frame's worth of time: skip this redraw
            # to try to catch up, and reset the skip accumulator.
            self.rf_skip_total = 0
    else:
        if now < due:
            # Early tick: accumulate stats and nudge the next delay later.
            self.rf_early_total += drift
            self.rf_early_count += 1
            self.rf_skip_total = max(0.0, self.rf_skip_total - drift)
            correction = (self.rf_early_total / self.rf_early_count) / 4.0

        self.rf_draw_count += 1
        # TODO: can we optimize whence?
        self.redraw_now(whence=0)

    # Schedule the next tick relative to the (already advanced) deadline.
    timer.start(max(0.0, self.rf_deadline - time.time() + correction))
Refresh timer callback. This callback will normally only be called internally. Parameters ---------- timer : a Ginga GUI timer A GUI-based Ginga timer flags : dict-like A set of flags controlling the timer
def pause(self):
    """Pauses every snippet client under management.

    Each client's host port is cleared because a fresh port will be
    allocated when `resume` is called.
    """
    debug = self._device.log.debug
    for snippet_client in self._snippet_clients.values():
        debug('Clearing host port %d of SnippetClient<%s>.',
              snippet_client.host_port, snippet_client.package)
        snippet_client.clear_host_port()
Pauses all the snippet clients under management. This clears the host port of a client because a new port will be allocated in `resume`.
def get_storage(self, contract_hash, storage_key, id=None, endpoint=None):
    """
    Look up a single storage item of a deployed contract.

    Args:
        contract_hash: (str) hash of the contract to lookup, for example
            'd7678dd97c000be3f33e9362e673101bac4ca654'
        storage_key: (str) storage key to lookup, for example 'totalSupply'
        id: (int, optional) id to use for response tracking
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        bytearray: bytearray value of the storage item
    """
    # The RPC API expects the storage key hex-encoded.
    hex_key = binascii.hexlify(storage_key.encode('utf-8')).decode('utf-8')
    result = self._call_endpoint(GET_STORAGE,
                                 params=[contract_hash, hex_key],
                                 id=id, endpoint=endpoint)
    try:
        # The RPC result is itself hex-encoded; decode it into raw bytes.
        return bytearray(binascii.unhexlify(result.encode('utf-8')))
    except Exception as e:
        raise NEORPCException("could not decode result %s " % e)
Returns a storage item of a specified contract Args: contract_hash: (str) hash of the contract to lookup, for example 'd7678dd97c000be3f33e9362e673101bac4ca654' storage_key: (str) storage key to lookup, for example 'totalSupply' id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: bytearray: bytearray value of the storage item
def updateWritingTone(user, writingTone, maintainHistory):
    """
    updateWritingTone updates the user with the writing tones interpreted
    based on the specified thresholds

    @param: user a json object representing user information (tone) to be
    used in conversing with the Conversation Service
    @param: writingTone a json object containing the writing tones in the
    payload returned by the Tone Analyzer
    """
    tone_labels = []
    tone_records = []

    # Classify each writing tone as high / no-evidence / medium based on
    # its score relative to the configured thresholds.
    for tone in writingTone['tones']:
        name = tone['tone_name'].lower()
        score = tone['score']
        if score >= WRITING_HIGH_SCORE_THRESHOLD:
            # Only high-scoring tones contribute to the "current" summary.
            tone_labels.append(name + '_high')
            interpretation = 'likely high'
        elif score <= WRITING_NO_SCORE_THRESHOLD:
            interpretation = 'no evidence'
        else:
            interpretation = 'likely medium'
        tone_records.append({
            'tone_name': name,
            'score': score,
            'interpretation': interpretation
        })

    # update user writing tone
    writing = user['tone']['writing']
    writing['current'] = tone_labels
    if maintainHistory:
        writing.setdefault('history', []).append(tone_records)
updateWritingTone updates the user with the writing tones interpreted based on the specified thresholds @param: user a json object representing user information (tone) to be used in conversing with the Conversation Service @param: writingTone a json object containing the writing tones in the payload returned by the Tone Analyzer
def getSiblings(self, textId, subreference: CtsReference):
    """ Fetch the sibling references of a textual node

    :param textId: CtsTextMetadata Identifier
    :type textId: str
    :param subreference: CapitainsCtsPassage CtsReference
    :type subreference: str
    :return: Tuple of references
    :rtype: (str, str)
    """
    text, inventory = self.__getText__(textId)
    # Coerce plain strings into a CtsReference before resolving the node.
    ref = subreference if isinstance(subreference, CtsReference) \
        else CtsReference(subreference)
    node = text.getTextualNode(ref)
    return node.siblingsId
Retrieve the siblings of a textual node :param textId: CtsTextMetadata Identifier :type textId: str :param subreference: CapitainsCtsPassage CtsReference :type subreference: str :return: Tuple of references :rtype: (str, str)
def file_transfer(
    ssh_conn,
    source_file,
    dest_file,
    file_system=None,
    direction="put",
    disable_md5=False,
    inline_transfer=False,
    overwrite_file=False,
):
    """Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices.

    inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers.

    return {
        'file_exists': boolean,
        'file_transferred': boolean,
        'file_verified': boolean,
    }
    """
    # The three possible successful outcomes, distinguished by whether the
    # destination file already existed, whether bytes were actually moved,
    # and whether an MD5 check confirmed the result.
    transferred_and_verified = {
        "file_exists": True,
        "file_transferred": True,
        "file_verified": True,
    }
    transferred_and_notverified = {
        "file_exists": True,
        "file_transferred": True,
        "file_verified": False,
    }
    nottransferred_but_verified = {
        "file_exists": True,
        "file_transferred": False,
        "file_verified": True,
    }

    # Inline transfer is implemented via the IOS CLI, so it is only
    # available on Cisco IOS / IOS-XE device types.
    if "cisco_ios" in ssh_conn.device_type or "cisco_xe" in ssh_conn.device_type:
        cisco_ios = True
    else:
        cisco_ios = False
    if not cisco_ios and inline_transfer:
        raise ValueError("Inline Transfer only supported for Cisco IOS/Cisco IOS-XE")

    scp_args = {
        "ssh_conn": ssh_conn,
        "source_file": source_file,
        "dest_file": dest_file,
        "direction": direction,
    }
    if file_system is not None:
        scp_args["file_system"] = file_system

    TransferClass = InLineTransfer if inline_transfer else FileTransfer

    with TransferClass(**scp_args) as scp_transfer:
        if scp_transfer.check_file_exists():
            if overwrite_file:
                if not disable_md5:
                    if scp_transfer.compare_md5():
                        # Destination already matches the source; no
                        # transfer needed.
                        return nottransferred_but_verified
                    else:
                        # File exists, you can overwrite it, MD5 is wrong (transfer file)
                        verifyspace_and_transferfile(scp_transfer)
                        if scp_transfer.compare_md5():
                            return transferred_and_verified
                        else:
                            raise ValueError(
                                "MD5 failure between source and destination files"
                            )
                else:
                    # File exists, you can overwrite it, but MD5 not allowed (transfer file)
                    verifyspace_and_transferfile(scp_transfer)
                    return transferred_and_notverified
            else:
                # File exists, but you can't overwrite it.
                if not disable_md5:
                    if scp_transfer.compare_md5():
                        # Existing file already matches the source;
                        # treat as success without transferring.
                        return nottransferred_but_verified
                msg = "File already exists and overwrite_file is disabled"
                raise ValueError(msg)
        else:
            verifyspace_and_transferfile(scp_transfer)
            # File doesn't exist
            if not disable_md5:
                if scp_transfer.compare_md5():
                    return transferred_and_verified
                else:
                    raise ValueError("MD5 failure between source and destination files")
            else:
                return transferred_and_notverified
Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices. inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers. return { 'file_exists': boolean, 'file_transferred': boolean, 'file_verified': boolean, }
def json_encode_default(obj):
    '''
    Convert datetime.datetime to timestamp

    :param obj: value to (possibly) convert
    '''
    # Dates/datetimes become timestamps; everything else is delegated to
    # the shared json encoder.  Either way, normalize the encoding.
    if isinstance(obj, (datetime, date)):
        return to_encoding(dt2ts(obj))
    return to_encoding(json_encoder.default(obj))
Convert datetime.datetime to timestamp :param obj: value to (possibly) convert
def _create_and_update(self):
    """
    Push this object's JSON representation to Artifactory (create or
    update), then re-read the object so local state matches the server.

    :return: None
    """
    payload = self._create_json()
    payload.update(self.additional_params)
    request_url = '%s/api/%s/%s' % (self._artifactory.drive, self._uri,
                                    self.name)
    response = self._session.put(
        request_url,
        json=payload,
        headers={'Content-Type': 'application/json'},
        auth=self._auth,
    )
    response.raise_for_status()
    # Artifactory applies some changes asynchronously; wait briefly
    # before re-reading the object.
    rest_delay()
    self.read()
Create or update request, re-read object from Artifactory :return: None
def removeconfounds(self, confounds=None, clean_params=None, transpose=None,
                    njobs=None, update_pipeline=True, overwrite=True,
                    tag=None):
    """
    Removes specified confounds using nilearn.signal.clean

    Parameters
    ----------
    confounds : list
        List of confounds. Can be prespecified in set_confounds
    clean_params : dict
        Dictionary of kwargs to pass to nilearn.signal.clean
    transpose : bool (default False)
        Default removeconfounds works on time,node dimensions. Pass
        transpose=True to transpose pre and post confound removal.
    njobs : int
        Number of jobs. Otherwise tenetoBIDS.njobs is run.
    update_pipeline : bool
        update pipeline with '_clean' tag for new files created
    overwrite : bool
    tag : str

    Returns
    -------
    Saves all TenetoBIDS.get_selected_files with confounds removed with
    _rmconfounds at the end.

    Note
    ----
    There may be some issues regarding loading non-cleaned data through
    the TenetoBIDS functions instead of the cleaned data. This depends on
    when you clean the data.
    """
    if not njobs:
        njobs = self.njobs
    # Record this call (function name + arguments) in the object's history.
    self.add_history(inspect.stack()[0][3], locals(), 1)

    # NOTE(review): the message below says 'set_confunds' — looks like a
    # typo for 'set_confounds', but it is a runtime string so it is left
    # unchanged here.
    if not self.confounds and not confounds:
        raise ValueError(
            'Specified confounds are not found. Make sure that you have run self.set_confunds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')

    # Normalize the BIDS 'desc-' tag for output filenames.
    if not tag:
        tag = ''
    else:
        tag = 'desc-' + tag

    if confounds:
        self.set_confounds(confounds)
    # Pair each data file with its confound file before cleaning.
    files = sorted(self.get_selected_files(quiet=1))
    confound_files = sorted(
        self.get_selected_files(quiet=1, pipeline='confound'))
    files, confound_files = confound_matching(files, confound_files)
    if not clean_params:
        clean_params = {}

    # Clean each file in a separate process; re-raise any worker error
    # via j.result().
    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(
            self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
        for j in as_completed(job):
            j.result()

    # Point subsequent processing at the newly written cleaned files.
    self.set_pipeline('teneto_' + teneto.__version__)
    self.set_bids_suffix('roi')
    if tag:
        self.set_bids_tags({'desc': tag.split('-')[1]})
Removes specified confounds using nilearn.signal.clean Parameters ---------- confounds : list List of confounds. Can be prespecified in set_confounds clean_params : dict Dictionary of kwargs to pass to nilearn.signal.clean transpose : bool (default False) Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal. njobs : int Number of jobs. Otherwise tenetoBIDS.njobs is run. update_pipeline : bool update pipeline with '_clean' tag for new files created overwrite : bool tag : str Returns ------- Saves all TenetoBIDS.get_selected_files with confounds removed with _rmconfounds at the end. Note ---- There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data.