positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def send(self, from_, to, subject, text='', html='', cc=[], bcc=[], headers={}, attachments=[]): """ Send an email. """ if isinstance(to, string_types): raise TypeError('"to" parameter must be enumerable') if text == '' and html == '': raise ValueError('"text" and "html" must not both be empty') return self._session.post('{}/send'.format(self._url), json={ 'from': from_, 'to': to, 'cc': cc, 'bcc': bcc, 'subject': subject, 'headers': headers, 'text': text, 'html': html, 'attachments': list(self._process_attachments(attachments)), }).json()
Send an email.
def plot_paired(data=None, dv=None, within=None, subject=None, order=None, boxplot=True, figsize=(4, 4), dpi=100, ax=None, colors=['green', 'grey', 'indianred'], pointplot_kwargs={'scale': .6, 'markers': '.'}, boxplot_kwargs={'color': 'lightslategrey', 'width': .2}): """ Paired plot. Parameters ---------- data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova') >>> df = df.query("Group == 'Meditation' and Subject > 40") >>> df = df.query("Time == 'August' or Time == 'June'") >>> import pingouin as pg >>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', ... subject='Subject', dpi=150) Paired plot on an existing axis (no boxplot and uniform color): .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova').query("Time != 'January'") >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4)) >>> pg.plot_paired(data=df[df['Group'] == 'Meditation'], ... dv='Scores', within='Time', subject='Subject', ... ax=ax1, boxplot=False, ... colors=['grey', 'grey', 'grey']) # doctest: +SKIP """ from pingouin.utils import _check_dataframe, remove_rm_na # Validate args _check_dataframe(data=data, dv=dv, within=within, subject=subject, effects='within') # Remove NaN values data = remove_rm_na(dv=dv, within=within, subject=subject, data=data) # Extract subjects subj = data[subject].unique() # Extract within-subject level (alphabetical order) x_cat = np.unique(data[within]) assert len(x_cat) == 2, 'Within must have exactly two unique levels.' if order is None: order = x_cat else: assert len(order) == 2, 'Order must have exactly two elements.' # Start the plot if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) for idx, s in enumerate(subj): tmp = data.loc[data[subject] == s, [dv, within, subject]] x_val = tmp[tmp[within] == order[0]][dv].values[0] y_val = tmp[tmp[within] == order[1]][dv].values[0] if x_val < y_val: color = colors[0] elif x_val > y_val: color = colors[2] elif x_val == y_val: color = colors[1] # Plot individual lines using Seaborn sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color, ax=ax, **pointplot_kwargs) if boxplot: sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax, **boxplot_kwargs) # Despine and trim sns.despine(trim=True, ax=ax) return ax
Paired plot. Parameters ---------- data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova') >>> df = df.query("Group == 'Meditation' and Subject > 40") >>> df = df.query("Time == 'August' or Time == 'June'") >>> import pingouin as pg >>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', ... subject='Subject', dpi=150) Paired plot on an existing axis (no boxplot and uniform color): .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova').query("Time != 'January'") >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4)) >>> pg.plot_paired(data=df[df['Group'] == 'Meditation'], ... dv='Scores', within='Time', subject='Subject', ... ax=ax1, boxplot=False, ... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
def activate_right(self, token): """Make a copy of the received token and call `_activate_right`.""" watchers.MATCHER.debug( "Node <%s> activated right with token %r", self, token) return self._activate_right(token.copy())
Make a copy of the received token and call `_activate_right`.
def dskstl(keywrd, dpval): """ Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return: """ keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(dpval) libspice.dskstl_c(keywrd, dpval)
Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return:
def container_to_etree(obj, parent=None, to_str=None, **options): """ Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"} """ if to_str is None: to_str = _to_str_fn(**options) if not anyconfig.utils.is_dict_like(obj): if parent is not None and obj: parent.text = to_str(obj) # Parent is a leaf text node. return parent # All attributes and text should be set already. options = _complement_tag_options(options) (attrs, text, children) = operator.itemgetter(*_ATC)(options) for key, val in anyconfig.compat.iteritems(obj): if key == attrs: _elem_set_attrs(val, parent, to_str) elif key == text: parent.text = to_str(val) elif key == children: for celem in _elem_from_descendants(val, **options): parent.append(celem) else: parent = _get_or_update_parent(key, val, to_str, parent=parent, **options) return ET.ElementTree(parent)
Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"}
def set_node_attr(self, name, attr, value): ''' API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. ''' self.get_node(name).set_attr(attr, value)
API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated.
def recreate_article_body(self): ''' Handles case where article body contained page or image. Assumes all articles and images have been created. ''' for foreign_id, body in iteritems(self.record_keeper.article_bodies): try: local_page_id = self.record_keeper.get_local_page(foreign_id) page = Page.objects.get(id=local_page_id).specific # iterate through the body new_body = [] for item in body: if not item['value']: continue if item['type'] == 'page': new_page_id = self.record_keeper.get_local_page( item['value']) item['value'] = new_page_id elif item['type'] == 'image': new_image_id = self.record_keeper.get_local_image( item['value']) item['value'] = new_image_id new_body.append(item) setattr(page, 'body', json.dumps(new_body)) page.save_revision().publish() except Exception as e: self.log(ERROR, "recreating article body", { "exception": e, "foreign_id": foreign_id, "body": body, }, depth=1)
Handles case where article body contained page or image. Assumes all articles and images have been created.
def surfacemass(self,R,romberg=False,nsigma=None,relative=False): """ NAME: surfacemass PURPOSE: calculate the surface-mass at R by marginalizing over velocity INPUT: R - radius at which to calculate the surfacemass density (can be Quantity) OPTIONAL INPUT: nsigma - number of sigma to integrate the velocities over KEYWORDS: romberg - if True, use a romberg integrator (default: False) OUTPUT: surface mass at R HISTORY: 2010-03-XX - Written - Bovy (NYU) """ if nsigma == None: nsigma= _NSIGMA logSigmaR= self.targetSurfacemass(R,log=True,use_physical=False) sigmaR2= self.targetSigma2(R,use_physical=False) sigmaR1= sc.sqrt(sigmaR2) logsigmaR2= sc.log(sigmaR2) if relative: norm= 1. else: norm= sc.exp(logSigmaR) #Use the asymmetric drift equation to estimate va va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1. -R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True) -R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True)) if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center if romberg: return sc.real(bovy_dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, [R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma], tol=10.**-8)/sc.pi*norm) else: return integrate.dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, (R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma), epsrel=_EPSREL)[0]/sc.pi*norm
NAME: surfacemass PURPOSE: calculate the surface-mass at R by marginalizing over velocity INPUT: R - radius at which to calculate the surfacemass density (can be Quantity) OPTIONAL INPUT: nsigma - number of sigma to integrate the velocities over KEYWORDS: romberg - if True, use a romberg integrator (default: False) OUTPUT: surface mass at R HISTORY: 2010-03-XX - Written - Bovy (NYU)
def msvs_parse_version(s): """ Split a Visual Studio version, which may in fact be something like '7.0Exp', into is version number (returned as a float) and trailing "suite" portion. """ num, suite = version_re.match(s).groups() return float(num), suite
Split a Visual Studio version, which may in fact be something like '7.0Exp', into is version number (returned as a float) and trailing "suite" portion.
def _get_raw_objects(self): """ Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object. """ if not hasattr(self, '_raw_objects'): result = self._client.get(type(self).api_endpoint, model=self) # I want to cache this to avoid making duplicate requests, but I don't # want it in the __init__ self._raw_objects = result # pylint: disable=attribute-defined-outside-init return self._raw_objects
Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object.
def handle_authenticated_user(self, response): """ Handles the ULogin response if user is already authenticated """ current_user = get_user(self.request) ulogin, registered = ULoginUser.objects.get_or_create( uid=response['uid'], network=response['network'], defaults={'identity': response['identity'], 'user': current_user}) if not registered: ulogin_user = ulogin.user logger.debug('uLogin user already exists') if current_user != ulogin_user: logger.debug( "Mismatch: %s is not a %s. Take over it!" % (current_user, ulogin_user) ) ulogin.user = current_user ulogin.save() return get_user(self.request), ulogin, registered
Handles the ULogin response if user is already authenticated
def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--resume', dest='resume', action='store_true', help='restart the game from where you left') parser.add_argument('-v', '--version', action='store_true') parser.add_argument('-r', '--rules', action='store_true') return vars(parser.parse_args())
parse args from the CLI and return a dict
def serve(self, app, conf): """ A very simple approach for a WSGI server. """ if self.args.reload: try: self.watch_and_spawn(conf) except ImportError: print('The `--reload` option requires `watchdog` to be ' 'installed.') print(' $ pip install watchdog') else: self._serve(app, conf)
A very simple approach for a WSGI server.
def c_metadata(api, args, verbose=False): """ Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal. """ obj = api.get_object(args['<URL>'].split('/')[-1]) if not set_metadata(args['<JSON>'], obj): return json.dumps(obj.metadata.read(), indent=4)
Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal.
def key_file_public(self): '''str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory) ''' if not hasattr(self, '_key_file_public'): self.key_file_public = '~/.ssh/{key}.pub'.format(key=self.key_name) return self._key_file_public
str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory)
def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if data_read: self._current_offset += len(data_read) data = b''.join([data, data_read]) return data
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
def services(self): """ Gets the services object which will provide the ArcGIS Server's admin information about services and folders. """ if self._resources is None: self.__init() if "services" in self._resources: url = self._url + "/services" return _services.Services(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
Gets the services object which will provide the ArcGIS Server's admin information about services and folders.
def ssh(self, *args, **kwargs): ''' Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command ''' ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts, disable_custom_roster=True) return ssh_client.cmd_sync(kwargs)
Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command
def p_let_arr_substr_in_args3(p): """ statement : LET ARRAY_ID LP arguments COMMA TO RP EQ expr | ARRAY_ID LP arguments COMMA TO RP EQ expr """ i = 2 if p[1].upper() == 'LET' else 1 id_ = p[i] arg_list = p[i + 2] substr = (make_number(0, lineno=p.lineno(i + 4)), make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(i + 3))) expr_ = p[i + 7] p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, substr, expr_)
statement : LET ARRAY_ID LP arguments COMMA TO RP EQ expr | ARRAY_ID LP arguments COMMA TO RP EQ expr
def infer(self, data, initial_proposal=None, full_output=False,**kwargs): """ Infer the model parameters, given the data. auto_convergence=True, walkers=100, burn=2000, sample=2000, minimum_sample=2000, convergence_check_frequency=1000, a=2.0, threads=1, """ # Apply data masks now so we don't have to do it on the fly. data, pixels_affected = self._apply_data_mask(data) # Any channels / parameters to ignore? matched_channels, missing_channels, ignore_parameters \ = self._match_channels_to_data(data) parameters = [p for p in self.parameters if p not in ignore_parameters] #parameters = list(set(self.parameters).difference(ignore_parameters)) logger.debug("Inferring {0} parameters: {1}".format(len(parameters), ", ".join(parameters))) # What sampling behaviour will we have? # - Auto-convergence: # + Sample for `minimum_sample` (default 2000, 200 walkers) # + Calculate the maximum exponential autocorrelation time for # all parameters # + For the rest of the chain, calculate the autocorrelation time # + Ensure that the number of samples we have is more than # `effectively_independent_samples` (default 100) times. # - Specified convergence: # + Burn for `burn` (default 2000) steps # + Sample for `sample` (default 2000) steps kwd = { "auto_convergence": False, # TODO CHANGE ME "walkers": 100, "burn": 2000, "sample": 2000, # The minimum_sample, n_tau_exp_as_burn_in, minimum_eis are only # used if auto_convergence is turned on. "minimum_sample": 2000, "maximum_sample": 100000, "n_tau_exp_as_burn_in": 3, "minimum_effective_independent_samples": 100, "check_convergence_frequency": 1000, "a": 2.0, "threads": 1 } # Update from the model, then update from any keyword arguments given. kwd.update(self._configuration.get("infer", {}).copy()) kwd.update(**kwargs) # Make some checks. if kwd["walkers"] % 2 > 0 or kwd["walkers"] < 2 * len(parameters): raise ValueError("the number of walkers must be an even number and " "be at least twice the number of model parameters") check_keywords = ["threads", "a"] if kwd["auto_convergence"]: logger.info("Convergence will be estimated automatically.") check_keywords += ["minimum_sample", "check_convergence_frequency", "minimum_effective_independent_samples", "n_tau_exp_as_burn_in", "maximum_sample"] else: check_keywords += ["burn", "sample"] logger.warn("No convergence checks will be done!") logger.info("Burning for {0} steps and sampling for {1} with {2} "\ "walkers".format(kwd["burn"], kwd["sample"], kwd["walkers"])) for keyword in check_keywords: if kwd[keyword] < 1: raise ValueError("keyword {} must be a positive value".format( keyword)) # Check for non-standard proposal scales. if kwd["a"] != 2.0: logger.warn("Using proposal scale of {0:.2f}".format(kwd["a"])) # If no initial proposal given, estimate the model parameters. if initial_proposal is None: initial_proposal = self.estimate(data) # Initial proposal could be: # - an array (N_walkers, N_dimensions) # - a dictionary containing key/value pairs for the dimensions if isinstance(initial_proposal, dict): wavelengths_required = [] for channel, spectrum in zip(matched_channels, data): if channel is None: continue z = initial_proposal.get("z", initial_proposal.get("z_{}".format(channel), 0)) wavelengths_required.append( [spectrum.disp[0] * (1 - z), spectrum.disp[-1] * (1 - z)]) closest_point = [initial_proposal[p] \ for p in self.grid_points.dtype.names] subset_bounds = self._initialise_approximator( closest_point=closest_point, wavelengths_required=wavelengths_required, force=True, **kwargs) initial_proposal = self._initial_proposal_distribution( parameters, initial_proposal, kwd["walkers"]) elif isinstance(initial_proposal, np.ndarray): initial_proposal = np.atleast_2d(initial_proposal) if initial_proposal.shape != (kwd["walkers"], len(parameters)): raise ValueError("initial proposal must be an array of shape "\ "(N_parameters, N_walkers) ({0}, {1})".format(kwd["walkers"], len(parameters))) # Prepare the convolution functions. self._create_convolution_functions(matched_channels, data, parameters) # Create the sampler. logger.info("Creating sampler with {0} walkers and {1} threads".format( kwd["walkers"], kwd["threads"])) debug = kwargs.get("debug", False) sampler = emcee.EnsembleSampler(kwd["walkers"], len(parameters), inference.ln_probability, a=kwd["a"], threads=kwd["threads"], args=(parameters, self, data, debug), kwargs={"matched_channels": matched_channels}) # Regardless of whether we automatically check for convergence or not, # we will still need to burn in for some minimum amount of time. if kwd["auto_convergence"]: # Sample for `minimum_sample` period. descr, iterations = "", kwd["minimum_sample"] else: # Sample for `burn` period descr, iterations = "burn-in", kwd["burn"] # Start sampling. t_init = time() acceptance_fractions = [] progress_bar = kwargs.get("__show_progress_bar", True) sampler, init_acceptance_fractions, pos, lnprob, rstate, init_elapsed \ = self._sample(sampler, initial_proposal, iterations, descr=descr, parameters=parameters, __show_progress_bar=progress_bar) acceptance_fractions.append(init_acceptance_fractions) # If we don't have to check for convergence, it's easy: if not kwd["auto_convergence"]: # Save the chain and log probabilities before we reset the chain. burn, sample = kwd["burn"], kwd["sample"] converged = None # we don't know! burn_chains = sampler.chain burn_ln_probabilities = sampler.lnprobability # Reset the chain. logger.debug("Resetting chain...") sampler.reset() # Sample the posterior. sampler, prod_acceptance_fractions, pos, lnprob, rstate, t_elapsed \ = self._sample(sampler, pos, kwd["sample"], lnprob0=lnprob, rstate0=rstate, descr="production", parameters=parameters, __show_progress_bar=progress_bar) production_chains = sampler.chain production_ln_probabilities = sampler.lnprobability acceptance_fractions.append(prod_acceptance_fractions) else: # Start checking for convergence at a frequency # of check_convergence_frequency last_state = [pos, lnprob, rstate] converged, total_steps = False, 0 + iterations min_eis_required = kwd["minimum_effective_independent_samples"] while not converged and kwd["maximum_sample"] > total_steps: # Check for convergence. # Estimate the exponential autocorrelation time. try: tau_exp, rho, rho_max_fit \ = utils.estimate_tau_exp(sampler.chain) except: logger.exception("Exception occurred when trying to " "estimate the exponential autocorrelation time:") logger.info("To recover, we are temporarily setting tau_exp" " to {0}".format(total_steps)) tau_exp = total_steps logger.info("Estimated tau_exp at {0} is {1:.0f}".format( total_steps, tau_exp)) # Grab everything n_tau_exp_as_burn_in times that. burn = int(np.ceil(tau_exp)) * kwd["n_tau_exp_as_burn_in"] sample = sampler.chain.shape[1] - burn if 1 > sample: logger.info("Sampler has not converged because {0}x the " "estimated exponential autocorrelation time of {1:.0f}" " is step {2}, and we are only at step {3}".format( kwd["n_tau_exp_as_burn_in"], tau_exp, burn, total_steps)) else: # Calculate the integrated autocorrelation time in the # remaining sample, for every parameter. tau_int = utils.estimate_tau_int(sampler.chain[:, burn:]) # Calculate the effective number of independent samples in # each parameter. num_effective = (kwd["walkers"] * sample)/(2*tau_int) logger.info("Effective number of independent samples in " "each parameter:") for parameter, n_eis in zip(parameters, num_effective): logger.info("\t{0}: {1:.0f}".format(parameter, n_eis)) if num_effective.min() > min_eis_required: # Converged. converged = True logger.info("Convergence achieved ({0:.0f} > {1:.0f})"\ .format(num_effective.min() > min_eis_required)) # Separate the samples into burn and production.. burn_chains = sampler.chain[:, :burn, :] burn_ln_probabilities = sampler.lnprobability[:burn] production_chains = sampler.chain[:, burn:, :] production_ln_probabilities = sampler.lnprobability[burn:] break else: # Nope. logger.info("Sampler has not converged because it did " "not meet the minimum number of effective " "independent samples ({0:.0f})".format(kwd["n"])) # Keep sampling. iterations = kwd["check_convergence_frequency"] logger.info("Trying for another {0} steps".format(iterations)) pos, lnprob, rstate = last_state sampler, af, pos, lnprob, rstate, t_elapsed = self._sample( sampler, pos, iterations, lnprob0=lnprob, rstate0=rstate, descr="", parameters=parameters, __show_progress_bar=progress_bar) total_steps += iterations acceptance_fractions.append(af) last_state.extend(pos, lnprob, rstate) del last_state[:3] if not converged: logger.warn("Maximum number of samples ({:.0f}) reached without" "convergence!".format(kwd["maximum_sample"])) logger.info("Total time elapsed: {0} seconds".format(time() - t_init)) if sampler.pool: sampler.pool.close() sampler.pool.join() # Stack burn and production information together. chains = np.hstack([burn_chains, production_chains]) lnprobability = np.hstack([ burn_ln_probabilities, production_ln_probabilities]) acceptance_fractions = np.hstack(acceptance_fractions) chi_sq, dof, model_fluxes = self._chi_sq(dict(zip(parameters, [np.percentile(chains[:, burn:, i], 50) for i in range(len(parameters))])), data) # Convert velocity scales. symbol, scale, units = self._preferred_redshift_scale labels = [] + parameters scales = np.ones(len(parameters)) if symbol != "z": for i, parameter in enumerate(parameters): if parameter == "z" or parameter.startswith("z_"): chains[:, :, i] *= scale scales[i] = scale if "_" in parameter: labels[i] = "_".join([symbol, parameter.split("_")[1:]]) else: labels[i] = symbol logger.debug("Scaled {0} (now {1}) to units of {2}".format( parameter, labels[i], units)) # Calculate MAP values and associated uncertainties. theta = OrderedDict() for i, label in enumerate(labels): l, c, u = np.percentile(chains[:, burn:, i], [16, 50, 84]) theta[label] = (c, u-c, l-c) # Re-arrange the chains to be in the same order as the model parameters. indices = np.array([parameters.index(p) \ for p in self.parameters if p in parameters]) chains = chains[:, :, indices] # Remove the convolution functions. if not kwargs.get("__keep_convolution_functions", False): self._destroy_convolution_functions() if full_output: metadata = { "burn": burn, "walkers": kwd["walkers"], "sample": sample, "parameters": labels, "scales": scales, "chi_sq": chi_sq, "dof": dof } return (theta, chains, lnprobability, acceptance_fractions, sampler, metadata) return theta
Infer the model parameters, given the data. auto_convergence=True, walkers=100, burn=2000, sample=2000, minimum_sample=2000, convergence_check_frequency=1000, a=2.0, threads=1,
def access_keys(opts): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' # TODO: Need a way to get all available users for systems not supported by pwd module. # For now users pattern matching will not work for publisher_acl. keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.user.get_user()) for user in acl_users: log.info('Preparing the %s key for local communication', user) key = mk_key(opts, user) if key is not None: keys[user] = key # Check other users matching ACL patterns if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterapi access_keys function') for user in pwd.getpwall(): user = user.pw_name if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) log.profile('End pwd.getpwall() call in masterapi access_keys function') return keys
A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root.
def compare(self, vertex0, vertex1, subject_graph): """Returns true when the two vertices are of the same kind""" return ( self.pattern_graph.vertex_fingerprints[vertex0] == subject_graph.vertex_fingerprints[vertex1] ).all()
Returns true when the two vertices are of the same kind
def resolve_metric_as_tuple(metric): """ Resolve metric key to a given target. :param metric: the metric name. :type metric: ``str`` :rtype: :class:`Metric` """ if "." in metric: _, metric = metric.split(".") r = [ (operator, match) for operator, match in ALL_METRICS if match[0] == metric ] if not r or len(r) == 0: raise ValueError(f"Metric {metric} not recognised.") else: return r[0]
Resolve metric key to a given target. :param metric: the metric name. :type metric: ``str`` :rtype: :class:`Metric`
def delete_variable(self, key): """Deletes a global variable :param key: the key of the global variable to be deleted :raises exceptions.AttributeError: if the global variable does not exist """ key = str(key) if self.is_locked(key): raise RuntimeError("Global variable is locked") with self.__global_lock: if key in self.__global_variable_dictionary: access_key = self.lock_variable(key, block=True) del self.__global_variable_dictionary[key] self.unlock_variable(key, access_key) del self.__variable_locks[key] del self.__variable_references[key] else: raise AttributeError("Global variable %s does not exist!" % str(key)) logger.debug("Global variable %s was deleted!" % str(key))
Deletes a global variable :param key: the key of the global variable to be deleted :raises exceptions.AttributeError: if the global variable does not exist
def classify_file(f): """Examine the column names to determine which type of file this is. Return a tuple: retvalue[0] = "file is non-parameterized" retvalue[1] = "file contains error column" """ cols=f[1].columns if len(cols) == 2: #Then we must have a simple file return (True,False) elif len(cols) == 3 and ('ERROR' in cols.names): return (True,True) elif len(cols) > 2 and ('ERROR' not in cols.names): return (True,False) else: return (False,True)
Examine the column names to determine which type of file this is. Return a tuple: retvalue[0] = "file is non-parameterized" retvalue[1] = "file contains error column"
def do_open(self, args): """Open resource by number, resource name or alias: open 3""" if not args: print('A resource name must be specified.') return if self.current: print('You can only open one resource at a time. Please close the current one first.') return if args.isdigit(): try: args = self.resources[int(args)][0] except IndexError: print('Not a valid resource number. Use the command "list".') return try: self.current = self.resource_manager.open_resource(args) print('{} has been opened.\n' 'You can talk to the device using "write", "read" or "query".\n' 'The default end of message is added to each message.'.format(args)) self.py_attr = [] self.vi_attr = [] for attr in getattr(self.current, 'visa_attributes_classes', ()): if attr.py_name: self.py_attr.append(attr.py_name) self.vi_attr.append(attr.visa_name) self.prompt = '(open) ' except Exception as e: print(e)
Open resource by number, resource name or alias: open 3
def configure_visual_baseline(self): """Configure baseline directory""" # Get baseline name baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}') for section in self.config.sections(): for option in self.config.options(section): option_value = self.config.get(section, option) baseline_name = baseline_name.replace('{{{0}_{1}}}'.format(section, option), option_value) # Configure baseline directory if baseline name has changed if self.baseline_name != baseline_name: self.baseline_name = baseline_name self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory, get_valid_filename(baseline_name))
Configure baseline directory
def lock(name, zk_hosts=None, identifier=None, max_concurrency=1, timeout=None, ephemeral_lease=False, profile=None, scheme=None, username=None, password=None, default_acl=None): ''' Block state execution until you are able to get the lock (or hit the timeout) ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} conn_kwargs = {'profile': profile, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl} if __opts__['test']: ret['result'] = None ret['comment'] = 'Attempt to acquire lock' return ret if identifier is None: identifier = __grains__['id'] locked = __salt__['zk_concurrency.lock'](name, zk_hosts, identifier=identifier, max_concurrency=max_concurrency, timeout=timeout, ephemeral_lease=ephemeral_lease, **conn_kwargs) if locked: ret['result'] = True ret['comment'] = 'lock acquired' else: ret['comment'] = 'Unable to acquire lock' return ret
Block state execution until you are able to get the lock (or hit the timeout)
def _update_fobj(self): """Updates fobj from GUI. Opposite of _update_gui().""" # print("PPPPPPPPPPPPPPPPPPPRINTANDO O STACK") # traceback.print_stack() emsg, flag_error = "", False fieldname = None try: self._before_update_fobj() for item in self._map: self._f.obj[item.fieldname] = item.get_value() self._after_update_fobj() except Exception as E: flag_error = True if fieldname is not None: emsg = "Field '{}': {}".format(fieldname, str(E)) else: emsg = str(E) self.add_log_error(emsg) self._flag_valid = not flag_error if not flag_error: self.status("")
Updates fobj from GUI. Opposite of _update_gui().
def query_most_pic(num, kind='1'): ''' Query most pics. ''' return TabPost.select().where( (TabPost.kind == kind) & (TabPost.logo != "") ).order_by(TabPost.view_count.desc()).limit(num)
Query most pics.
def full_match(self, other): """Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. In case molecules, this would make the algorithm sensitive to atom numbers etc. """ # we need normalize subgraphs because these graphs are used as patterns. graphs0 = [ self.get_subgraph(group, normalize=True) for group in self.independent_vertices ] graphs1 = [ other.get_subgraph(group) for group in other.independent_vertices ] if len(graphs0) != len(graphs1): return matches = [] for graph0 in graphs0: pattern = EqualPattern(graph0) found_match = False for i, graph1 in enumerate(graphs1): local_matches = list(GraphSearch(pattern)(graph1, one_match=True)) if len(local_matches) == 1: match = local_matches[0] # we need to restore the relation between the normalized # graph0 and its original indexes old_to_new = OneToOne(( (j, i) for i, j in enumerate(graph0._old_vertex_indexes) )) matches.append(match * old_to_new) del graphs1[i] found_match = True break if not found_match: return result = OneToOne() for match in matches: result.add_relations(match.forward.items()) return result
Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. In case molecules, this would make the algorithm sensitive to atom numbers etc.
def save(self, directory, parameters='all'): """ Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results' """ def _save_power_flow_results(target_dir): if self.pfa_v_mag_pu is not None: # create directory os.makedirs(target_dir, exist_ok=True) # voltage self.pfa_v_mag_pu.to_csv( os.path.join(target_dir, 'voltages_pu.csv')) # current self.i_res.to_csv( os.path.join(target_dir, 'currents.csv')) # active power self.pfa_p.to_csv( os.path.join(target_dir, 'active_powers.csv')) # reactive power self.pfa_q.to_csv( os.path.join(target_dir, 'reactive_powers.csv')) # apparent power self.s_res().to_csv( os.path.join(target_dir, 'apparent_powers.csv')) # grid losses self.grid_losses.to_csv( os.path.join(target_dir, 'grid_losses.csv')) # grid exchanges self.hv_mv_exchanges.to_csv(os.path.join( target_dir, 'hv_mv_exchanges.csv')) def _save_pypsa_network(target_dir): if self.network.pypsa: # create directory os.makedirs(target_dir, exist_ok=True) self.network.pypsa.export_to_csv_folder(target_dir) def _save_grid_expansion_results(target_dir): if self.grid_expansion_costs is not None: # create directory os.makedirs(target_dir, exist_ok=True) # grid expansion costs self.grid_expansion_costs.to_csv(os.path.join( target_dir, 'grid_expansion_costs.csv')) # unresolved issues pd.DataFrame(self.unresolved_issues).to_csv(os.path.join( target_dir, 'unresolved_issues.csv')) # equipment changes self.equipment_changes.to_csv(os.path.join( target_dir, 'equipment_changes.csv')) def _save_curtailment_results(target_dir): if self.curtailment is not None: # create directory os.makedirs(target_dir, exist_ok=True) for key, curtailment_df in self.curtailment.items(): if type(key) == tuple: type_prefix = '-'.join([key[0], str(key[1])]) elif type(key) == str: type_prefix = key else: raise KeyError("Unknown key type {} for key {}".format( type(key), key)) filename = os.path.join( target_dir, '{}.csv'.format(type_prefix)) curtailment_df.to_csv(filename, index_label=type_prefix) def _save_storage_integration_results(target_dir): storages = self.storages if not storages.empty: # create directory os.makedirs(target_dir, exist_ok=True) # general storage information storages.to_csv(os.path.join(target_dir, 'storages.csv')) # storages time series ts_p, ts_q = self.storages_timeseries() ts_p.to_csv(os.path.join( target_dir, 'storages_active_power.csv')) ts_q.to_csv(os.path.join( target_dir, 'storages_reactive_power.csv')) if not self.storages_costs_reduction is None: self.storages_costs_reduction.to_csv( os.path.join(target_dir, 'storages_costs_reduction.csv')) # dictionary with function to call to save each parameter func_dict = { 'powerflow_results': _save_power_flow_results, 'pypsa_network': _save_pypsa_network, 'grid_expansion_results': _save_grid_expansion_results, 'curtailment_results': _save_curtailment_results, 'storage_integration_results': _save_storage_integration_results } # if string is given convert to list if isinstance(parameters, str): if parameters == 'all': parameters = ['powerflow_results', 'pypsa_network', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'] else: parameters = [parameters] # save each parameter for parameter in parameters: try: func_dict[parameter](os.path.join(directory, parameter)) except KeyError: message = "Invalid input {} for `parameters` when saving " \ "results. Must be any or a list of the following: " \ "'pypsa_network', 'powerflow_results', " \ "'grid_expansion_results', 'curtailment_results', " \ "'storage_integration_results'.".format(parameter) logger.error(message) raise KeyError(message) except: raise # save measures pd.DataFrame(data={'measure': self.measures}).to_csv( os.path.join(directory, 'measures.csv')) # save configs with open(os.path.join(directory, 'configs.csv'), 'w') as f: writer = csv.writer(f) rows = [ ['{}'.format(key)] + [value for item in values.items() for value in item] for key, values in self.network.config._data.items()] writer.writerows(rows)
Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results'
def _get_service_state(service_id: str): """Get the Service state object for the specified id.""" LOG.debug('Getting state of service %s', service_id) services = get_service_id_list() service_ids = [s for s in services if service_id in s] if len(service_ids) != 1: return 'Service not found! services = {}'.format(str(services)) subsystem, name, version = service_ids[0].split(':') return ServiceState(subsystem, name, version)
Get the Service state object for the specified id.
def do(self, changes, task_handle=taskhandle.NullTaskHandle()): """Perform the change and add it to the `self.undo_list` Note that uninteresting changes (changes to ignored files) will not be appended to `self.undo_list`. """ try: self.current_change = changes changes.do(change.create_job_set(task_handle, changes)) finally: self.current_change = None if self._is_change_interesting(changes): self.undo_list.append(changes) self._remove_extra_items() del self.redo_list[:]
Perform the change and add it to the `self.undo_list` Note that uninteresting changes (changes to ignored files) will not be appended to `self.undo_list`.
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically.
def create_authentication_string(username, password): ''' Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string. ''' username_utf8 = username.encode('utf-8') userpw_utf8 = password.encode('utf-8') username_perc = quote(username_utf8) userpw_perc = quote(userpw_utf8) authinfostring = username_perc + ':' + userpw_perc authinfostring_base64 = base64.b64encode(authinfostring.encode('utf-8')).decode('utf-8') return authinfostring_base64
Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string.
def positions(self, word): """ Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation """ word = word.lower() points = self.cache.get(word) if points is None: prepWord = '.%s.' % word res = [0] * (len(prepWord) + 1) for i in range(len(prepWord) - 1): for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1): p = self.patterns.get(prepWord[i:j]) if p: offset, value = p s = slice(i + offset, i + offset + len(value)) res[s] = map(max, value, res[s]) points = [dint(i - 1, ref=r) for i, r in enumerate(res) if r % 2] self.cache[word] = points return points
Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation
def _start_refresh_timer(self): """Start the Vim timer. """ if not self._timer: self._timer = self._vim.eval( "timer_start({}, 'EnTick', {{'repeat': -1}})" .format(REFRESH_TIMER) )
Start the Vim timer.
def loo(data, pointwise=False, reff=None, scale="deviance"): """Pareto-smoothed importance sampling leave-one-out cross-validation. Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit, following Vehtari et al. (2017). Cross-validation is computed using Pareto-smoothed importance sampling (PSIS). Parameters ---------- data : result of MCMC run pointwise: bool, optional if True the pointwise predictive accuracy will be returned. Defaults to False reff : float, optional Relative MCMC efficiency, `effective_n / n` i.e. number of effective samples divided by the number of actual samples. Computed from trace by default. scale : str Output scale for loo. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- pandas.Series with the following columns: loo: approximated Leave-one-out cross-validation loo_se: standard error of loo p_loo: effective number of parameters shape_warn: 1 if the estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples loo_i: array of pointwise predictive accuracy, only if pointwise True pareto_k: array of Pareto shape values, only if pointwise True loo_scale: scale of the loo results """ inference_data = convert_to_inference_data(data) for group in ("posterior", "sample_stats"): if not hasattr(inference_data, group): raise TypeError( "Must be able to extract a {group}" "group from data!".format(group=group) ) if "log_likelihood" not in inference_data.sample_stats: raise TypeError("Data must include log_likelihood in sample_stats") posterior = inference_data.posterior log_likelihood = inference_data.sample_stats.log_likelihood n_samples = log_likelihood.chain.size * log_likelihood.draw.size new_shape = (n_samples,) + log_likelihood.shape[2:] log_likelihood = log_likelihood.values.reshape(*new_shape) if scale.lower() == "deviance": scale_value = -2 elif scale.lower() == "log": scale_value = 1 elif scale.lower() == "negative_log": scale_value = -1 else: raise TypeError('Valid scale values are "deviance", "log", "negative_log"') if reff is None: n_chains = len(posterior.chain) if n_chains == 1: reff = 1.0 else: ess = effective_sample_size(posterior) # this mean is over all data variables reff = np.hstack([ess[v].values.flatten() for v in ess.data_vars]).mean() / n_samples log_weights, pareto_shape = psislw(-log_likelihood, reff) log_weights += log_likelihood warn_mg = 0 if np.any(pareto_shape > 0.7): warnings.warn( """Estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples. You should consider using a more robust model, this is because importance sampling is less likely to work well if the marginal posterior and LOO posterior are very different. This is more likely to happen with a non-robust model and highly influential observations.""" ) warn_mg = 1 loo_lppd_i = scale_value * _logsumexp(log_weights, axis=0) loo_lppd = loo_lppd_i.sum() loo_lppd_se = (len(loo_lppd_i) * np.var(loo_lppd_i)) ** 0.5 lppd = np.sum(_logsumexp(log_likelihood, axis=0, b_inv=log_likelihood.shape[0])) p_loo = lppd - loo_lppd / scale_value if pointwise: if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member warnings.warn( """The point-wise LOO is the same with the sum LOO, please double check the Observed RV in your model to make sure it returns element-wise logp. """ ) return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, loo_lppd_i, pareto_shape, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_i", "pareto_k", "loo_scale"], ) else: return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_scale"], )
Pareto-smoothed importance sampling leave-one-out cross-validation. Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit, following Vehtari et al. (2017). Cross-validation is computed using Pareto-smoothed importance sampling (PSIS). Parameters ---------- data : result of MCMC run pointwise: bool, optional if True the pointwise predictive accuracy will be returned. Defaults to False reff : float, optional Relative MCMC efficiency, `effective_n / n` i.e. number of effective samples divided by the number of actual samples. Computed from trace by default. scale : str Output scale for loo. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- pandas.Series with the following columns: loo: approximated Leave-one-out cross-validation loo_se: standard error of loo p_loo: effective number of parameters shape_warn: 1 if the estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples loo_i: array of pointwise predictive accuracy, only if pointwise True pareto_k: array of Pareto shape values, only if pointwise True loo_scale: scale of the loo results
def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ if change['type'] in ['event', 'update'] and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value'])
An observer which sends the state change to the proxy.
def edit(self, config={}, events=[], add_events=[], rm_events=[], active=True): """Edit this hook. :param dict config: (optional), key-value pairs of settings for this hook :param list events: (optional), which events should this be triggered for :param list add_events: (optional), events to be added to the list of events that this hook triggers for :param list rm_events: (optional), events to be remvoed from the list of events that this hook triggers for :param bool active: (optional), should this event be active :returns: bool """ data = {'config': config, 'active': active} if events: data['events'] = events if add_events: data['add_events'] = add_events if rm_events: data['remove_events'] = rm_events json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
Edit this hook. :param dict config: (optional), key-value pairs of settings for this hook :param list events: (optional), which events should this be triggered for :param list add_events: (optional), events to be added to the list of events that this hook triggers for :param list rm_events: (optional), events to be remvoed from the list of events that this hook triggers for :param bool active: (optional), should this event be active :returns: bool
def rsa_base64_encrypt(self, plain, b64=True): """ 使用公钥加密 ``可见数据`` - 由于rsa公钥加密相对耗时, 多用来 ``加密数据量小`` 的数据 .. note:: 1. 使用aes加密数据 2. 然后rsa用来加密aes加密数据时使用的key :param plain: :type plain: :param b64: :type b64: :return: :rtype: """ with open(self.key_file) as fp: key_ = RSA.importKey(fp.read()) plain = helper.to_bytes(plain) cipher = PKCS1_v1_5.new(key_).encrypt(plain) cip = base64.b64encode(cipher) if b64 else cipher return helper.to_str(cip)
使用公钥加密 ``可见数据`` - 由于rsa公钥加密相对耗时, 多用来 ``加密数据量小`` 的数据 .. note:: 1. 使用aes加密数据 2. 然后rsa用来加密aes加密数据时使用的key :param plain: :type plain: :param b64: :type b64: :return: :rtype:
def django_include(context, template_name, **kwargs): ''' Mako tag to include a Django template withing the current DMP (Mako) template. Since this is a Django template, it is search for using the Django search algorithm (instead of the DMP app-based concept). See https://docs.djangoproject.com/en/2.1/topics/templates/. The current context is sent to the included template, which makes all context variables available to the Django template. Any additional kwargs are added to the context. ''' try: djengine = engines['django'] except KeyError as e: raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e djtemplate = djengine.get_template(template_name) djcontext = {} djcontext.update(context) djcontext.update(kwargs) return djtemplate.render(djcontext, context['request'])
Mako tag to include a Django template withing the current DMP (Mako) template. Since this is a Django template, it is search for using the Django search algorithm (instead of the DMP app-based concept). See https://docs.djangoproject.com/en/2.1/topics/templates/. The current context is sent to the included template, which makes all context variables available to the Django template. Any additional kwargs are added to the context.
def _add_bad_rc(self, rc, result): """ Sets an error with a bad return code. Handles 'quiet' logic :param rc: The error code """ if not rc: return self.all_ok = False if rc == C.LCB_KEY_ENOENT and self._quiet: return try: raise pycbc_exc_lcb(rc) except PyCBC.default_exception as e: e.all_results = self e.key = result.key e.result = result self._add_err(sys.exc_info())
Sets an error with a bad return code. Handles 'quiet' logic :param rc: The error code
def finditer(self, expr): """Return an iterator over all matches in `expr` Iterate over all :class:`MatchDict` results of matches for any matching (sub-)expressions in `expr`. The order of the matches conforms to the equivalent matched expressions returned by :meth:`findall`. """ try: for arg in expr.args: for m in self.finditer(arg): yield m for arg in expr.kwargs.values(): for m in self.finditer(arg): yield m except AttributeError: pass m = self.match(expr) if m: yield m
Return an iterator over all matches in `expr` Iterate over all :class:`MatchDict` results of matches for any matching (sub-)expressions in `expr`. The order of the matches conforms to the equivalent matched expressions returned by :meth:`findall`.
def execute(command, shell=None, working_dir=".", echo=False, echo_indent=0): """Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir") """ if shell is None: shell = True if isinstance(command, str) else False p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=shell, cwd=working_dir) if echo: stdout = "" while p.poll() is None: # This blocks until it receives a newline. line = p.stdout.readline() print(" " * echo_indent, line, end="") stdout += line # Read any last bits line = p.stdout.read() print(" " * echo_indent, line, end="") print() stdout += line else: stdout, _ = p.communicate() return (p.returncode, stdout)
Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir")
async def _query_json( self, path, method="GET", *, params=None, data=None, headers=None, timeout=None ): """ A shorthand of _query() that treats the input as JSON. """ if headers is None: headers = {} headers["content-type"] = "application/json" if not isinstance(data, (str, bytes)): data = json.dumps(data) response = await self._query( path, method, params=params, data=data, headers=headers, timeout=timeout ) data = await parse_result(response) return data
A shorthand of _query() that treats the input as JSON.
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument.
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001): """ Optimise value of x using levenberg-marquardt """ x_new = x x_old = x-1 # dummy value f_old = f(x_new, a, c) while np.abs(x_new - x_old).sum() > tolerance: x_old = x_new x_tmp = levenberg_marquardt_update(x_old, a, c, damping) f_new = f(x_tmp, a, c) if f_new < f_old: damping = np.max(damping/10., 1e-20) x_new = x_tmp f_old = f_new else: damping *= 10. return x_new
Optimise value of x using levenberg-marquardt
def _parse_table( self, parent_name=None ): # type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]] """ Parses a table element. """ if self._current != "[": raise self.parse_error( InternalParserError, "_parse_table() called on non-bracket character." ) indent = self.extract() self.inc() # Skip opening bracket if self.end(): raise self.parse_error(UnexpectedEofError) is_aot = False if self._current == "[": if not self.inc(): raise self.parse_error(UnexpectedEofError) is_aot = True # Key self.mark() while self._current != "]" and self.inc(): if self.end(): raise self.parse_error(UnexpectedEofError) pass name = self.extract() if not name.strip(): raise self.parse_error(EmptyTableNameError) key = Key(name, sep="") name_parts = tuple(self._split_table_name(name)) missing_table = False if parent_name: parent_name_parts = tuple(self._split_table_name(parent_name)) else: parent_name_parts = tuple() if len(name_parts) > len(parent_name_parts) + 1: missing_table = True name_parts = name_parts[len(parent_name_parts) :] values = Container(True) self.inc() # Skip closing bracket if is_aot: # TODO: Verify close bracket self.inc() cws, comment, trail = self._parse_comment_trail() result = Null() if len(name_parts) > 1: if missing_table: # Missing super table # i.e. a table initialized like this: [foo.bar] # without initializing [foo] # # So we have to create the parent tables table = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and name_parts[0].key in self._aot_stack, is_super_table=True, name=name_parts[0].key, ) result = table key = name_parts[0] for i, _name in enumerate(name_parts[1:]): if _name in table: child = table[_name] else: child = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and i == len(name_parts[1:]) - 1, is_super_table=i < len(name_parts[1:]) - 1, name=_name.key, display_name=name if i == len(name_parts[1:]) - 1 else None, ) if is_aot and i == len(name_parts[1:]) - 1: table.append(_name, AoT([child], name=table.name, parsed=True)) else: table.append(_name, child) table = child values = table.value else: if name_parts: key = name_parts[0] while not self.end(): item = self._parse_item() if item: _key, item = item if not self._merge_ws(item, values): if _key is not None and _key.is_dotted(): self._handle_dotted_key(values, _key, item) else: values.append(_key, item) else: if self._current == "[": is_aot_next, name_next = self._peek_table() if self._is_child(name, name_next): key_next, table_next = self._parse_table(name) values.append(key_next, table_next) # Picking up any sibling while not self.end(): _, name_next = self._peek_table() if not self._is_child(name, name_next): break key_next, table_next = self._parse_table(name) values.append(key_next, table_next) break else: raise self.parse_error( InternalParserError, "_parse_item() returned None on a non-bracket character.", ) if isinstance(result, Null): result = Table( values, Trivia(indent, cws, comment, trail), is_aot, name=name, display_name=name, ) if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): result = self._parse_aot(result, name) return key, result
Parses a table element.
def inputs_valid(self, outputs=None): """Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. """ if self.operation == Transaction.CREATE: # NOTE: Since in the case of a `CREATE`-transaction we do not have # to check for outputs, we're just submitting dummy # values to the actual method. This simplifies it's logic # greatly, as we do not have to check against `None` values. return self._inputs_valid(['dummyvalue' for _ in self.inputs]) elif self.operation == Transaction.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) raise TypeError('`operation` must be one of {}' .format(allowed_ops))
Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid.
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs): """ Create a multi-GPU model similar to the basic cnn in the tutorials. """ model = make_basic_cnn() layers = model.layers model = MLPnGPU(nb_classes, layers, input_shape) return model
Create a multi-GPU model similar to the basic cnn in the tutorials.
def case_stmt_handle(self, loc, tokens): """Process case blocks.""" if len(tokens) == 2: item, cases = tokens default = None elif len(tokens) == 3: item, cases, default = tokens else: raise CoconutInternalException("invalid case tokens", tokens) check_var = case_check_var + "_" + str(self.case_check_count) self.case_check_count += 1 out = ( match_to_var + " = " + item + "\n" + match_case_tokens(loc, cases[0], check_var, True) ) for case in cases[1:]: out += ( "if not " + check_var + ":\n" + openindent + match_case_tokens(loc, case, check_var, False) + closeindent ) if default is not None: out += "if not " + check_var + default return out
Process case blocks.
def bed(args): """ %prog bed frgscffile Convert the frgscf posmap file to bed format. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) frgscffile, = args bedfile = frgscffile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") fp = open(frgscffile) for row in fp: f = FrgScfLine(row) print(f.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
%prog bed frgscffile Convert the frgscf posmap file to bed format.
def exception( # type: ignore self, msg, *args, exc_info=True, **kwargs ) -> Task: """ Convenience method for logging an ERROR with exception information. """ return self.error(msg, *args, exc_info=exc_info, **kwargs)
Convenience method for logging an ERROR with exception information.
def validate_read_preference_tags(name, value): """Parse readPreferenceTags if passed as a client kwarg. """ if not isinstance(value, list): value = [value] tag_sets = [] for tag_set in value: if tag_set == '': tag_sets.append({}) continue try: tag_sets.append(dict([tag.split(":") for tag in tag_set.split(",")])) except Exception: raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets
Parse readPreferenceTags if passed as a client kwarg.
def output_buffer_size(self, output_buffer_size_b): """output_buffer_size (nsqd 0.2.21+) the size in bytes of the buffer nsqd will use when writing to this client. Valid range: 64 <= output_buffer_size <= configured_max (-1 disables output buffering) --max-output-buffer-size (nsqd flag) controls the max Defaults to 16kb """ assert issubclass(output_buffer_size_b.__class__, int) return self.__push('output_buffer_size', output_buffer_size_b)
output_buffer_size (nsqd 0.2.21+) the size in bytes of the buffer nsqd will use when writing to this client. Valid range: 64 <= output_buffer_size <= configured_max (-1 disables output buffering) --max-output-buffer-size (nsqd flag) controls the max Defaults to 16kb
def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None): """ Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Updated resource. """ patch_request_body = [{'op': operation, 'path': path, 'value': value}] return self.patch_request(id_or_uri=id_or_uri, body=patch_request_body, timeout=timeout, custom_headers=custom_headers)
Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Updated resource.
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries): ''' Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`: ''' tsv = True boundaries = on_word_boundaries with open_file(pattern_filename) as input_file: for line in input_file: line = line.decode(encoding) if line.count('\t') != 1: tsv = False if '\\b' in line: boundaries = True if boundaries and not tsv: break return tsv, boundaries
Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`:
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB """ if port_number not in self._mappings: raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._mappings[port_number] data_link_type = data_link_type.lower() if data_link_type.startswith("dlt_"): data_link_type = data_link_type[4:] if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) yield from nio.bind_filter("both", "capture") yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number))
Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
def eval(self, x): """This method returns the evaluation of the function with input x :param x: this is the input as a Long """ aes = AES.new(self.key, AES.MODE_CFB, "\0" * AES.block_size) while True: nonce = 0 data = KeyedPRF.pad(SHA256.new(str(x + nonce).encode()).digest(), (number.size(self.range) + 7) // 8) num = self.mask & number.bytes_to_long(aes.encrypt(data)) if (num < self.range): return num nonce += 1
This method returns the evaluation of the function with input x :param x: this is the input as a Long
def parseExtensionArgs(self, args, strict=False): """Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object. """ policies_str = args.get('auth_policies') if policies_str and policies_str != 'none': self.auth_policies = policies_str.split(' ') nist_level_str = args.get('nist_auth_level') if nist_level_str: try: nist_level = int(nist_level_str) except ValueError: if strict: raise ValueError('nist_auth_level must be an integer between ' 'zero and four, inclusive') else: self.nist_auth_level = None else: if 0 <= nist_level < 5: self.nist_auth_level = nist_level auth_time = args.get('auth_time') if auth_time: if TIME_VALIDATOR.match(auth_time): self.auth_time = auth_time elif strict: raise ValueError("auth_time must be in RFC3339 format")
Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object.
def list_migration_issues_courses(self, course_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
List migration issues. Returns paginated migration issues
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
Un-indents text at cursor position.
def fn_kwargs(callable): """Returns a dict with the kwargs from the provided function. Example >>> def x(a, b=0, *args, **kwargs): pass >>> func_kwargs(x) == { 'b': 0 } """ fn = get_fn(callable) (args, _, _, defaults) = _inspect.getargspec(fn) if defaults is None: return { } return dict(list(zip(reversed(args), reversed(defaults))))
Returns a dict with the kwargs from the provided function. Example >>> def x(a, b=0, *args, **kwargs): pass >>> func_kwargs(x) == { 'b': 0 }
def update_or_create(cls, append_lists=True, with_status=False, **kwargs): """ Update or create an IPList. :param bool append_lists: append to existing IP List :param dict kwargs: provide at minimum the name attribute and optionally match the create constructor values :raises FetchElementFailed: Reason for retrieval failure """ was_created, was_modified = False, False element = None try: element = cls.get(kwargs.get('name')) if append_lists: iplist = element.iplist diff = [i for i in kwargs.get('iplist', []) if i not in iplist] if diff: iplist.extend(diff) else: iplist = [] else: iplist = kwargs.get('iplist', []) if iplist: element.upload(json={'ip': iplist}, as_type='json') was_modified = True except ElementNotFound: element = cls.create( kwargs.get('name'), iplist = kwargs.get('iplist', [])) was_created = True if with_status: return element, was_modified, was_created return element
Update or create an IPList. :param bool append_lists: append to existing IP List :param dict kwargs: provide at minimum the name attribute and optionally match the create constructor values :raises FetchElementFailed: Reason for retrieval failure
def segment(self, *args): """Segment one or more datasets with this subword field. Arguments: Positional arguments: Dataset objects or other indexable mutable sequences to segment. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. """ sources = [] for arg in args: if isinstance(arg, Dataset): sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self] else: sources.append(arg) for data in sources: for x in tqdm(data, 'segmenting'): x[:] = self.vocab.segment(x)
Segment one or more datasets with this subword field. Arguments: Positional arguments: Dataset objects or other indexable mutable sequences to segment. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly.
def get_uppermost_library_root_state(self): """Find state_copy of uppermost LibraryState Method checks if there is a parent library root state and assigns it to be the current library root state till there is no further parent library root state. """ library_root_state = self.get_next_upper_library_root_state() parent_library_root_state = library_root_state # initial a library root state has to be found and if there is no further parent root state # parent_library_root_state and library_root_state are no more identical while parent_library_root_state and library_root_state is parent_library_root_state: if library_root_state: parent_library_root_state = library_root_state.parent.get_next_upper_library_root_state() if parent_library_root_state: library_root_state = parent_library_root_state return library_root_state
Find state_copy of uppermost LibraryState Method checks if there is a parent library root state and assigns it to be the current library root state till there is no further parent library root state.
def _set_igmp_snooping_state(self, v, load=False): """ Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_snooping_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_snooping_state() directly. YANG Description: IGMP Snooping Root MO """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """igmp_snooping_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True)""", }) self.__igmp_snooping_state = t if hasattr(self, '_set'): self._set()
Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_snooping_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_snooping_state() directly. YANG Description: IGMP Snooping Root MO
def get_file(self): """ Load data into a file and return file path. :return: path to file as string """ content = self._load() if not content: return None filename = "temporary_file.bin" with open(filename, "wb") as file_name: file_name.write(content) return filename
Load data into a file and return file path. :return: path to file as string
def BatchNorm(inputs, training=None, momentum=0.9, epsilon=1e-5, center=True, scale=True, gamma_initializer=tf.ones_initializer(), data_format='channels_last', internal_update=False): """ Mostly equivalent to `tf.layers.batch_normalization`, but difference in the following: 1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored. 2. Default value for `momentum` and `epsilon` is different. 3. Default value for `training` is automatically obtained from `TowerContext`. 4. Support the `internal_update` option. Args: internal_update (bool): if False, add EMA update ops to `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies. Variable Names: * ``beta``: the bias term. Will be zero-inited by default. * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``. * ``mean/EMA``: the moving average of mean. * ``variance/EMA``: the moving average of variance. Note: 1. About multi-GPU training: moving averages across GPUs are not aggregated. Batch statistics are computed independently. This is consistent with most frameworks. 2. Combinations of ``training`` and ``ctx.is_training``: * ``training == ctx.is_training``: standard BN, EMA are maintained during training and used during inference. This is the default. * ``training and not ctx.is_training``: still use batch statistics in inference. * ``not training and ctx.is_training``: use EMA to normalize in training. This is useful when you load a pre-trained BN and don't want to fine tune the EMA. EMA will not be updated in this case. """ data_format = get_data_format(data_format, keras_mode=False) shape = inputs.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] if ndims == 2: data_format = 'NHWC' if data_format == 'NCHW': n_out = shape[1] else: n_out = shape[-1] # channel assert n_out is not None, "Input to BatchNorm cannot have unknown channels!" beta, gamma, moving_mean, moving_var = get_bn_variables(n_out, scale, center, gamma_initializer) ctx = get_current_tower_context() use_local_stat = training if use_local_stat is None: use_local_stat = ctx.is_training use_local_stat = bool(use_local_stat) if use_local_stat: if ndims == 2: inputs = tf.reshape(inputs, [-1, 1, 1, n_out]) # fused_bn only takes 4D input # fused_bn has error using NCHW? (see #190) xn, batch_mean, batch_var = tf.nn.fused_batch_norm( inputs, gamma, beta, epsilon=epsilon, is_training=True, data_format=data_format) if ndims == 2: xn = tf.squeeze(xn, [1, 2]) else: if ctx.is_training: assert get_tf_version_tuple() >= (1, 4), \ "Fine tuning a BatchNorm model with fixed statistics is only " \ "supported after https://github.com/tensorflow/tensorflow/pull/12580 " if ctx.is_main_training_tower: # only warn in first tower logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.") # Using moving_mean/moving_variance in training, which means we # loaded a pre-trained BN and only fine-tuning the affine part. xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: if ndims == 4: xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: xn = tf.nn.batch_normalization( inputs, moving_mean, moving_var, beta, gamma, epsilon) # maintain EMA only on one GPU is OK, even in replicated mode. # because training time doesn't use EMA if ctx.is_main_training_tower: add_model_variable(moving_mean) add_model_variable(moving_var) if ctx.is_main_training_tower and use_local_stat: ret = update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, momentum, internal_update) else: ret = tf.identity(xn, name='output') vh = ret.variables = VariableHolder(mean=moving_mean, variance=moving_var) if scale: vh.gamma = gamma if center: vh.beta = beta return ret
Mostly equivalent to `tf.layers.batch_normalization`, but difference in the following: 1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored. 2. Default value for `momentum` and `epsilon` is different. 3. Default value for `training` is automatically obtained from `TowerContext`. 4. Support the `internal_update` option. Args: internal_update (bool): if False, add EMA update ops to `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies. Variable Names: * ``beta``: the bias term. Will be zero-inited by default. * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``. * ``mean/EMA``: the moving average of mean. * ``variance/EMA``: the moving average of variance. Note: 1. About multi-GPU training: moving averages across GPUs are not aggregated. Batch statistics are computed independently. This is consistent with most frameworks. 2. Combinations of ``training`` and ``ctx.is_training``: * ``training == ctx.is_training``: standard BN, EMA are maintained during training and used during inference. This is the default. * ``training and not ctx.is_training``: still use batch statistics in inference. * ``not training and ctx.is_training``: use EMA to normalize in training. This is useful when you load a pre-trained BN and don't want to fine tune the EMA. EMA will not be updated in this case.
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type): """ Makes relationship between the two nodes and returns the relationship. If a relationship is not possible NoRelationshipPossible exception is raised. """ other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Location' and rel_type == 'Has': return _create_relationship(manager, location_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
Makes relationship between the two nodes and returns the relationship. If a relationship is not possible NoRelationshipPossible exception is raised.
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080): """Returns `(ip_address, port)` from string `ip_addr_port`""" addr = defaultaddr port = defaultport ip = ip.split(":", 1) if len(ip) == 1: if not ip[0]: pass elif validipaddr(ip[0]): addr = ip[0] elif validipport(ip[0]): port = int(ip[0]) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' elif len(ip) == 2: addr, port = ip if not validipaddr(addr) and validipport(port): raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' port = int(port) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' return (addr, port)
Returns `(ip_address, port)` from string `ip_addr_port`
def get_firmware_manifest(self, manifest_id): """Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest """ api = self._get_api(update_service.DefaultApi) return FirmwareManifest(api.firmware_manifest_retrieve(manifest_id=manifest_id))
Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest
def _python_to_lua(pval): """ Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions """ import lua if pval is None: # Python None --> Lua None return lua.eval("") if isinstance(pval, (list, tuple, set)): # Python list --> Lua table # e.g.: in lrange # in Python returns: [v1, v2, v3] # in Lua returns: {v1, v2, v3} lua_list = lua.eval("{}") lua_table = lua.eval("table") for item in pval: lua_table.insert(lua_list, Script._python_to_lua(item)) return lua_list elif isinstance(pval, dict): # Python dict --> Lua dict # e.g.: in hgetall # in Python returns: {k1:v1, k2:v2, k3:v3} # in Lua returns: {k1, v1, k2, v2, k3, v3} lua_dict = lua.eval("{}") lua_table = lua.eval("table") for k, v in pval.iteritems(): lua_table.insert(lua_dict, Script._python_to_lua(k)) lua_table.insert(lua_dict, Script._python_to_lua(v)) return lua_dict elif isinstance(pval, str): # Python string --> Lua userdata return pval elif isinstance(pval, bool): # Python bool--> Lua boolean return lua.eval(str(pval).lower()) elif isinstance(pval, (int, long, float)): # Python int --> Lua number lua_globals = lua.globals() return lua_globals.tonumber(str(pval)) raise RuntimeError("Invalid Python type: " + str(type(pval)))
Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions
def build_GTK_KDE(self): """Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81 """ return b''.join([ b'\xdd', # Type KDE chb(len(self.gtk_full) + 6), b'\x00\x0f\xac', # OUI b'\x01', # GTK KDE b'\x00\x00', # KeyID - Tx - Reserved x2 self.gtk_full, ])
Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81
def _visit_shape_te(self, te: ShExJ.tripleExpr, visit_center: _VisitorCenter) -> None: """ Visit a triple expression that was reached through a shape. This, in turn, is used to visit additional shapes that are referenced by a TripleConstraint :param te: Triple expression reached through a Shape.expression :param visit_center: context used in shape visitor """ if isinstance(te, ShExJ.TripleConstraint) and te.valueExpr is not None: visit_center.f(visit_center.arg_cntxt, te.valueExpr, self)
Visit a triple expression that was reached through a shape. This, in turn, is used to visit additional shapes that are referenced by a TripleConstraint :param te: Triple expression reached through a Shape.expression :param visit_center: context used in shape visitor
def get_exclusions(path): """ Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns ``None`` if the file does not exist. :param path: Path to look up the ``.dockerignore`` in. :type path: unicode | str :return: List of patterns, that can be passed into :func:`get_filter_func`. :rtype: list[(__RegEx, bool)] """ if not os.path.isdir(path): return None dockerignore_file = os.path.join(path, '.dockerignore') if not os.path.isfile(dockerignore_file): return None with open(dockerignore_file, 'rb') as dif: return list(preprocess_matches(dif.readlines()))
Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns ``None`` if the file does not exist. :param path: Path to look up the ``.dockerignore`` in. :type path: unicode | str :return: List of patterns, that can be passed into :func:`get_filter_func`. :rtype: list[(__RegEx, bool)]
def set(self, key: Any, value: Any) -> None: """ Sets the value of a key to a supplied value """ if key is not None: self[key] = value
Sets the value of a key to a supplied value
def gen_row_lines(self, row, style, inner_widths, height): r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line. """ cells_in_row = list() # Resize row if it doesn't have enough cells. if len(row) != len(inner_widths): row = row + [''] * (len(inner_widths) - len(row)) # Pad and align each cell. Split each cell into lines to support multi-line cells. for i, cell in enumerate(row): align = (self.justify_columns.get(i),) inner_dimensions = (inner_widths[i], height) padding = (self.padding_left, self.padding_right, 0, 0) cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding)) # Determine border characters. if style == 'heading': left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else '' elif style == 'footing': left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else '' else: left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else '' # Yield each line. for line in build_row(cells_in_row, left, center, right): yield line
r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line.
def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self.platform_check and self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info
See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value.
def get_route_templates(self): """ Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port. """ def _get_published_ports(service_config): result = [] for port in service_config.get('ports', []): protocol = 'TCP' if isinstance(port, string_types) and '/' in port: port, protocol = port.split('/') if isinstance(port, string_types) and ':' in port: host, container = port.split(':') else: host = port result.append({'port': host, 'protocol': protocol.lower()}) return result templates = [] for name, service_config in self._services.items(): state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present') force = service_config.get(self.CONFIG_KEY, {}).get('force', False) published_ports = _get_published_ports(service_config) if state != 'present': continue for port in published_ports: route_name = "%s-%s" % (name, port['port']) labels = dict( app=self._namespace_name, service=name ) template = CommentedMap() template['apiVersion'] = self.DEFAULT_API_VERSION template['kind'] = 'Route' template['force'] = force template['metadata'] = CommentedMap([ ('name', route_name), ('namespace', self._namespace_name), ('labels', labels.copy()) ]) template['spec'] = CommentedMap([ ('to', CommentedMap([ ('kind', 'Service'), ('name', name) ])), ('port', CommentedMap([ ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol'])) ])) ]) if service_config.get(self.CONFIG_KEY, {}).get('routes'): for route in service_config[self.CONFIG_KEY]['routes']: if str(route.get('port')) == str(port['port']): for key, value in route.items(): if key not in ('force', 'port'): self.copy_attribute(template['spec'], key, value) templates.append(template) return templates
Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port.
def add_custom_metadata(self, key, value, meta_type=None): """ Add custom metadata to the Video. meta_type is required for XML API. """ self.metadata.append({'key': key, 'value': value, 'type': meta_type})
Add custom metadata to the Video. meta_type is required for XML API.
def login(self, user, password, exe_path, comm_password=None, **kwargs): """ 登陆客户端 :param user: 账号 :param password: 明文密码 :param exe_path: 客户端路径类似 'C:\\中国银河证券双子星3.2\\Binarystar.exe', 默认 'C:\\中国银河证券双子星3.2\\Binarystar.exe' :param comm_password: 通讯密码, 华泰需要,可不设 :return: """ try: self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=1 ) # pylint: disable=broad-except except Exception: self._app = pywinauto.Application().start(exe_path) is_xiadan = True if "xiadan.exe" in exe_path else False # wait login window ready while True: try: self._app.top_window().Edit1.wait("ready") break except RuntimeError: pass self._app.top_window().Edit1.type_keys(user) self._app.top_window().Edit2.type_keys(password) while True: self._app.top_window().Edit3.type_keys( self._handle_verify_code(is_xiadan) ) self._app.top_window()["确定" if is_xiadan else "登录"].click() # detect login is success or not try: self._app.top_window().wait_not("exists visible", 10) break # pylint: disable=broad-except except Exception: if is_xiadan: self._app.top_window()["确定"].click() self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=10 ) self._close_prompt_windows() self._main = self._app.window(title="网上股票交易系统5.0") try: self._main.child_window( control_id=129, class_name="SysTreeView32" ).wait("ready", 2) # pylint: disable=broad-except except Exception: self.wait(2) self._switch_window_to_normal_mode()
登陆客户端 :param user: 账号 :param password: 明文密码 :param exe_path: 客户端路径类似 'C:\\中国银河证券双子星3.2\\Binarystar.exe', 默认 'C:\\中国银河证券双子星3.2\\Binarystar.exe' :param comm_password: 通讯密码, 华泰需要,可不设 :return:
def calc_distance(lng1, lat1, lng2, lat2): """Calc distance (km) by geo-coordinates. @:param lng1: first coordinate.lng @:param lat1: first coordinate.lat @:param lng2: second coordinate.lng @:param lat2: second coordinate.lat @:return distance: km """ ra = 6378.140 # 赤道半径 (km) rb = 6356.755 # 极半径 (km) flatten = (ra - rb) / ra # 地球扁率 rad_lat_1 = math.radians(lat1) rad_lng_1 = math.radians(lng1) rad_lat_2 = math.radians(lat2) rad_lng_2 = math.radians(lng2) p1 = math.atan(rb / ra * math.tan(rad_lat_1)) p2 = math.atan(rb / ra * math.tan(rad_lat_2)) xx = math.acos(math.sin(p1) * math.sin(p2) + math.cos(p1) * math.cos(p2) * math.cos(rad_lng_1 - rad_lng_2)) c1 = (math.sin(xx) - xx) * (math.sin(p1) + math.sin(p2)) ** 2 / math.cos(xx / 2) ** 2 c2 = (math.sin(xx) + xx) * (math.sin(p1) - math.sin(p2)) ** 2 / math.sin(xx / 2) ** 2 dr = flatten / 8 * (c1 - c2) distance = ra * (xx + dr) return distance
Calc distance (km) by geo-coordinates. @:param lng1: first coordinate.lng @:param lat1: first coordinate.lat @:param lng2: second coordinate.lng @:param lat2: second coordinate.lat @:return distance: km
def render(self, context): """ Render the tag, with extra context layer. """ extra_context = self.context_expr.resolve(context) if not isinstance(extra_context, dict): raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.") with context.push(**extra_context): return self.nodelist.render(context)
Render the tag, with extra context layer.
def _compute_u(K): """ Estimate an approximation of the ratio of stationary over empirical distribution from the basis. Parameters: ----------- K0, ndarray(M+1, M+1), time-lagged correlation matrix for the whitened and padded data set. Returns: -------- u : ndarray(M,) coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis. """ M = K.shape[0] - 1 # Compute right and left eigenvectors: l, U = scl.eig(K.T) l, U = sort_by_norm(l, U) # Extract the eigenvector for eigenvalue one and normalize: u = np.real(U[:, 0]) v = np.zeros(M+1) v[M] = 1.0 u = u / np.dot(u, v) return u
Estimate an approximation of the ratio of stationary over empirical distribution from the basis. Parameters: ----------- K0, ndarray(M+1, M+1), time-lagged correlation matrix for the whitened and padded data set. Returns: -------- u : ndarray(M,) coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis.
def load_data(self, path): """Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file """ path = pathlib.Path(path).resolve() # Get metadata meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith("# - "): line = line.strip("#- ") var, val = line.split(":") if val.strip().replace(".", "").isdigit(): # channel width, flow rate, viscosity val = float(val) else: # columns, calculation val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith("#"): break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS # Load isoelasics with path.open("rb") as isfd: isodata = np.loadtxt(isfd) # Slice out individual isoelastics emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) # Add isoelastics to instance self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool
def main(): """ Generate sequences.""" parser = OptionParser(conflict_handler="resolve") parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)') parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)') parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)') parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)') parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model') parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model') parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE') parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.') parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.') parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5') parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.") parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.') parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'") parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.') parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: 'tab', 'space', ',', ';', ':'") parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.") (options, args) = parser.parse_args() main_folder = os.path.dirname(__file__) default_models = {} default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ'] default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ'] default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ'] default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ'] num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)]) if num_models_specified == 1: #exactly one model specified try: d_model = [x for x in default_models.keys() if getattr(options, x)][0] model_folder = default_models[d_model][0] recomb_type = default_models[d_model][1] except IndexError: if options.vdj_model_folder: #custom VDJ model specified model_folder = options.vdj_model_folder recomb_type = 'VDJ' elif options.vj_model_folder: #custom VJ model specified model_folder = options.vj_model_folder recomb_type = 'VJ' elif num_models_specified == 0: print 'Need to indicate generative model.' print 'Exiting...' return -1 elif num_models_specified > 1: print 'Only specify one model' print 'Exiting...' return -1 #Check that all model and genomic files exist in the indicated model folder if not os.path.isdir(model_folder): print 'Check pathing... cannot find the model folder: ' + model_folder print 'Exiting...' return -1 params_file_name = os.path.join(model_folder,'model_params.txt') marginals_file_name = os.path.join(model_folder,'model_marginals.txt') V_anchor_pos_file = os.path.join(model_folder,'V_gene_CDR3_anchors.csv') J_anchor_pos_file = os.path.join(model_folder,'J_gene_CDR3_anchors.csv') for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]: if not os.path.isfile(x): print 'Cannot find: ' + x print 'Please check the files (and naming conventions) in the model folder ' + model_folder print 'Exiting...' return -1 if options.outfile_name is not None: outfile_name = options.outfile_name if os.path.isfile(outfile_name): if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']: print 'Exiting...' return -1 #Parse arguments num_seqs_to_generate = int(options.num_seqs_to_generate) if num_seqs_to_generate <= 0: print 'Need to specify num_seqs (number of sequences to generate).' print 'Exiting...' return -1 #Parse default delimiter delimiter = options.delimiter if delimiter is None: delimiter = '\t' if options.outfile_name is not None: if outfile_name.endswith('.tsv'): delimiter = '\t' elif outfile_name.endswith('.csv'): delimiter = ',' else: try: delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter] except KeyError: pass #Other raw string. #Optional flags seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type] record_genes = options.record_genes seqs_per_time_update = int(options.seqs_per_time_update) time_updates = options.time_updates conserved_J_residues = options.conserved_J_residues if options.seed is not None: np.random.seed(options.seed) #VDJ recomb case --- used for TCRB and IGH if recomb_type == 'VDJ': genomic_data = load_model.GenomicDataVDJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVDJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data) #VJ recomb case --- used for TCRA and light chain elif recomb_type == 'VJ': genomic_data = load_model.GenomicDataVJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data) V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV] J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ] if options.outfile_name is not None: outfile = open(outfile_name, 'w') print 'Starting sequence generation... ' start_time = time.time() for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] outfile.write(current_line_out + '\n') if (i+1)%seqs_per_time_update == 0 and time_updates: c_time = time.time() - start_time eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) if eta > 86400: #more than a day eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 3600: #more than an hr eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 60: #more than a min eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60) else: eta_str = '%.2f seconds.'%(eta) print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str) c_time = time.time() - start_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str) outfile.close() else: #print to stdout for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] print current_line_out
Generate sequences.
def flag_all(self, thresh_dict=None, include=None, exclude=None): ''' Returns indices of (rows, columns) that satisfy flag() on any diagnostic. Uses user-provided thresholds in thresh_dict/ Args: thresh_dict (dict): dictionary of diagnostic->threshold functions include (list): optional sublist of diagnostics to flag exclude (list): optional sublist of diagnostics to not flag ''' if thresh_dict is None: thresh_dict = {} row_idx = set() col_idx = set() include = self.results if include is None else include include = list( set(include) - set(exclude)) if exclude is not None else include for diagnostic in include: if diagnostic in thresh_dict: flagged = self.flag(diagnostic, thresh_dict[diagnostic]) else: flagged = self.flag(diagnostic) if diagnostic == 'RowMahalanobisDistances': row_idx = row_idx.union(flagged) else: col_idx = col_idx.union(flagged) return sorted(list(row_idx)), sorted(list(col_idx))
Returns indices of (rows, columns) that satisfy flag() on any diagnostic. Uses user-provided thresholds in thresh_dict/ Args: thresh_dict (dict): dictionary of diagnostic->threshold functions include (list): optional sublist of diagnostics to flag exclude (list): optional sublist of diagnostics to not flag
def create_attributes(klass, attributes, previous_object=None): """Attributes for space creation.""" if previous_object is not None: return {'name': attributes.get('name', previous_object.name)} return { 'name': attributes.get('name', ''), 'defaultLocale': attributes['default_locale'] }
Attributes for space creation.
def merge_parts(self, version_id=None, **kwargs): """Merge parts into object version.""" self.file.update_checksum(**kwargs) with db.session.begin_nested(): obj = ObjectVersion.create( self.bucket, self.key, _file_id=self.file_id, version_id=version_id ) self.delete() return obj
Merge parts into object version.
def whitespace_around_comma(logical_line): r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2) """ line = logical_line for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): found = m.start() + 1 if '\t' in m.group(): yield found, "E242 tab after '%s'" % m.group()[0] else: yield found, "E241 multiple spaces after '%s'" % m.group()[0]
r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2)
def _remove(self, client_kwargs): """ Remove an object. args: client_kwargs (dict): Client arguments. """ with _handle_client_error(): # Object if 'Key' in client_kwargs: return self.client.delete_object(**client_kwargs) # Bucket return self.client.delete_bucket(Bucket=client_kwargs['Bucket'])
Remove an object. args: client_kwargs (dict): Client arguments.
def read_hyperparameters(): # type: () -> dict """Read the hyperparameters from /opt/ml/input/config/hyperparameters.json. For more information about hyperparameters.json: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters Returns: (dict[string, object]): a dictionary containing the hyperparameters. """ hyperparameters = _read_json(hyperparameters_file_dir) deserialized_hps = {} for k, v in hyperparameters.items(): try: v = json.loads(v) except (ValueError, TypeError): logger.info("Failed to parse hyperparameter %s value %s to Json.\n" "Returning the value itself", k, v) deserialized_hps[k] = v return deserialized_hps
Read the hyperparameters from /opt/ml/input/config/hyperparameters.json. For more information about hyperparameters.json: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters Returns: (dict[string, object]): a dictionary containing the hyperparameters.
def get_transformed_feature_indices(features, stats): """Returns information about the transformed features. Returns: List in the from [(transformed_feature_name, {size: int, index_start: int})] """ feature_indices = [] index_start = 1 for name, transform in sorted(six.iteritems(features)): transform_name = transform['transform'] source_column = transform['source_column'] info = {} if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]: info['size'] = 1 elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]: info['size'] = stats['column_stats'][source_column]['vocab_size'] elif transform_name == IMAGE_TRANSFORM: info['size'] = IMAGE_BOTTLENECK_TENSOR_SIZE elif transform_name == TARGET_TRANSFORM: info['size'] = 0 else: raise ValueError('xgboost does not support transform "%s"' % transform) info['index_start'] = index_start index_start += info['size'] feature_indices.append((name, info)) return feature_indices
Returns information about the transformed features. Returns: List in the from [(transformed_feature_name, {size: int, index_start: int})]
def _find_bck(self, chunk): """ Simply finds the free chunk that would be the backwards chunk relative to the chunk at ptr. Hence, the free head and all other metadata are unaltered by this function. """ cur = self.free_head_chunk if cur is None: return None fwd = cur.fwd_chunk() if cur == fwd: return cur # At this point there should be at least two free chunks in the heap if cur < chunk: while cur < fwd < chunk: cur = fwd fwd = cur.fwd_chunk() return cur else: while fwd != self.free_head_chunk: cur = fwd fwd = cur.fwd_chunk() return cur
Simply finds the free chunk that would be the backwards chunk relative to the chunk at ptr. Hence, the free head and all other metadata are unaltered by this function.
def file_rows(self, fo): """Return the lines in the file as a list. fo is the open file object.""" rows = [] for i in range(NUMROWS): line = fo.readline() if not line: break rows += [line] return rows
Return the lines in the file as a list. fo is the open file object.
def flair(self, r, name, text, css_class): """Login required. Sets flair for a user. See https://github.com/reddit/reddit/wiki/API%3A-flair. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response. URL: ``http://www.reddit.com/api/flair`` :param r: name of subreddit :param name: name of the user :param text: flair text to assign :param css_class: CSS class to assign to flair text """ data = dict(r=r, name=name, text=text, css_class=css_class) j = self.post('api', 'flair', data=data) return assert_truthy(j)
Login required. Sets flair for a user. See https://github.com/reddit/reddit/wiki/API%3A-flair. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response. URL: ``http://www.reddit.com/api/flair`` :param r: name of subreddit :param name: name of the user :param text: flair text to assign :param css_class: CSS class to assign to flair text