Dataset columns: code (string, lengths 75 to 104k), docstring (string, lengths 1 to 46.9k)
def parse(file_contents, file_name): """ This takes the contents and name of an expected yaml file and tries to parse it, returning an error message if there are any parsing issues. Args: file_contents (str): Contents of a yml file file_name (str): Name of the yml file, used in the error message Returns: str: A "Cannot Parse" message if the file contents cannot be parsed and interpreted as yaml, otherwise None """ try: yaml.load(file_contents) except Exception: _, exc_value, _ = sys.exc_info() return("Cannot Parse: {file_name}: \n {exc_value}" .format(file_name=file_name, exc_value=exc_value))
This takes the contents and name of an expected yaml file and tries to parse it, returning an error message if there are any parsing issues. Args: file_contents (str): Contents of a yml file file_name (str): Name of the yml file, used in the error message Returns: str: A "Cannot Parse" message if the file contents cannot be parsed and interpreted as yaml, otherwise None
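A minimal usage sketch for the parse() entry above; it assumes parse() is importable from the module that defines it (that module must import yaml and sys), and the YAML snippets are made up.

    bad_yaml = "key: [unclosed"
    print(parse(bad_yaml, "example.yml"))      # -> "Cannot Parse: example.yml: ..."
    print(parse("key: value", "example.yml"))  # -> None, the document parsed cleanly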
def validate_owner_repo_package(ctx, param, value): """Ensure that owner/repo/package is formatted correctly.""" # pylint: disable=unused-argument form = "OWNER/REPO/PACKAGE" return validate_slashes(param, value, minimum=3, maximum=3, form=form)
Ensure that owner/repo/package is formatted correctly.
def other_object_webhook_handler(event): """Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects. Docs for: - charge: https://stripe.com/docs/api#charges - coupon: https://stripe.com/docs/api#coupons - invoice: https://stripe.com/docs/api#invoices - invoiceitem: https://stripe.com/docs/api#invoiceitems - plan: https://stripe.com/docs/api#plans - product: https://stripe.com/docs/api#products - source: https://stripe.com/docs/api#sources """ if event.parts[:2] == ["charge", "dispute"]: # charge.dispute.* events map to the Dispute model rather than the Charge model. target_cls = models.Dispute else: target_cls = { "charge": models.Charge, "coupon": models.Coupon, "invoice": models.Invoice, "invoiceitem": models.InvoiceItem, "plan": models.Plan, "product": models.Product, "transfer": models.Transfer, "source": models.Source, }.get(event.category) _handle_crud_like_event(target_cls=target_cls, event=event)
Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects. Docs for: - charge: https://stripe.com/docs/api#charges - coupon: https://stripe.com/docs/api#coupons - invoice: https://stripe.com/docs/api#invoices - invoiceitem: https://stripe.com/docs/api#invoiceitems - plan: https://stripe.com/docs/api#plans - product: https://stripe.com/docs/api#products - source: https://stripe.com/docs/api#sources
def freeze(self, dest_dir): """Freezes every resource within a context""" for resource in self.resources(): if resource.present: resource.freeze(dest_dir)
Freezes every resource within a context
def ungeometrize_stops(geo_stops: DataFrame) -> DataFrame: """ The inverse of :func:`geometrize_stops`. Parameters ---------- geo_stops : GeoPandas GeoDataFrame Looks like a GTFS stops table, but has a ``'geometry'`` column of Shapely Point objects that replaces the ``'stop_lon'`` and ``'stop_lat'`` columns. Returns ------- DataFrame A GTFS stops table where the ``'stop_lon'`` and ``'stop_lat'`` columns are derived from the points in the given GeoDataFrame and are in WGS84 coordinates regardless of the coordinate reference system of the given GeoDataFrame. """ f = geo_stops.copy().to_crs(cs.WGS84) f["stop_lon"], f["stop_lat"] = zip( *f["geometry"].map(lambda p: [p.x, p.y]) ) del f["geometry"] return f
The inverse of :func:`geometrize_stops`. Parameters ---------- geo_stops : GeoPandas GeoDataFrame Looks like a GTFS stops table, but has a ``'geometry'`` column of Shapely Point objects that replaces the ``'stop_lon'`` and ``'stop_lat'`` columns. Returns ------- DataFrame A GTFS stops table where the ``'stop_lon'`` and ``'stop_lat'`` columns are derived from the points in the given GeoDataFrame and are in WGS84 coordinates regardless of the coordinate reference system of the given GeoDataFrame.
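A hedged usage sketch for ungeometrize_stops(); it assumes GeoPandas and Shapely are installed and that the function (and its cs.WGS84 constant) is in scope. The stop data is made up.

    import geopandas as gpd
    from shapely.geometry import Point

    geo_stops = gpd.GeoDataFrame(
        {"stop_id": ["s1", "s2"]},
        geometry=[Point(-73.98, 40.75), Point(-73.96, 40.77)],
        crs="EPSG:4326",
    )
    stops = ungeometrize_stops(geo_stops)  # gains stop_lon/stop_lat, loses geometry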
async def current_position( self, mount: top_types.Mount, critical_point: CriticalPoint = None) -> Dict[Axis, float]: """ Return the position (in deck coords) of the critical point of the specified mount. This returns cached position to avoid hitting the smoothie driver unless ``refresh`` is ``True``. If `critical_point` is specified, that critical point will be applied instead of the default one. For instance, if `critical_point=CriticalPoint.MOUNT` then the position of the mount will be returned. If the critical point specified does not exist, then the next one down is returned - for instance, if there is no tip on the specified mount but `CriticalPoint.TIP` was specified, the position of the nozzle will be returned. """ if not self._current_position: raise MustHomeError async with self._motion_lock: if mount == mount.RIGHT: offset = top_types.Point(0, 0, 0) else: offset = top_types.Point(*self.config.mount_offset) z_ax = Axis.by_mount(mount) plunger_ax = Axis.of_plunger(mount) cp = self._critical_point_for(mount, critical_point) return { Axis.X: self._current_position[Axis.X] + offset[0] + cp.x, Axis.Y: self._current_position[Axis.Y] + offset[1] + cp.y, z_ax: self._current_position[z_ax] + offset[2] + cp.z, plunger_ax: self._current_position[plunger_ax] }
Return the position (in deck coords) of the critical point of the specified mount. This returns cached position to avoid hitting the smoothie driver unless ``refresh`` is ``True``. If `critical_point` is specified, that critical point will be applied instead of the default one. For instance, if `critical_point=CriticalPoint.MOUNT` then the position of the mount will be returned. If the critical point specified does not exist, then the next one down is returned - for instance, if there is no tip on the specified mount but `CriticalPoint.TIP` was specified, the position of the nozzle will be returned.
def update(self, arg, allow_overwrite=False): """ Update our providers from either an ICatalog subclass/instance or a mapping. If arg is an ICatalog, we update from its ._providers attribute. :param arg: Di/Catalog/Mapping to update from. :type arg: ICatalog or collections.Mapping :param allow_overwrite: If True, allow overwriting existing keys :type allow_overwrite: bool """ # If arg happens to be an ICatalog subclass if inspect.isclass(arg) and issubclass(arg, ICatalog) or isinstance(arg, ICatalog): arg = arg._providers if not allow_overwrite: for key in arg: if key in self._providers: raise KeyError("Key %s already exists" % key) super(ProviderMapping, self).update(arg)
Update our providers from either an ICatalog subclass/instance or a mapping. If arg is an ICatalog, we update from its ._providers attribute. :param arg: Di/Catalog/Mapping to update from. :type arg: ICatalog or collections.Mapping :param allow_overwrite: If True, allow overwriting existing keys :type allow_overwrite: bool
def calc_effective_permeability(self, inlets=None, outlets=None, domain_area=None, domain_length=None): r""" This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes. """ phase = self.project.phases()[self.settings['phase']] d_normal = self._calc_eff_prop(inlets=inlets, outlets=outlets, domain_area=domain_area, domain_length=domain_length) K = d_normal * sp.mean(phase['pore.viscosity']) return K
r""" This calculates the effective permeability in this linear transport algorithm. Parameters ---------- inlets : array_like The pores where the inlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. outlets : array_like The pores where the outlet pressure boundary conditions were applied. If not given an attempt is made to infer them from the algorithm. domain_area : scalar, optional The area of the inlet (and outlet) boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. domain_length : scalar, optional The length of the domain between the inlet and outlet boundary faces. If not given then an attempt is made to estimate it, but it is usually underestimated. Notes ----- The area and length of the domain are found using the bounding box around the inlet and outlet pores which do not necessarily lie on the edge of the domain, resulting in underestimation of sizes.
def _inspect(self, obj, objtype=None): """Return the last generated value for this parameter.""" gen = super(Dynamic, self).__get__(obj, objtype) if hasattr(gen, '_Dynamic_last'): return gen._Dynamic_last else: return gen
Return the last generated value for this parameter.
def generate_data_for_edit_page(self): """ Generate a custom dictionary representation of the table's fields if an edit form exists, otherwise use the default representation. :return: dict """ if not self.can_edit: return {} if self.edit_form: return self.edit_form.to_dict() return self.generate_simple_data_page()
Generate a custom dictionary representation of the table's fields if an edit form exists, otherwise use the default representation. :return: dict
def get_relationship_mdata(): """Return default mdata map for Relationship""" return { 'source': { 'element_label': { 'text': 'source', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, 'destination': { 'element_label': { 'text': 'destination', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, }
Return default mdata map for Relationship
def create_tx(network, spendables, payables, fee="standard", lock_time=0, version=1): """ This function provides the easiest way to create an unsigned transaction. All coin values are in satoshis. :param spendables: a list of Spendable objects, which act as inputs. Each item in the list can be a Spendable, or text from Spendable.as_text, or a dictionary from Spendable.as_dict (which might be easier for airgapped transactions, for example). :param payables: a list where each entry is a address, or a tuple of (address, coin_value). If the coin_value is missing or zero, this address is thrown into the "split pool". Funds not explicitly claimed by the fee or an address are shared as equally as possible among the split pool. All coins are consumed: if the amount to be split does not divide evenly, some of the earlier addresses will get an extra satoshi. :param fee: an integer, or the (deprecated) string "standard" for it to be calculated :param version: (optional) the version to use in the transaction. Defaults to 1. :param lock_time: (optional) the lock_time to use in the transaction. Defaults to 0. :return: :class:`Tx <Tx>` object, with unspents populated :rtype: pycoin.tx.Tx.Tx Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> tx = create_tx(network, spendables, ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). """ Tx = network.tx def _fix_spendable(s): if isinstance(s, Tx.Spendable): return s if not hasattr(s, "keys"): return Tx.Spendable.from_text(s) return Tx.Spendable.from_dict(s) spendables = [_fix_spendable(s) for s in spendables] txs_in = [spendable.tx_in() for spendable in spendables] txs_out = [] for payable in payables: if len(payable) == 2: address, coin_value = payable else: address = payable coin_value = 0 script = network.contract.for_address(address) txs_out.append(Tx.TxOut(coin_value, script)) tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out, lock_time=lock_time) tx.set_unspents(spendables) distribute_from_split_pool(tx, fee) return tx
This function provides the easiest way to create an unsigned transaction. All coin values are in satoshis. :param spendables: a list of Spendable objects, which act as inputs. Each item in the list can be a Spendable, or text from Spendable.as_text, or a dictionary from Spendable.as_dict (which might be easier for airgapped transactions, for example). :param payables: a list where each entry is a address, or a tuple of (address, coin_value). If the coin_value is missing or zero, this address is thrown into the "split pool". Funds not explicitly claimed by the fee or an address are shared as equally as possible among the split pool. All coins are consumed: if the amount to be split does not divide evenly, some of the earlier addresses will get an extra satoshi. :param fee: an integer, or the (deprecated) string "standard" for it to be calculated :param version: (optional) the version to use in the transaction. Defaults to 1. :param lock_time: (optional) the lock_time to use in the transaction. Defaults to 0. :return: :class:`Tx <Tx>` object, with unspents populated :rtype: pycoin.tx.Tx.Tx Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> tx = create_tx(network, spendables, ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"], fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never).
def add(config, username, filename): """Add user's SSH public key to their LDAP entry.""" try: client = Client() client.prepare_connection() user_api = UserApi(client) key_api = API(client) key_api.add(username, user_api, filename) except (ldap3.core.exceptions.LDAPNoSuchAttributeResult, ldap_tools.exceptions.InvalidResult, ldap3.core.exceptions.LDAPAttributeOrValueExistsResult ) as err: # pragma: no cover print('{}: {}'.format(type(err), err.args[0])) except Exception as err: # pragma: no cover raise err from None
Add user's SSH public key to their LDAP entry.
def map_hmms(input_model, mapping): """Create a new HTK HMM model given a model and a mapping dictionary. :param input_model: The model to transform of type dict :param mapping: A dictionary from string -> list(string) :return: The transformed model of type dict """ output_model = copy.copy(input_model) o_hmms = [] for i_hmm in input_model['hmms']: i_hmm_name = i_hmm['name'] o_hmm_names = mapping.get(i_hmm_name, [i_hmm_name]) for o_hmm_name in o_hmm_names: o_hmm = copy.copy(i_hmm) o_hmm['name'] = o_hmm_name o_hmms.append(o_hmm) output_model['hmms'] = o_hmms return output_model
Create a new HTK HMM model given a model and a mapping dictionary. :param input_model: The model to transform of type dict :param mapping: A dictionary from string -> list(string) :return: The transformed model of type dict
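An illustrative sketch of map_hmms() on a toy model; the model and mapping contents are made up (the module defining map_hmms() must import copy).

    model = {"hmms": [{"name": "aa", "states": 3}, {"name": "sil", "states": 1}]}
    mapping = {"aa": ["aa_left", "aa_right"]}  # clone "aa" under two new names
    new_model = map_hmms(model, mapping)
    print([h["name"] for h in new_model["hmms"]])  # ['aa_left', 'aa_right', 'sil']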
def var_window_closed(self, widget): """ Called if user clicked close button on var window :param widget: :return: """ # TODO - Clean up the menu handling stuff, it's a bit spaghetti right now self.action_group.get_action('vars').set_active(False) self.show_vars = False self.var_window = None
Called if user clicked close button on var window :param widget: :return:
def addvlan(self, vlanid, vlan_name): """ Function operates on the IMCDev object. Takes input of vlanid (1-4094) and str of vlan_name and uses the object's auth and url to execute the create_dev_vlan method on the IMCDev object. Device must be supported in the HPE IMC Platform VLAN Manager module. :param vlanid: str of VLANId ( valid 1-4094 ) :param vlan_name: str of vlan_name :return: """ create_dev_vlan( vlanid, vlan_name, self.auth, self.url, devid=self.devid)
Function operates on the IMCDev object. Takes input of vlanid (1-4094) and str of vlan_name and uses the object's auth and url to execute the create_dev_vlan method on the IMCDev object. Device must be supported in the HPE IMC Platform VLAN Manager module. :param vlanid: str of VLANId ( valid 1-4094 ) :param vlan_name: str of vlan_name :return:
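A hedged usage sketch; imc_dev stands in for a hypothetical, already-authenticated IMCDev instance.

    imc_dev.addvlan("10", "management")  # create VLAN 10 named "management" on the device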
def get_regulate_amounts(self): """Extract INDRA RegulateAmount Statements from the BioPAX model. This method extracts IncreaseAmount/DecreaseAmount Statements from the BioPAX model. It fully reuses BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac pattern to find TemplateReactions which control the expression of a protein. """ p = pb.controlsExpressionWithTemplateReac() s = _bpp('Searcher') res = s.searchPlain(self.model, p) res_array = [_match_to_array(m) for m in res.toArray()] stmts = [] for res in res_array: # FIXME: for some reason labels are not accessible # for these queries. It would be more reliable # to get results by label instead of index. ''' controller_er = res[p.indexOf('controller ER')] generic_controller_er = res[p.indexOf('generic controller ER')] controller_simple_pe = res[p.indexOf('controller simple PE')] controller_pe = res[p.indexOf('controller PE')] control = res[p.indexOf('Control')] conversion = res[p.indexOf('Conversion')] input_pe = res[p.indexOf('input PE')] input_simple_pe = res[p.indexOf('input simple PE')] changed_generic_er = res[p.indexOf('changed generic ER')] output_pe = res[p.indexOf('output PE')] output_simple_pe = res[p.indexOf('output simple PE')] changed_er = res[p.indexOf('changed ER')] ''' # TODO: here, res[3] is the complex physical entity # for instance http://pathwaycommons.org/pc2/ # Complex_43c6b8330562c1b411d21e9d1185bae9 # consists of 3 components: JUN, FOS and NFAT # where NFAT further contains 3 member physical entities. # # However, res[2] iterates over all 5 member physical entities # of the complex which doesn't represent the underlying # structure faithfully. It would be better to use res[3] # (the complex itself) and look at components and then # members. However, then, it would not be clear how to # construct an INDRA Agent for the controller. controller = self._get_agents_from_entity(res[2]) controlled_pe = res[6] controlled = self._get_agents_from_entity(controlled_pe) conversion = res[5] direction = conversion.getTemplateDirection() if direction is not None: direction = direction.name() if direction != 'FORWARD': logger.warning('Unhandled conversion direction %s' % direction) continue # Sometimes interaction type is annotated as # term=='TRANSCRIPTION'. Other times this is not # annotated. int_type = conversion.getInteractionType().toArray() if int_type: for it in int_type: for term in it.getTerm().toArray(): pass control = res[4] control_type = control.getControlType() if control_type: control_type = control_type.name() ev = self._get_evidence(control) for subj, obj in itertools.product(_listify(controller), _listify(controlled)): subj_act = ActivityCondition('transcription', True) subj.activity = subj_act if control_type == 'ACTIVATION': st = IncreaseAmount(subj, obj, evidence=ev) elif control_type == 'INHIBITION': st = DecreaseAmount(subj, obj, evidence=ev) else: logger.warning('Unhandled control type %s' % control_type) continue st_dec = decode_obj(st, encoding='utf-8') self.statements.append(st_dec)
Extract INDRA RegulateAmount Statements from the BioPAX model. This method extracts IncreaseAmount/DecreaseAmount Statements from the BioPAX model. It fully reuses BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.controlsExpressionWithTemplateReac pattern to find TemplateReactions which control the expression of a protein.
def is_full(self): """Return whether the activity is full.""" capacity = self.get_true_capacity() if capacity != -1: num_signed_up = self.eighthsignup_set.count() return num_signed_up >= capacity return False
Return whether the activity is full.
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux, err_flux, priorbounds): ''' Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta. ''' u = [] for ix, key in enumerate(sorted(priorbounds.keys())): if key == 'rp': params.rp = theta[ix] elif key == 't0': params.t0 = theta[ix] elif key == 'sma': params.a = theta[ix] elif key == 'incl': params.inc = theta[ix] elif key == 'period': params.per = theta[ix] elif key == 'ecc': params.ecc = theta[ix] elif key == 'omega': params.w = theta[ix] elif key == 'u_linear': u.append(theta[ix]) elif key == 'u_quadratic': u.append(theta[ix]) params.u = u elif key == 'poly_order0': poly_order0 = theta[ix] elif key == 'poly_order1': poly_order1 = theta[ix] try: poly_order0 except Exception as e: poly_order0 = 0 else: pass transit = model.light_curve(params) line = poly_order0 + t*poly_order1 model = transit + line residuals = data_flux - model log_likelihood = -0.5*( np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2)) ) return log_likelihood
Given a batman TransitModel and its proposed parameters (theta), update the batman params object with the proposed parameters and evaluate the gaussian likelihood. Note: the priorbounds are only needed to parse theta.
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True): """Execute autorg. Inputs: filename: either a name of an ascii file, or an instance of Curve. mininterval: the minimum number of points in the Guinier range qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0 qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3 noprint: if the output of autorg should be redirected to the null device. Outputs: Rg as an ErrorValue I0 as an ErrorValue qmin: the lower end of the chosen Guinier range qmax: the upper end of the chosen Guinier range quality: the quality parameter, between 0 and 1 aggregation: float, the extent of aggregation """ if isinstance(filename, Curve): curve = filename with tempfile.NamedTemporaryFile('w+b', delete=False) as f: curve.save(f) filename = f.name cmdline = ['autorg', filename, '-f', 'ssv'] if mininterval is not None: cmdline.extend(['--mininterval', str(mininterval)]) if qminrg is not None: cmdline.extend(['--sminrg', str(qminrg)]) if qmaxrg is not None: cmdline.extend(['--smaxrg', str(qmaxrg)]) result = execute_command(cmdline, noprint=noprint) Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8) try: curve except NameError: curve = Curve.new_from_file(filename) else: os.unlink(filename) return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[ int(idxlast) - 1], float(quality), float(aggregation)
Execute autorg. Inputs: filename: either a name of an ascii file, or an instance of Curve. mininterval: the minimum number of points in the Guinier range qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0 qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3 noprint: if the output of autorg should be redirected to the null device. Outputs: Rg as an ErrorValue I0 as an ErrorValue qmin: the lower end of the chosen Guinier range qmax: the upper end of the chosen Guinier range quality: the quality parameter, between 0 and 1 aggregation: float, the extent of aggregation
def try_unbuffered_file(file, _alreadyopen={}): """ Try re-opening a file in an unbuffered mode and return it. If that fails, just return the original file. This function remembers the file descriptors it opens, so it never opens the same one twice. This is meant for files like sys.stdout or sys.stderr. """ try: fileno = file.fileno() except (AttributeError, UnsupportedOperation): # Unable to use fileno to re-open unbuffered. Oh well. # The output may be line buffered, which isn't that great for # repeatedly drawing and erasing text, or hiding/showing the cursor. return file filedesc = _alreadyopen.get(fileno, None) if filedesc is not None: return filedesc filedesc = fdopen(fileno, 'wb', 0) _alreadyopen[fileno] = filedesc # TODO: sys.stdout/stderr don't need to be closed. # But would it be worth it to try and close these opened files? return filedesc
Try re-opening a file in an unbuffered mode and return it. If that fails, just return the original file. This function remembers the file descriptors it opens, so it never opens the same one twice. This is meant for files like sys.stdout or sys.stderr.
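A minimal usage sketch; it assumes try_unbuffered_file() is in scope along with its module-level imports (fdopen from os, UnsupportedOperation from io).

    import sys

    out = try_unbuffered_file(sys.stdout)  # unbuffered binary stream if fileno() is usable
    out.write(b"drawing...\r")             # bytes, since the file is reopened in 'wb' mode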
def scatter(h1: Histogram1D, ax: Axes, *, errors: bool = False, **kwargs): """Scatter plot of 1D histogram.""" show_stats = kwargs.pop("show_stats", False) show_values = kwargs.pop("show_values", False) density = kwargs.pop("density", False) cumulative = kwargs.pop("cumulative", False) value_format = kwargs.pop("value_format", None) text_kwargs = pop_kwargs_with_prefix("text_", kwargs) data = get_data(h1, cumulative=cumulative, density=density) if "cmap" in kwargs: cmap = _get_cmap(kwargs) _, cmap_data = _get_cmap_data(data, kwargs) kwargs["color"] = cmap(cmap_data) else: kwargs["color"] = kwargs.pop("color", "blue") _apply_xy_lims(ax, h1, data, kwargs) _add_ticks(ax, h1, kwargs) _add_labels(ax, h1, kwargs) if errors: err_data = get_err_data(h1, cumulative=cumulative, density=density) ax.errorbar(h1.bin_centers, data, yerr=err_data, fmt=kwargs.pop("fmt", "o"), ecolor=kwargs.pop("ecolor", "black"), ms=0) ax.scatter(h1.bin_centers, data, **kwargs) if show_values: _add_values(ax, h1, data, value_format=value_format, **text_kwargs) if show_stats: _add_stats_box(h1, ax, stats=show_stats)
Scatter plot of 1D histogram.
def _get_serializer(self, _type): """Gets a serializer for a particular type. For primitives, returns the serializer from the module-level serializers. For arrays and objects, uses the special _get_T_serializer methods to build the encoders and decoders. """ if _type in _serializers: # _serializers is module level return _serializers[_type] # array and object are special types elif _type == 'array': return self._get_array_serializer() elif _type == 'object': return self._get_object_serializer() raise ValueError('Unknown type: {}'.format(_type))
Gets a serializer for a particular type. For primitives, returns the serializer from the module-level serializers. For arrays and objects, uses the special _get_T_serializer methods to build the encoders and decoders.
def must_be_same(self, klass): """Called to make sure a Node is a Dir. Since we're an Entry, we can morph into one.""" if self.__class__ is not klass: self.__class__ = klass self._morph() self.clear()
Called to make sure a Node is a Dir. Since we're an Entry, we can morph into one.
def from_pandas(cls, index): """Create baloo Index from pandas Index. Parameters ---------- index : pandas.base.Index Returns ------- Index """ from pandas import Index as PandasIndex check_type(index, PandasIndex) return Index(index.values, index.dtype, index.name)
Create baloo Index from pandas Index. Parameters ---------- index : pandas.base.Index Returns ------- Index
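A hedged usage sketch; Index is the baloo Index class from the entry above, and the pandas data is made up.

    import pandas as pd

    pd_index = pd.Index([1, 2, 3], name="ids")
    baloo_index = Index.from_pandas(pd_index)  # carries over values, dtype and name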
def pay(self, transactionAmount, senderTokenId, recipientTokenId=None, callerTokenId=None, chargeFeeTo="Recipient", callerReference=None, senderReference=None, recipientReference=None, senderDescription=None, recipientDescription=None, callerDescription=None, metadata=None, transactionDate=None, reserve=False): """ Make a payment transaction. You must specify the amount. This can also perform a Reserve request if 'reserve' is set to True. """ params = {} params['SenderTokenId'] = senderTokenId # this is for 2008-09-17 specification params['TransactionAmount.Amount'] = str(transactionAmount) params['TransactionAmount.CurrencyCode'] = "USD" #params['TransactionAmount'] = str(transactionAmount) params['ChargeFeeTo'] = chargeFeeTo params['RecipientTokenId'] = ( recipientTokenId if recipientTokenId is not None else boto.config.get("FPS", "recipient_token") ) params['CallerTokenId'] = ( callerTokenId if callerTokenId is not None else boto.config.get("FPS", "caller_token") ) if(transactionDate != None): params['TransactionDate'] = transactionDate if(senderReference != None): params['SenderReference'] = senderReference if(recipientReference != None): params['RecipientReference'] = recipientReference if(senderDescription != None): params['SenderDescription'] = senderDescription if(recipientDescription != None): params['RecipientDescription'] = recipientDescription if(callerDescription != None): params['CallerDescription'] = callerDescription if(metadata != None): params['MetaData'] = metadata if(callerReference == None): callerReference = uuid.uuid4() params['CallerReference'] = callerReference if reserve: response = self.make_request("Reserve", params) else: response = self.make_request("Pay", params) body = response.read() if(response.status == 200): rs = ResultSet() h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs else: raise FPSResponseError(response.status, response.reason, body)
Make a payment transaction. You must specify the amount. This can also perform a Reserve request if 'reserve' is set to True.
def format_dates(self, data, columns): """ This method translates columns values into datetime objects :param data: original Pandas dataframe :param columns: list of columns to cast the date to a datetime object :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with updated 'columns' with datetime objects :rtype: pandas.DataFrame """ for column in columns: if column in data.columns: data[column] = pandas.to_datetime(data[column]) return data
This method translates columns values into datetime objects :param data: original Pandas dataframe :param columns: list of columns to cast the date to a datetime object :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with updated 'columns' with datetime objects :rtype: pandas.DataFrame
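An illustrative sketch of format_dates(); client stands in for a hypothetical instance of the class that defines it, and the column names are made up.

    import pandas as pd

    df = pd.DataFrame({"created": ["2019-01-01", "2019-02-15"], "count": [1, 2]})
    df = client.format_dates(df, ["created", "missing_col"])  # absent columns are skipped
    print(df["created"].dtype)  # datetime64[ns]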
def qubits(self): """Return a list of qubits as (QuantumRegister, index) pairs.""" return [(v, i) for k, v in self.qregs.items() for i in range(v.size)]
Return a list of qubits as (QuantumRegister, index) pairs.
def path_valid(self): """ Returns ---------- path_valid: (n,) bool, indexes of self.paths self.polygons_closed which are valid polygons """ valid = [i is not None for i in self.polygons_closed] valid = np.array(valid, dtype=np.bool) return valid
Returns ---------- path_valid: (n,) bool, indexes of self.paths self.polygons_closed which are valid polygons
def bbox(self): """(left, top, right, bottom) tuple.""" if not hasattr(self, '_bbox'): data = None for key in ('ARTBOARD_DATA1', 'ARTBOARD_DATA2', 'ARTBOARD_DATA3'): if key in self.tagged_blocks: data = self.tagged_blocks.get_data(key) assert data is not None rect = data.get(b'artboardRect') self._bbox = ( int(rect.get(b'Left')), int(rect.get(b'Top ')), int(rect.get(b'Rght')), int(rect.get(b'Btom')), ) return self._bbox
(left, top, right, bottom) tuple.
def closest_pair(arr, give="indicies"): """Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]] """ idxs = [idx for idx in np.ndindex(arr.shape)] outs = [] min_dist = arr.max() - arr.min() for idxa in idxs: for idxb in idxs: if idxa == idxb: continue dist = abs(arr[idxa] - arr[idxb]) if dist == min_dist: if not [idxb, idxa] in outs: outs.append([idxa, idxb]) elif dist < min_dist: min_dist = dist outs = [[idxa, idxb]] if give == "indicies": return outs elif give == "distance": return min_dist else: raise KeyError("give not recognized in closest_pair")
Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]]
def reflect_table(engine, klass): """Inspect and reflect objects""" try: meta = MetaData() meta.reflect(bind=engine) except OperationalError as e: raise DatabaseError(error=e.orig.args[1], code=e.orig.args[0]) # Try to reflect from any of the supported tables table = None for tb in klass.tables(): if tb in meta.tables: table = meta.tables[tb] break if table is None: raise DatabaseError(error="Invalid schema. Table not found", code="-1") # Map table schema into klass mapper(klass, table, column_prefix=klass.column_prefix()) return table
Inspect and reflect objects
def read_rows(self, rows, **keys): """ Read the specified rows. parameters ---------- rows: list,array A list or array of row indices. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. lower: bool, optional If True, force all columns names to lower case in output. Will over ride the lower= keyword from construction. upper: bool, optional If True, force all columns names to upper case in output. Will over ride the lower= keyword from construction. """ if rows is None: # we actually want all rows! return self._read_all() if self._info['hdutype'] == ASCII_TBL: keys['rows'] = rows return self.read(**keys) rows = self._extract_rows(rows) dtype, offsets, isvar = self.get_rec_dtype(**keys) w, = numpy.where(isvar == True) # noqa if w.size > 0: vstorage = keys.get('vstorage', self._vstorage) colnums = self._extract_colnums() return self._read_rec_with_var( colnums, rows, dtype, offsets, isvar, vstorage) else: array = numpy.zeros(rows.size, dtype=dtype) self._FITS.read_rows_as_rec(self._ext+1, array, rows) array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array) for colnum, name in enumerate(array.dtype.names): self._rescale_and_convert_field_inplace( array, name, self._info['colinfo'][colnum]['tscale'], self._info['colinfo'][colnum]['tzero']) lower = keys.get('lower', False) upper = keys.get('upper', False) if self.lower or lower: _names_to_lower_if_recarray(array) elif self.upper or upper: _names_to_upper_if_recarray(array) self._maybe_trim_strings(array, **keys) return array
Read the specified rows. parameters ---------- rows: list,array A list or array of row indices. vstorage: string, optional Over-ride the default method to store variable length columns. Can be 'fixed' or 'object'. See docs on fitsio.FITS for details. lower: bool, optional If True, force all columns names to lower case in output. Will over ride the lower= keyword from construction. upper: bool, optional If True, force all columns names to upper case in output. Will over ride the lower= keyword from construction.
def create_logger(): """Initialize the global logger variable""" global logger formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s') handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1) handler.setFormatter(formatter) handler.setLevel(log_level) handler.suffix = "%Y-%m-%d" logger = logging.getLogger("sacplus") logger.setLevel(log_level) logger.addHandler(handler)
Initialize the global logger variable
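A minimal sketch of the module-level names create_logger() relies on; the path and level shown are assumptions about what the defining module would contain.

    import logging
    from logging.handlers import TimedRotatingFileHandler  # used by create_logger()

    log_file = "/tmp/sacplus.log"  # hypothetical log path
    log_level = logging.INFO
    logger = None

    create_logger()
    logger.info("logger initialised")  # the global set by create_logger()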
def _parse_message(self, data): """ Parse the message from the device. :param data: message data :type data: string :raises: :py:class:`~alarmdecoder.util.InvalidMessageError` """ match = self._regex.match(str(data)) if match is None: raise InvalidMessageError('Received invalid message: {0}'.format(data)) header, self.bitfield, self.numeric_code, self.panel_data, alpha = match.group(1, 2, 3, 4, 5) is_bit_set = lambda bit: not self.bitfield[bit] == "0" self.ready = is_bit_set(1) self.armed_away = is_bit_set(2) self.armed_home = is_bit_set(3) self.backlight_on = is_bit_set(4) self.programming_mode = is_bit_set(5) self.beeps = int(self.bitfield[6], 16) self.zone_bypassed = is_bit_set(7) self.ac_power = is_bit_set(8) self.chime_on = is_bit_set(9) self.alarm_event_occurred = is_bit_set(10) self.alarm_sounding = is_bit_set(11) self.battery_low = is_bit_set(12) self.entry_delay_off = is_bit_set(13) self.fire_alarm = is_bit_set(14) self.check_zone = is_bit_set(15) self.perimeter_only = is_bit_set(16) self.system_fault = is_bit_set(17) if self.bitfield[18] in list(PANEL_TYPES): self.panel_type = PANEL_TYPES[self.bitfield[18]] # pos 20-21 - Unused. self.text = alpha.strip('"') self.mask = int(self.panel_data[3:3+8], 16) if self.panel_type in (ADEMCO, DSC): if int(self.panel_data[19:21], 16) & 0x01 > 0: # Current cursor location on the alpha display. self.cursor_location = int(self.panel_data[21:23], 16)
Parse the message from the device. :param data: message data :type data: string :raises: :py:class:`~alarmdecoder.util.InvalidMessageError`
def editor_interfaces(self): """ Provides access to editor interface management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object. :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy Usage: >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces() <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat"> """ return ContentTypeEditorInterfacesProxy(self._client, self.space.id, self._environment_id, self.id)
Provides access to editor interface management methods for the given content type. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface :return: :class:`ContentTypeEditorInterfacesProxy <contentful_management.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy>` object. :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy Usage: >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces() <ContentTypeEditorInterfacesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time(second). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout)
Set the timeout to the websocket. timeout: timeout time(second).
def is_equal_to_ignoring_case(self, other): """Asserts that val is case-insensitive equal to other.""" if not isinstance(self.val, str_types): raise TypeError('val is not a string') if not isinstance(other, str_types): raise TypeError('given arg must be a string') if self.val.lower() != other.lower(): self._err('Expected <%s> to be case-insensitive equal to <%s>, but was not.' % (self.val, other)) return self
Asserts that val is case-insensitive equal to other.
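A hedged usage sketch; it assumes this method lives on an assertpy-style assertion builder reachable through assert_that().

    from assertpy import assert_that  # assumption: the surrounding library is assertpy

    assert_that("Foo").is_equal_to_ignoring_case("fOO")  # passes
    assert_that("Foo").is_equal_to_ignoring_case("bar")  # raises an assertion error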
def find_step_impl(self, step): """ Find the implementation of the step for the given match string. Returns the StepImpl object corresponding to the implementation, and the arguments to the step implementation. If no implementation is found, raises UndefinedStepImpl. If more than one implementation is found, raises AmbiguousStepImpl. Each of the arguments returned will have been transformed by the first matching transform implementation. """ result = None for si in self.steps[step.step_type]: matches = si.match(step.match) if matches: if result: raise AmbiguousStepImpl(step, result[0], si) args = [self._apply_transforms(arg, si) for arg in matches.groups()] result = si, args if not result: raise UndefinedStepImpl(step) return result
Find the implementation of the step for the given match string. Returns the StepImpl object corresponding to the implementation, and the arguments to the step implementation. If no implementation is found, raises UndefinedStepImpl. If more than one implementation is found, raises AmbiguousStepImpl. Each of the arguments returned will have been transformed by the first matching transform implementation.
def padDigitalData(self, dig_data, n): """Pad dig_data with its last element so that the new array is a multiple of n. """ n = int(n) l0 = len(dig_data) if l0 % n == 0: return dig_data # no need of padding else: ladd = n - (l0 % n) dig_data_add = np.zeros(ladd, dtype="uint32") dig_data_add.fill(dig_data[-1]) return np.concatenate((dig_data, dig_data_add))
Pad dig_data with its last element so that the new array is a multiple of n.
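A small worked example of padDigitalData(); device stands in for a hypothetical instance of the class that defines it.

    import numpy as np

    dig_data = np.array([3, 3, 7], dtype="uint32")
    padded = device.padDigitalData(dig_data, 4)
    print(padded)  # [3 3 7 7] -- length padded from 3 up to the next multiple of 4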
def delete_namespaced_pod_preset(self, name, namespace, **kwargs): # noqa: E501 """delete_namespaced_pod_preset # noqa: E501 delete a PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_pod_preset(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.delete_namespaced_pod_preset_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
delete_namespaced_pod_preset # noqa: E501 delete a PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_pod_preset(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
def get_output_metadata(self, path, filename): """ Describe a file by its metadata. :return: dict """ checksums = get_checksums(path, ['md5']) metadata = {'filename': filename, 'filesize': os.path.getsize(path), 'checksum': checksums['md5sum'], 'checksum_type': 'md5'} if self.metadata_only: metadata['metadata_only'] = True return metadata
Describe a file by its metadata. :return: dict
def built(name, runas, dest_dir, spec, sources, tgt, template=None, deps=None, env=None, results=None, force=False, saltenv='base', log_dir='/var/log/salt/pkgbuild'): ''' Ensure that the named package is built and exists in the named directory name The name to track the build, the name value is otherwise unused runas The user to run the build process as dest_dir The directory on the minion to place the built package(s) spec The location of the spec file (used for rpms) sources The list of package sources tgt The target platform to run the build on template Run the spec file through a templating engine .. versionchanged:: 2015.8.2 This argument is now optional, allowing for no templating engine to be used if none is desired. deps Packages required to ensure that the named package is built can be hosted on either the salt master server or on an HTTP or FTP server. Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials env A dictionary of environment variables to be set prior to execution. Example: .. code-block:: yaml - env: DEB_BUILD_OPTIONS: 'nocheck' .. warning:: The above illustrates a common PyYAML pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) PyYAML idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. results The names of the expected rpms that will be built force : False If ``True``, packages will be built even if they already exist in the ``dest_dir``. This is useful when building a package for continuous or nightly package builds. .. versionadded:: 2015.8.2 saltenv The saltenv to use for files downloaded from the salt filesever log_dir : /var/log/salt/rpmbuild Root directory for log files created from the build. Logs will be organized by package name, version, OS release, and CPU architecture under this directory. .. versionadded:: 2015.8.2 ''' ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if not results: ret['comment'] = '\'results\' argument is required' ret['result'] = False return ret if isinstance(results, six.string_types): results = results.split(',') needed = _get_missing_results(results, dest_dir) if not force and not needed: ret['comment'] = 'All needed packages exist' return ret if __opts__['test']: ret['result'] = None if force: ret['comment'] = 'Packages will be force-built' else: ret['comment'] = 'The following packages need to be built: ' ret['comment'] += ', '.join(needed) return ret # Need the check for None here, if env is not provided then it falls back # to None and it is assumed that the environment is not being overridden. if env is not None and not isinstance(env, dict): ret['comment'] = ('Invalidly-formatted \'env\' parameter. See ' 'documentation.') ret['result'] = False return ret func = 'pkgbuild.build' if __grains__.get('os_family', False) not in ('RedHat', 'Suse'): for res in results: if res.endswith('.rpm'): func = 'rpmbuild.build' break ret['changes'] = __salt__[func]( runas, tgt, dest_dir, spec, sources, deps, env, template, saltenv, log_dir) needed = _get_missing_results(results, dest_dir) if needed: ret['comment'] = 'The following packages were not built: ' ret['comment'] += ', '.join(needed) ret['result'] = False else: ret['comment'] = 'All needed packages were built' return ret
Ensure that the named package is built and exists in the named directory name The name to track the build, the name value is otherwise unused runas The user to run the build process as dest_dir The directory on the minion to place the built package(s) spec The location of the spec file (used for rpms) sources The list of package sources tgt The target platform to run the build on template Run the spec file through a templating engine .. versionchanged:: 2015.8.2 This argument is now optional, allowing for no templating engine to be used if none is desired. deps Packages required to ensure that the named package is built can be hosted on either the salt master server or on an HTTP or FTP server. Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials env A dictionary of environment variables to be set prior to execution. Example: .. code-block:: yaml - env: DEB_BUILD_OPTIONS: 'nocheck' .. warning:: The above illustrates a common PyYAML pitfall, that **yes**, **no**, **on**, **off**, **true**, and **false** are all loaded as boolean ``True`` and ``False`` values, and must be enclosed in quotes to be used as strings. More info on this (and other) PyYAML idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`. results The names of the expected rpms that will be built force : False If ``True``, packages will be built even if they already exist in the ``dest_dir``. This is useful when building a package for continuous or nightly package builds. .. versionadded:: 2015.8.2 saltenv The saltenv to use for files downloaded from the salt filesever log_dir : /var/log/salt/rpmbuild Root directory for log files created from the build. Logs will be organized by package name, version, OS release, and CPU architecture under this directory. .. versionadded:: 2015.8.2
async def create( cls, interface: Interface, mode: LinkMode, subnet: Union[Subnet, int] = None, ip_address: str = None, force: bool = False, default_gateway: bool = False): """ Create a link on `Interface` in MAAS. :param interface: Interface to create the link on. :type interface: `Interface` :param mode: Mode of the link. :type mode: `LinkMode` :param subnet: The subnet to create the link on (optional). :type subnet: `Subnet` or `int` :param ip_address: The IP address to assign to the link. :type ip_address: `str` :param force: If True, allows `LinkMode.LINK_UP` to be created even if other links already exist. Also allows the selection of any subnet no matter the VLAN the subnet belongs to. Using this option will cause all other interface links to be deleted (optional). :type force: `bool` :param default_gateway: If True, sets the gateway IP address for the subnet as the default gateway for the node this interface belongs to. Option can only be used with the `LinkMode.AUTO` and `LinkMode.STATIC` modes. :type default_gateway: `bool` :returns: The created InterfaceLink. :rtype: `InterfaceLink` """ if not isinstance(interface, Interface): raise TypeError( "interface must be an Interface, not %s" % type(interface).__name__) if not isinstance(mode, LinkMode): raise TypeError( "mode must be a LinkMode, not %s" % type(mode).__name__) if subnet is not None: if isinstance(subnet, Subnet): subnet = subnet.id elif isinstance(subnet, int): pass else: raise TypeError( "subnet must be a Subnet or int, not %s" % type(subnet).__name__) if mode in [LinkMode.AUTO, LinkMode.STATIC]: if subnet is None: raise ValueError('subnet is required for %s' % mode) if default_gateway and mode not in [LinkMode.AUTO, LinkMode.STATIC]: raise ValueError('cannot set as default_gateway for %s' % mode) params = { 'system_id': interface.node.system_id, 'id': interface.id, 'mode': mode.value, 'force': force, 'default_gateway': default_gateway, } if subnet is not None: params['subnet'] = subnet if ip_address is not None: params['ip_address'] = ip_address # The API doesn't return just the link it returns the whole interface. # Store the link ids before the save to find the addition at the end. link_ids = { link.id for link in interface.links } data = await interface._handler.link_subnet(**params) # Update the links on the interface, except for the newly created link # the `ManagedCreate` wrapper will add that to the interfaces link data # automatically. new_links = { link['id']: link for link in data['links'] } links_diff = list(set(new_links.keys()) - link_ids) new_link = new_links.pop(links_diff[0]) interface._data['links'] = list(new_links.values()) interface._orig_data['links'] = copy.deepcopy(interface._data['links']) return cls._object(new_link)
Create a link on `Interface` in MAAS. :param interface: Interface to create the link on. :type interface: `Interface` :param mode: Mode of the link. :type mode: `LinkMode` :param subnet: The subnet to create the link on (optional). :type subnet: `Subnet` or `int` :param ip_address: The IP address to assign to the link. :type ip_address: `str` :param force: If True, allows `LinkMode.LINK_UP` to be created even if other links already exist. Also allows the selection of any subnet no matter the VLAN the subnet belongs to. Using this option will cause all other interface links to be deleted (optional). :type force: `bool` :param default_gateway: If True, sets the gateway IP address for the subnet as the default gateway for the node this interface belongs to. Option can only be used with the `LinkMode.AUTO` and `LinkMode.STATIC` modes. :type default_gateway: `bool` :returns: The created InterfaceLink. :rtype: `InterfaceLink`
def _build(self, x, prev_state): """Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2. """ x.get_shape().with_rank(2) self._batch_size = x.get_shape().as_list()[0] self._dtype = x.dtype x_zeros = tf.concat( [x, tf.zeros( shape=(self._batch_size, 1), dtype=self._dtype)], 1) x_ones = tf.concat( [x, tf.ones( shape=(self._batch_size, 1), dtype=self._dtype)], 1) # Weights for the halting signal halting_linear = basic.Linear(name="halting_linear", output_size=1) body = functools.partial( self._body, halting_linear=halting_linear, x_ones=x_ones) cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) core_output_size = [x.value for x in self._core.output_size] out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size), dtype=self._dtype) cumul_state_init = _nested_zeros_like(prev_state) remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop( self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init]) act_output = basic.Linear( name="act_output_linear", output_size=self._output_size)(final_out) return (act_output, (final_iteration, final_remainder)), final_cumul_state
Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2.
def get_full_xml_representation(entity, private_key): """Get full XML representation of an entity. This contains the <XML><post>..</post></XML> wrapper. Accepts either a Base entity or a Diaspora entity. Author `private_key` must be given so that certain entities can be signed. """ from federation.entities.diaspora.mappers import get_outbound_entity diaspora_entity = get_outbound_entity(entity, private_key) xml = diaspora_entity.to_xml() return "<XML><post>%s</post></XML>" % etree.tostring(xml).decode("utf-8")
Get full XML representation of an entity. This contains the <XML><post>..</post></XML> wrapper. Accepts either a Base entity or a Diaspora entity. Author `private_key` must be given so that certain entities can be signed.
def glob(patterns, parent=None, excludes=None, include_dotfiles=False, ignore_false_excludes=False): """ Wrapper for #glob2.glob() that accepts an arbitrary number of patterns and matches them. The paths are normalized with #norm(). Relative patterns are automaticlly joined with *parent*. If the parameter is omitted, it defaults to the current working directory. If *excludes* is specified, it must be a string or a list of strings that is/contains glob patterns or filenames to be removed from the result before returning. > Every file listed in *excludes* will only remove **one** match from > the result list that was generated from *patterns*. Thus, if you > want to exclude some files with a pattern except for a specific file > that would also match that pattern, simply list that file another > time in the *patterns*. # Parameters patterns (list of str): A list of glob patterns or filenames. parent (str): The parent directory for relative paths. excludes (list of str): A list of glob patterns or filenames. include_dotfiles (bool): If True, `*` and `**` can also capture file or directory names starting with a dot. ignore_false_excludes (bool): False by default. If True, items listed in *excludes* that have not been globbed will raise an exception. # Returns list of str: A list of filenames. """ if not glob2: raise glob2_ext if isinstance(patterns, str): patterns = [patterns] if not parent: parent = os.getcwd() result = [] for pattern in patterns: if not os.path.isabs(pattern): pattern = os.path.join(parent, pattern) result += glob2.glob(canonical(pattern)) for pattern in (excludes or ()): if not os.path.isabs(pattern): pattern = os.path.join(parent, pattern) pattern = canonical(pattern) if not isglob(pattern): try: result.remove(pattern) except ValueError as exc: if not ignore_false_excludes: raise ValueError('{} ({})'.format(exc, pattern)) else: for item in glob2.glob(pattern): try: result.remove(item) except ValueError as exc: if not ignore_false_excludes: raise ValueError('{} ({})'.format(exc, pattern)) return result
Wrapper for #glob2.glob() that accepts an arbitrary number of patterns and matches them. The paths are normalized with #norm().

Relative patterns are automatically joined with *parent*. If the parameter is omitted, it defaults to the current working directory.

If *excludes* is specified, it must be a string or a list of strings that is/contains glob patterns or filenames to be removed from the result before returning.

> Every file listed in *excludes* will only remove **one** match from
> the result list that was generated from *patterns*. Thus, if you
> want to exclude some files with a pattern except for a specific file
> that would also match that pattern, simply list that file another
> time in the *patterns*.

# Parameters
patterns (list of str): A list of glob patterns or filenames.
parent (str): The parent directory for relative paths.
excludes (list of str): A list of glob patterns or filenames.
include_dotfiles (bool): If True, `*` and `**` can also capture file or directory names starting with a dot.
ignore_false_excludes (bool): False by default. If False, items listed in *excludes* that do not remove anything from the result raise an exception; if True, they are silently ignored.

# Returns
list of str: A list of filenames.
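A short call sketch, assuming the function above is available in the current module and glob2 is installed; the paths and patterns are purely illustrative.

files = glob(
    ['src/**/*.py', 'docs/*.md'],       # patterns, resolved relative to parent
    parent='/path/to/project',          # hypothetical project root
    excludes=['src/**/test_*.py'],      # excludes are globbed and removed from the result
)
print(files)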
def convertnumbers(table, strict=False, **kwargs): """ Convenience function to convert all field values to numbers where possible. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz', 'quux'], ... ['1', '3.0', '9+3j', 'aaa'], ... ['2', '1.3', '7+2j', None]] >>> table2 = etl.convertnumbers(table1) >>> table2 +-----+-----+--------+-------+ | foo | bar | baz | quux | +=====+=====+========+=======+ | 1 | 3.0 | (9+3j) | 'aaa' | +-----+-----+--------+-------+ | 2 | 1.3 | (7+2j) | None | +-----+-----+--------+-------+ """ return convertall(table, numparser(strict), **kwargs)
Convenience function to convert all field values to numbers where possible. E.g.:: >>> import petl as etl >>> table1 = [['foo', 'bar', 'baz', 'quux'], ... ['1', '3.0', '9+3j', 'aaa'], ... ['2', '1.3', '7+2j', None]] >>> table2 = etl.convertnumbers(table1) >>> table2 +-----+-----+--------+-------+ | foo | bar | baz | quux | +=====+=====+========+=======+ | 1 | 3.0 | (9+3j) | 'aaa' | +-----+-----+--------+-------+ | 2 | 1.3 | (7+2j) | None | +-----+-----+--------+-------+
def create_image_lists(image_dir, testing_percentage, validation_percentage): """Builds a list of training images from the file system. Analyzes the sub folders in the image directory, splits them into stable training, testing, and validation sets, and returns a data structure describing the lists of images for each label and their paths. Args: image_dir: String path to a folder containing subfolders of images. testing_percentage: Integer percentage of the images to reserve for tests. validation_percentage: Integer percentage of images reserved for validation. Returns: An OrderedDict containing an entry for each label subfolder, with images split into training, testing, and validation sets within each label. The order of items defines the class indices. """ if not tf.gfile.Exists(image_dir): tf.logging.error("Image directory '" + image_dir + "' not found.") return None result = collections.OrderedDict() sub_dirs = sorted(x[0] for x in tf.gfile.Walk(image_dir)) # The root directory comes first, so skip it. is_root_dir = True for sub_dir in sub_dirs: if is_root_dir: is_root_dir = False continue extensions = sorted(set(os.path.normcase(ext) # Smash case on Windows. for ext in ['JPEG', 'JPG', 'jpeg', 'jpg', 'png'])) file_list = [] dir_name = os.path.basename( # tf.gfile.Walk() returns sub-directory with trailing '/' when it is in # Google Cloud Storage, which confuses os.path.basename(). sub_dir[:-1] if sub_dir.endswith('/') else sub_dir) if dir_name == image_dir: continue tf.logging.info("Looking for images in '" + dir_name + "'") for extension in extensions: file_glob = os.path.join(image_dir, dir_name, '*.' + extension) file_list.extend(tf.gfile.Glob(file_glob)) if not file_list: tf.logging.warning('No files found') continue if len(file_list) < 20: tf.logging.warning( 'WARNING: Folder has less than 20 images, which may cause issues.') elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS: tf.logging.warning( 'WARNING: Folder {} has more than {} images. Some images will ' 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS)) label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower()) training_images = [] testing_images = [] validation_images = [] for file_name in file_list: base_name = os.path.basename(file_name) # We want to ignore anything after '_nohash_' in the file name when # deciding which set to put an image in, the data set creator has a way of # grouping photos that are close variations of each other. For example # this is used in the plant disease data set to group multiple pictures of # the same leaf. hash_name = re.sub(r'_nohash_.*$', '', file_name) # This looks a bit magical, but we need to decide whether this file should # go into the training, testing, or validation sets, and we want to keep # existing files in the same set even if more files are subsequently # added. # To do that, we need a stable way of deciding based on just the file name # itself, so we do a hash of that and then use that to generate a # probability value that we use to assign it. 
hash_name_hashed = hashlib.sha1(tf.compat.as_bytes(hash_name)).hexdigest() percentage_hash = ((int(hash_name_hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) * (100.0 / MAX_NUM_IMAGES_PER_CLASS)) if percentage_hash < validation_percentage: validation_images.append(base_name) elif percentage_hash < (testing_percentage + validation_percentage): testing_images.append(base_name) else: training_images.append(base_name) result[label_name] = { 'dir': dir_name, 'training': training_images, 'testing': testing_images, 'validation': validation_images, } return result
Builds a list of training images from the file system. Analyzes the sub folders in the image directory, splits them into stable training, testing, and validation sets, and returns a data structure describing the lists of images for each label and their paths. Args: image_dir: String path to a folder containing subfolders of images. testing_percentage: Integer percentage of the images to reserve for tests. validation_percentage: Integer percentage of images reserved for validation. Returns: An OrderedDict containing an entry for each label subfolder, with images split into training, testing, and validation sets within each label. The order of items defines the class indices.
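A stand-alone sketch of the stable split rule described above: the bucket depends only on the file name (with any '_nohash_' suffix stripped), so adding images later never moves existing files between sets. The MAX_NUM_IMAGES_PER_CLASS value is an assumption matching the usual TensorFlow retraining example.

import hashlib
import re

MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # assumed, ~134M

def which_set(file_name, testing_percentage=10, validation_percentage=10):
    # Ignore anything after '_nohash_' so grouped variations land in one set.
    hash_name = re.sub(r'_nohash_.*$', '', file_name)
    hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
    percentage_hash = ((int(hashed, 16) % (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                       (100.0 / MAX_NUM_IMAGES_PER_CLASS))
    if percentage_hash < validation_percentage:
        return 'validation'
    if percentage_hash < testing_percentage + validation_percentage:
        return 'testing'
    return 'training'

print(which_set('roses/rose_001.jpg'))  # same name -> same answer on every run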
def textFileStream(self, directory):
    """
    Create an input stream that monitors a Hadoop-compatible file system
    for new files and reads them as text files.
    Files must be written to the monitored directory by "moving" them from
    another location within the same file system.
    File names starting with . are ignored.
    The text files must be encoded as UTF-8.
    """
    return DStream(self._jssc.textFileStream(directory), self, UTF8Deserializer())
Create an input stream that monitors a Hadoop-compatible file system for new files and reads them as text files. Files must be written to the monitored directory by "moving" them from another location within the same file system. File names starting with . are ignored. The text files must be encoded as UTF-8.
def coord(self, offset=(0,0)): '''return lat,lon within a tile given (offsetx,offsety)''' (tilex, tiley) = self.tile (offsetx, offsety) = offset world_tiles = 1<<self.zoom x = ( tilex + 1.0*offsetx/TILES_WIDTH ) / (world_tiles/2.) - 1 y = ( tiley + 1.0*offsety/TILES_HEIGHT) / (world_tiles/2.) - 1 lon = x * 180.0 y = math.exp(-y*2*math.pi) e = (y-1)/(y+1) lat = 180.0/math.pi * math.asin(e) return (lat, lon)
return lat,lon within a tile given (offsetx,offsety)
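A self-contained copy of the same Web Mercator arithmetic, handy as a sanity check; TILES_WIDTH and TILES_HEIGHT are assumed to be the usual 256-pixel slippy-map tile size.

import math

TILES_WIDTH = TILES_HEIGHT = 256  # assumed tile size

def tile_coord(tilex, tiley, zoom, offset=(0, 0)):
    offsetx, offsety = offset
    world_tiles = 1 << zoom
    x = (tilex + 1.0 * offsetx / TILES_WIDTH) / (world_tiles / 2.0) - 1
    y = (tiley + 1.0 * offsety / TILES_HEIGHT) / (world_tiles / 2.0) - 1
    lon = x * 180.0
    y = math.exp(-y * 2 * math.pi)
    lat = 180.0 / math.pi * math.asin((y - 1) / (y + 1))
    return (lat, lon)

# The centre tile at any zoom level maps back to (0.0, 0.0):
print(tile_coord(2, 2, zoom=2))  # -> (0.0, 0.0)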
def dict(self): """ the python object for rendering json. It is called dict to be coherent with the other modules but it actually returns a list :return: the python object for rendering json :rtype: list """ json_list = [] for step in self.steps: json_list.append(step.dict) return json_list
the python object for rendering json. It is called dict to be coherent with the other modules but it actually returns a list :return: the python object for rendering json :rtype: list
def visit_set(self, node): """return an astroid.Set node as string""" return "{%s}" % ", ".join(child.accept(self) for child in node.elts)
return an astroid.Set node as string
def get_institute_graph_url(start, end): """ Pie chart comparing institutes usage. """ filename = get_institute_graph_filename(start, end) urls = { 'graph_url': urlparse.urljoin(GRAPH_URL, filename + ".png"), 'data_url': urlparse.urljoin(GRAPH_URL, filename + ".csv"), } return urls
Pie chart comparing institutes usage.
def ppo_original_world_model(): """Atari parameters with world model as policy.""" hparams = ppo_original_params() hparams.policy_network = "next_frame_basic_deterministic" hparams_keys = hparams.values().keys() video_hparams = basic_deterministic_params.next_frame_basic_deterministic() for (name, value) in six.iteritems(video_hparams.values()): if name in hparams_keys: hparams.set_hparam(name, value) else: hparams.add_hparam(name, value) # Mostly to avoid decaying WM params when training the policy. hparams.weight_decay = 0 return hparams
Atari parameters with world model as policy.
def ConfigureViewTypeChoices( self, event=None ): """Configure the set of View types in the toolbar (and menus)""" self.viewTypeTool.SetItems( getattr( self.loader, 'ROOTS', [] )) if self.loader and self.viewType in self.loader.ROOTS: self.viewTypeTool.SetSelection( self.loader.ROOTS.index( self.viewType )) # configure the menu with the available choices... def chooser( typ ): def Callback( event ): if typ != self.viewType: self.viewType = typ self.OnRootView( event ) return Callback # Clear all previous items for item in self.viewTypeMenu.GetMenuItems(): self.viewTypeMenu.DeleteItem( item ) if self.loader and self.loader.ROOTS: for root in self.loader.ROOTS: item = wx.MenuItem( self.viewTypeMenu, -1, root.title(), _("View hierarchy by %(name)s")%{ 'name': root.title(), }, kind=wx.ITEM_RADIO, ) item.SetCheckable( True ) self.viewTypeMenu.AppendItem( item ) item.Check( root == self.viewType ) wx.EVT_MENU( self, item.GetId(), chooser( root ))
Configure the set of View types in the toolbar (and menus)
def _log(self, s): r"""Log a string. It flushes but doesn't append \n, so do that yourself.""" # TODO(tewalds): Should this be using logging.info instead? How to see them # outside of google infrastructure? sys.stderr.write(s) sys.stderr.flush()
r"""Log a string. It flushes but doesn't append \n, so do that yourself.
def show(parent=None, targets=[], modal=None, auto_publish=False, auto_validate=False): """Attempt to show GUI Requires install() to have been run first, and a live instance of Pyblish QML in the background. Arguments: parent (None, optional): Deprecated targets (list, optional): Publishing targets modal (bool, optional): Block interactions to parent """ # Get modal mode from environment if modal is None: modal = bool(os.environ.get("PYBLISH_QML_MODAL", False)) # Automatically install if not already installed. install(modal) show_settings = settings.to_dict() show_settings['autoPublish'] = auto_publish show_settings['autoValidate'] = auto_validate # Show existing GUI if _state.get("currentServer"): server = _state["currentServer"] proxy = ipc.server.Proxy(server) try: proxy.show(show_settings) return server except IOError: # The running instance has already been closed. _state.pop("currentServer") if not host.is_headless(): host.splash() try: service = ipc.service.Service() server = ipc.server.Server(service, targets=targets, modal=modal) except Exception: # If for some reason, the GUI fails to show. traceback.print_exc() return host.desplash() proxy = ipc.server.Proxy(server) proxy.show(show_settings) # Store reference to server for future calls _state["currentServer"] = server log.info("Success. QML server available as " "pyblish_qml.api.current_server()") server.listen() return server
Attempt to show GUI Requires install() to have been run first, and a live instance of Pyblish QML in the background. Arguments: parent (None, optional): Deprecated targets (list, optional): Publishing targets modal (bool, optional): Block interactions to parent
def storage_set(self, key, value): """ Store a value for the module. """ if not self._module: return self._storage_init() module_name = self._module.module_full_name return self._storage.storage_set(module_name, key, value)
Store a value for the module.
def set_iscsi_volume(self, port_id, initiator_iqn, initiator_dhcp=False, initiator_ip=None, initiator_netmask=None, target_dhcp=False, target_iqn=None, target_ip=None, target_port=3260, target_lun=0, boot_prio=1, chap_user=None, chap_secret=None, mutual_chap_secret=None): """Set iSCSI volume information to configuration. :param port_id: Physical port ID. :param initiator_iqn: IQN of initiator. :param initiator_dhcp: True if DHCP is used in the iSCSI network. :param initiator_ip: IP address of initiator. None if DHCP is used. :param initiator_netmask: Netmask of initiator as integer. None if DHCP is used. :param target_dhcp: True if DHCP is used for iSCSI target. :param target_iqn: IQN of target. None if DHCP is used. :param target_ip: IP address of target. None if DHCP is used. :param target_port: Port number of target. None if DHCP is used. :param target_lun: LUN number of target. None if DHCP is used, :param boot_prio: Boot priority of the volume. 1 indicates the highest priority. """ initiator_netmask = (_convert_netmask(initiator_netmask) if initiator_netmask else None) port_handler = _parse_physical_port_id(port_id) iscsi_boot = _create_iscsi_boot( initiator_iqn, initiator_dhcp=initiator_dhcp, initiator_ip=initiator_ip, initiator_netmask=initiator_netmask, target_dhcp=target_dhcp, target_iqn=target_iqn, target_ip=target_ip, target_port=target_port, target_lun=target_lun, boot_prio=boot_prio, chap_user=chap_user, chap_secret=chap_secret, mutual_chap_secret=mutual_chap_secret) port = self._find_port(port_handler) if port: port_handler.set_iscsi_port(port, iscsi_boot) else: port = port_handler.create_iscsi_port(iscsi_boot) self._add_port(port_handler, port)
Set iSCSI volume information to configuration.

:param port_id: Physical port ID.
:param initiator_iqn: IQN of initiator.
:param initiator_dhcp: True if DHCP is used in the iSCSI network.
:param initiator_ip: IP address of initiator. None if DHCP is used.
:param initiator_netmask: Netmask of initiator as integer. None if DHCP is used.
:param target_dhcp: True if DHCP is used for iSCSI target.
:param target_iqn: IQN of target. None if DHCP is used.
:param target_ip: IP address of target. None if DHCP is used.
:param target_port: Port number of target. None if DHCP is used.
:param target_lun: LUN number of target. None if DHCP is used.
:param boot_prio: Boot priority of the volume. 1 indicates the highest priority.
:param chap_user: CHAP user name for iSCSI authentication (optional).
:param chap_secret: CHAP secret for iSCSI authentication (optional).
:param mutual_chap_secret: Mutual CHAP secret for iSCSI authentication (optional).
def logical_downlinks(self): """ Gets the LogicalDownlinks API client. Returns: LogicalDownlinks: """ if not self.__logical_downlinks: self.__logical_downlinks = LogicalDownlinks( self.__connection) return self.__logical_downlinks
Gets the LogicalDownlinks API client. Returns: LogicalDownlinks:
def mod_repo(repo, saltenv='base', **kwargs): ''' Modify one or more values for a repo. If the repo does not exist, it will be created, so long as the definition is well formed. For Ubuntu the ``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be used to create a new repository. The following options are available to modify a repo definition: architectures A comma-separated list of supported architectures, e.g. ``amd64`` If this option is not set, all architectures (configured in the system) will be used. comps A comma separated list of components for the repo, e.g. ``main`` file A file name to be used keyserver Keyserver to get gpg key from keyid Key ID or a list of key IDs to load with the ``keyserver`` argument key_url URL to a GPG key to add to the APT GPG keyring key_text GPG key in string form to add to the APT GPG keyring .. versionadded:: 2018.3.0 consolidate : False If ``True``, will attempt to de-duplicate and consolidate sources comments Sometimes you want to supply additional information, but not as enabled configuration. All comments provided here will be joined into a single string and appended to the repo configuration with a comment marker (#) before it. .. versionadded:: 2015.8.9 .. note:: Due to the way keys are stored for APT, there is a known issue where the key won't be updated unless another change is made at the same time. Keys should be properly added on initial configuration. CLI Examples: .. code-block:: bash salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe ''' if 'refresh_db' in kwargs: salt.utils.versions.warn_until( 'Neon', 'The \'refresh_db\' argument to \'pkg.mod_repo\' has been ' 'renamed to \'refresh\'. Support for using \'refresh_db\' will be ' 'removed in the Neon release of Salt.' ) refresh = kwargs['refresh_db'] else: refresh = kwargs.get('refresh', True) _check_apt() # to ensure no one sets some key values that _shouldn't_ be changed on the # object itself, this is just a white-list of "ok" to set properties if repo.startswith('ppa:'): if __grains__['os'] in ('Ubuntu', 'Mint', 'neon'): # secure PPAs cannot be supported as of the time of this code # implementation via apt-add-repository. The code path for # secure PPAs should be the same as urllib method if salt.utils.path.which('apt-add-repository') \ and 'ppa_auth' not in kwargs: repo_info = get_repo(repo) if repo_info: return {repo: repo_info} else: env = None http_proxy_url = _get_http_proxy_url() if http_proxy_url: env = {'http_proxy': http_proxy_url, 'https_proxy': http_proxy_url} if float(__grains__['osrelease']) < 12.04: cmd = ['apt-add-repository', repo] else: cmd = ['apt-add-repository', '-y', repo] out = _call_apt(cmd, env=env, scope=False, **kwargs) if out['retcode']: raise CommandExecutionError( 'Unable to add PPA \'{0}\'. \'{1}\' exited with ' 'status {2!s}: \'{3}\' '.format( repo[4:], cmd, out['retcode'], out['stderr'] ) ) # explicit refresh when a repo is modified. if refresh: refresh_db() return {repo: out} else: if not HAS_SOFTWAREPROPERTIES: _warn_software_properties(repo) else: log.info('Falling back to urllib method for private PPA') # fall back to urllib style try: owner_name, ppa_name = repo[4:].split('/', 1) except ValueError: raise CommandExecutionError( 'Unable to get PPA info from argument. ' 'Expected format "<PPA_OWNER>/<PPA_NAME>" ' '(e.g. saltstack/salt) not found. 
Received ' '\'{0}\' instead.'.format(repo[4:]) ) dist = __grains__['lsb_distrib_codename'] # ppa has a lot of implicit arguments. Make them explicit. # These will defer to any user-defined variants kwargs['dist'] = dist ppa_auth = '' if 'file' not in kwargs: filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list' kwargs['file'] = filename.format(owner_name, ppa_name, dist) try: launchpad_ppa_info = _get_ppa_info_from_launchpad( owner_name, ppa_name) if 'ppa_auth' not in kwargs: kwargs['keyid'] = launchpad_ppa_info[ 'signing_key_fingerprint'] else: if 'keyid' not in kwargs: error_str = 'Private PPAs require a ' \ 'keyid to be specified: {0}/{1}' raise CommandExecutionError( error_str.format(owner_name, ppa_name) ) except HTTPError as exc: raise CommandExecutionError( 'Launchpad does not know about {0}/{1}: {2}'.format( owner_name, ppa_name, exc) ) except IndexError as exc: raise CommandExecutionError( 'Launchpad knows about {0}/{1} but did not ' 'return a fingerprint. Please set keyid ' 'manually: {2}'.format(owner_name, ppa_name, exc) ) if 'keyserver' not in kwargs: kwargs['keyserver'] = 'keyserver.ubuntu.com' if 'ppa_auth' in kwargs: if not launchpad_ppa_info['private']: raise CommandExecutionError( 'PPA is not private but auth credentials ' 'passed: {0}'.format(repo) ) # assign the new repo format to the "repo" variable # so we can fall through to the "normal" mechanism # here. if 'ppa_auth' in kwargs: ppa_auth = '{0}@'.format(kwargs['ppa_auth']) repo = LP_PVT_SRC_FORMAT.format(ppa_auth, owner_name, ppa_name, dist) else: repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist) else: raise CommandExecutionError( 'cannot parse "ppa:" style repo definitions: {0}' .format(repo) ) sources = sourceslist.SourcesList() if kwargs.get('consolidate', False): # attempt to de-dup and consolidate all sources # down to entries in sources.list # this option makes it easier to keep the sources # list in a "sane" state. # # this should remove duplicates, consolidate comps # for a given source down to one line # and eliminate "invalid" and comment lines # # the second side effect is removal of files # that are not the main sources.list file sources = _consolidate_repo_sources(sources) repos = [s for s in sources if not s.invalid] mod_source = None try: repo_type, \ repo_architectures, \ repo_uri, \ repo_dist, \ repo_comps = _split_repo_str(repo) except SyntaxError: raise SyntaxError( 'Error: repo \'{0}\' not a well formatted definition'.format(repo) ) full_comp_list = set(repo_comps) no_proxy = __salt__['config.option']('no_proxy') if 'keyid' in kwargs: keyid = kwargs.pop('keyid', None) keyserver = kwargs.pop('keyserver', None) if not keyid or not keyserver: error_str = 'both keyserver and keyid options required.' 
raise NameError(error_str) if not isinstance(keyid, list): keyid = [keyid] for key in keyid: if isinstance(key, int): # yaml can make this an int, we need the hex version key = hex(key) cmd = ['apt-key', 'export', key] output = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs) imported = output.startswith('-----BEGIN PGP') if keyserver: if not imported: http_proxy_url = _get_http_proxy_url() if http_proxy_url and keyserver not in no_proxy: cmd = ['apt-key', 'adv', '--batch', '--keyserver-options', 'http-proxy={0}'.format(http_proxy_url), '--keyserver', keyserver, '--logger-fd', '1', '--recv-keys', key] else: cmd = ['apt-key', 'adv', '--batch', '--keyserver', keyserver, '--logger-fd', '1', '--recv-keys', key] ret = _call_apt(cmd, scope=False, **kwargs) if ret['retcode'] != 0: raise CommandExecutionError( 'Error: key retrieval failed: {0}'.format(ret['stdout']) ) elif 'key_url' in kwargs: key_url = kwargs['key_url'] fn_ = __salt__['cp.cache_file'](key_url, saltenv) if not fn_: raise CommandExecutionError( 'Error: file not found: {0}'.format(key_url) ) cmd = ['apt-key', 'add', fn_] out = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs) if not out.upper().startswith('OK'): raise CommandExecutionError( 'Error: failed to add key from {0}'.format(key_url) ) elif 'key_text' in kwargs: key_text = kwargs['key_text'] cmd = ['apt-key', 'add', '-'] out = __salt__['cmd.run_stdout'](cmd, stdin=key_text, python_shell=False, **kwargs) if not out.upper().startswith('OK'): raise CommandExecutionError( 'Error: failed to add key:\n{0}'.format(key_text) ) if 'comps' in kwargs: kwargs['comps'] = kwargs['comps'].split(',') full_comp_list |= set(kwargs['comps']) else: kwargs['comps'] = list(full_comp_list) if 'architectures' in kwargs: kwargs['architectures'] = kwargs['architectures'].split(',') else: kwargs['architectures'] = repo_architectures if 'disabled' in kwargs: kwargs['disabled'] = salt.utils.data.is_true(kwargs['disabled']) kw_type = kwargs.get('type') kw_dist = kwargs.get('dist') for source in repos: # This series of checks will identify the starting source line # and the resulting source line. The idea here is to ensure # we are not returning bogus data because the source line # has already been modified on a previous run. repo_matches = source.type == repo_type and source.uri.rstrip('/') == repo_uri.rstrip('/') and source.dist == repo_dist kw_matches = source.dist == kw_dist and source.type == kw_type if repo_matches or kw_matches: for comp in full_comp_list: if comp in getattr(source, 'comps', []): mod_source = source if not source.comps: mod_source = source if kwargs['architectures'] != source.architectures: mod_source = source if mod_source: break if 'comments' in kwargs: kwargs['comments'] = \ salt.utils.pkg.deb.combine_comments(kwargs['comments']) if not mod_source: mod_source = sourceslist.SourceEntry(repo) if 'comments' in kwargs: mod_source.comment = kwargs['comments'] sources.list.append(mod_source) elif 'comments' in kwargs: mod_source.comment = kwargs['comments'] for key in kwargs: if key in _MODIFY_OK and hasattr(mod_source, key): setattr(mod_source, key, kwargs[key]) sources.save() # on changes, explicitly refresh if refresh: refresh_db() return { repo: { 'architectures': getattr(mod_source, 'architectures', []), 'comps': mod_source.comps, 'disabled': mod_source.disabled, 'file': mod_source.file, 'type': mod_source.type, 'uri': mod_source.uri, 'line': mod_source.line } }
Modify one or more values for a repo. If the repo does not exist, it will be created, so long as the definition is well formed. For Ubuntu the ``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be used to create a new repository. The following options are available to modify a repo definition: architectures A comma-separated list of supported architectures, e.g. ``amd64`` If this option is not set, all architectures (configured in the system) will be used. comps A comma separated list of components for the repo, e.g. ``main`` file A file name to be used keyserver Keyserver to get gpg key from keyid Key ID or a list of key IDs to load with the ``keyserver`` argument key_url URL to a GPG key to add to the APT GPG keyring key_text GPG key in string form to add to the APT GPG keyring .. versionadded:: 2018.3.0 consolidate : False If ``True``, will attempt to de-duplicate and consolidate sources comments Sometimes you want to supply additional information, but not as enabled configuration. All comments provided here will be joined into a single string and appended to the repo configuration with a comment marker (#) before it. .. versionadded:: 2015.8.9 .. note:: Due to the way keys are stored for APT, there is a known issue where the key won't be updated unless another change is made at the same time. Keys should be properly added on initial configuration. CLI Examples: .. code-block:: bash salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
def remove(self, key, where=None, start=None, stop=None): """ Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: # the key is not a valid store, re-raising KeyError raise except Exception: if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!") # we are actually trying to remove a node (with children) s = self.get_node(key) if s is not None: s._f_remove(recursive=True) return None # remove the node if com._all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( 'can only remove with where on objects written as tables') return s.delete(where=where, start=start, stop=stop)
Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store
def vtableEqual(a, objectStart, b): """vtableEqual compares an unwritten vtable to a written vtable.""" N.enforce_number(objectStart, N.UOffsetTFlags) if len(a) * N.VOffsetTFlags.bytewidth != len(b): return False for i, elem in enumerate(a): x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth) # Skip vtable entries that indicate a default value. if x == 0 and elem == 0: pass else: y = objectStart - elem if x != y: return False return True
vtableEqual compares an unwritten vtable to a written vtable.
def convert_units(values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation,**kwargs): """ Convert a value from one unit to another one. Example:: >>> cli = PluginLib.connect() >>> cli.service.convert_units(20.0, 'm', 'km') 0.02 Parameters: values: single measure or an array of measures source_measure_or_unit_abbreviation: A measure in the source unit, or just the abbreviation of the source unit, from which convert the provided measure value/values target_measure_or_unit_abbreviation: A measure in the target unit, or just the abbreviation of the target unit, into which convert the provided measure value/values Returns: Always a list """ if numpy.isscalar(values): # If it is a scalar, converts to an array values = [values] float_values = [float(value) for value in values] values_to_return = convert(float_values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation) return values_to_return
Convert a value from one unit to another one. Example:: >>> cli = PluginLib.connect() >>> cli.service.convert_units(20.0, 'm', 'km') 0.02 Parameters: values: single measure or an array of measures source_measure_or_unit_abbreviation: A measure in the source unit, or just the abbreviation of the source unit, from which convert the provided measure value/values target_measure_or_unit_abbreviation: A measure in the target unit, or just the abbreviation of the target unit, into which convert the provided measure value/values Returns: Always a list
def exists(self): """ Check whether the directory exists on the camera. """ if self.name in ("", "/") and self.parent is None: return True else: return self in self.parent.directories
Check whether the directory exists on the camera.
def clean(self, list_article_candidates): """Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects """ # Save cleaned article_candidates in results. results = [] for article_candidate in list_article_candidates: article_candidate.title = self.do_cleaning(article_candidate.title) article_candidate.description = self.do_cleaning(article_candidate.description) article_candidate.text = self.do_cleaning(article_candidate.text) article_candidate.topimage = self.do_cleaning(article_candidate.topimage) article_candidate.author = self.do_cleaning(article_candidate.author) article_candidate.publish_date = self.do_cleaning(article_candidate.publish_date) results.append(article_candidate) return results
Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects
def face_subset(self, face_index): """ Given a mask of face indices, return a sliced version. Parameters ---------- face_index: (n,) int, mask for faces (n,) bool, mask for faces Returns ---------- visual: ColorVisuals object containing a subset of faces. """ if self.defined: result = ColorVisuals( face_colors=self.face_colors[face_index]) else: result = ColorVisuals() return result
Given a mask of face indices, return a sliced version. Parameters ---------- face_index: (n,) int, mask for faces (n,) bool, mask for faces Returns ---------- visual: ColorVisuals object containing a subset of faces.
def _register_options(self, api_interface): # type: (ApiInterfaceBase) -> None """ Register CORS options endpoints. """ op_paths = api_interface.op_paths(collate_methods=True) for path, operations in op_paths.items(): if api.Method.OPTIONS not in operations: self._options_operation(api_interface, path, operations.keys())
Register CORS options endpoints.
def encode_schedule(schedule): """Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple. """ interpolation, steps, pmfs = schedule return interpolation + ' ' + ' '.join( '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs))
Encodes a schedule tuple into a string. Args: schedule: A tuple containing (interpolation, steps, pmfs), where interpolation is a string specifying the interpolation strategy, steps is an int array_like of shape [N] specifying the global steps, and pmfs is an array_like of shape [N, M] where pmf[i] is the sampling distribution at global step steps[i]. N is the number of schedule requirements to interpolate and M is the size of the probability space. Returns: The string encoding of the schedule tuple.
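A self-contained copy of the encoder, just to make the output format concrete: the interpolation name, then one '@step p1 p2 ...' group per schedule point.

def encode_schedule(schedule):
    interpolation, steps, pmfs = schedule
    return interpolation + ' ' + ' '.join(
        '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs))

print(encode_schedule(('linear', [0, 1000], [[0.5, 0.5], [0.9, 0.1]])))
# -> 'linear @0 0.5 0.5 @1000 0.9 0.1'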
def create(cls, name, servers=None, time_range='yesterday', all_logs=False, filter_for_delete=None, comment=None, **kwargs): """ Create a new delete log task. Provide True to all_logs to delete all log types. Otherwise provide kwargs to specify each log by type of interest. :param str name: name for this task :param servers: servers to back up. Servers must be instances of management servers or log servers. If no value is provided, all servers are backed up. :type servers: list(ManagementServer or LogServer) :param str time_range: specify a time range for the deletion. Valid options are 'yesterday', 'last_full_week_sun_sat', 'last_full_week_mon_sun', 'last_full_month' (default 'yesterday') :param FilterExpression filter_for_delete: optional filter for deleting. (default: FilterExpression('Match All') :param bool all_logs: if True, all log types will be deleted. If this is True, kwargs are ignored (default: False) :param kwargs: see :func:`~log_target_types` for keyword arguments and default values. :raises ElementNotFound: specified servers were not found :raises CreateElementFailed: failure to create the task :return: the task :rtype: DeleteLogTask """ if not servers: servers = [svr.href for svr in ManagementServer.objects.all()] servers.extend([svr.href for svr in LogServer.objects.all()]) else: servers = [svr.href for svr in servers] filter_for_delete = filter_for_delete.href if filter_for_delete else \ FilterExpression('Match All').href json = { 'name': name, 'resources': servers, 'time_limit_type': time_range, 'start_time': 0, 'end_time': 0, 'file_format': 'unknown', 'filter_for_delete': filter_for_delete, 'comment': comment} json.update(**log_target_types(all_logs, **kwargs)) return ElementCreator(cls, json)
Create a new delete log task. Provide True to all_logs to delete all log types. Otherwise provide kwargs to specify each log by type of interest.

:param str name: name for this task
:param servers: servers to delete logs from. Servers must be instances of management servers or log servers. If no value is provided, logs are deleted on all servers.
:type servers: list(ManagementServer or LogServer)
:param str time_range: specify a time range for the deletion. Valid options are 'yesterday', 'last_full_week_sun_sat', 'last_full_week_mon_sun', 'last_full_month' (default 'yesterday')
:param FilterExpression filter_for_delete: optional filter for deleting. (default: FilterExpression('Match All'))
:param bool all_logs: if True, all log types will be deleted. If this is True, kwargs are ignored (default: False)
:param kwargs: see :func:`~log_target_types` for keyword arguments and default values.
:raises ElementNotFound: specified servers were not found
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: DeleteLogTask
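A hedged usage sketch. It assumes an authenticated smc-python session and that DeleteLogTask is imported from its tasks module; only keyword arguments documented above are used.

task = DeleteLogTask.create(
    name='purge-yesterday',
    time_range='yesterday',
    all_logs=True,               # delete every log type; per-type kwargs are ignored
    comment='nightly log cleanup')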
def _fill_cropping(self, image_size, view_size): """ Return a (left, top, right, bottom) 4-tuple containing the cropping values required to display an image of *image_size* in *view_size* when stretched proportionately. Each value is a percentage expressed as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and *view_size* are each (width, height) pairs. """ def aspect_ratio(width, height): return width / height ar_view = aspect_ratio(*view_size) ar_image = aspect_ratio(*image_size) if ar_view < ar_image: # image too wide crop = (1.0 - (ar_view/ar_image)) / 2.0 return (crop, 0.0, crop, 0.0) if ar_view > ar_image: # image too tall crop = (1.0 - (ar_image/ar_view)) / 2.0 return (0.0, crop, 0.0, crop) return (0.0, 0.0, 0.0, 0.0)
Return a (left, top, right, bottom) 4-tuple containing the cropping values required to display an image of *image_size* in *view_size* when stretched proportionately. Each value is a percentage expressed as a fraction of 1.0, e.g. 0.425 represents 42.5%. *image_size* and *view_size* are each (width, height) pairs.
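A quick arithmetic check of the cropping rule above, written as a free function so it can run outside the class.

def fill_cropping(image_size, view_size):
    ar_view = view_size[0] / view_size[1]
    ar_image = image_size[0] / image_size[1]
    if ar_view < ar_image:    # image too wide: crop left and right
        crop = (1.0 - (ar_view / ar_image)) / 2.0
        return (crop, 0.0, crop, 0.0)
    if ar_view > ar_image:    # image too tall: crop top and bottom
        crop = (1.0 - (ar_image / ar_view)) / 2.0
        return (0.0, crop, 0.0, crop)
    return (0.0, 0.0, 0.0, 0.0)

# A 2:1 image stretched into a square view loses 25% on the left and right:
print(fill_cropping((200, 100), (100, 100)))  # -> (0.25, 0.0, 0.25, 0.0)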
def editpermissions_user_view(self, request, user_id, forum_id=None): """ Allows to edit user permissions for the considered forum. The view displays a form to define which permissions are granted for the given user for the considered forum. """ user_model = get_user_model() user = get_object_or_404(user_model, pk=user_id) forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None # Set up the context context = self.get_forum_perms_base_context(request, forum) context['forum'] = forum context['title'] = '{} - {}'.format(_('Forum permissions'), user) context['form'] = self._get_permissions_form( request, UserForumPermission, {'forum': forum, 'user': user}, ) return render(request, self.editpermissions_user_view_template_name, context)
Allows to edit user permissions for the considered forum. The view displays a form to define which permissions are granted for the given user for the considered forum.
def article(request, slug): """ The main view of the Django-CMS Articles! Takes a request and a slug, renders the article. """ # Get current CMS Page as article tree tree = request.current_page.get_public_object() # Check whether it really is a tree. # It could also be one of its sub-pages. if tree.application_urls != 'CMSArticlesApp': # In such case show regular CMS Page return page(request, slug) # Get an Article object from the request draft = use_draft(request) and request.user.has_perm('cms_articles.change_article') preview = 'preview' in request.GET and request.user.has_perm('cms_articles.change_article') site = tree.node.site article = get_article_from_slug(tree, slug, preview, draft) if not article: # raise 404 _handle_no_page(request) request.current_article = article if hasattr(request, 'user') and request.user.is_staff: user_languages = get_language_list(site_id=site.pk) else: user_languages = get_public_languages(site_id=site.pk) request_language = get_language_from_request(request, check_path=True) # get_published_languages will return all languages in draft mode # and published only in live mode. # These languages are then filtered out by the user allowed languages available_languages = [ language for language in user_languages if language in list(article.get_published_languages()) ] own_urls = [ request.build_absolute_uri(request.path), '/%s' % request.path, request.path, ] try: redirect_on_fallback = get_redirect_on_fallback(request_language, site_id=site.pk) except LanguageError: redirect_on_fallback = False if request_language not in user_languages: # Language is not allowed # Use the default site language default_language = get_default_language_for_site(site.pk) fallbacks = get_fallback_languages(default_language, site_id=site.pk) fallbacks = [default_language] + fallbacks else: fallbacks = get_fallback_languages(request_language, site_id=site.pk) # Only fallback to languages the user is allowed to see fallback_languages = [ language for language in fallbacks if language != request_language and language in available_languages ] language_is_unavailable = request_language not in available_languages if language_is_unavailable and not fallback_languages: # There is no page with the requested language # and there's no configured fallbacks return _handle_no_page(request) elif language_is_unavailable and redirect_on_fallback: # There is no page with the requested language and # the user has explicitly requested to redirect on fallbacks, # so redirect to the first configured / available fallback language fallback = fallback_languages[0] redirect_url = article.get_absolute_url(fallback, fallback=False) else: redirect_url = False if redirect_url: if request.user.is_staff and hasattr(request, 'toolbar') and request.toolbar.edit_mode_active: request.toolbar.redirect_url = redirect_url elif redirect_url not in own_urls: # prevent redirect to self return HttpResponseRedirect(redirect_url) # permission checks if article.login_required and not request.user.is_authenticated(): return redirect_to_login(urlquote(request.get_full_path()), settings.LOGIN_URL) if hasattr(request, 'toolbar'): request.toolbar.obj = article structure_requested = get_cms_setting('CMS_TOOLBAR_URL__BUILD') in request.GET if article.has_change_permission(request) and structure_requested: return render_object_structure(request, article) return render_article(request, article, current_language=request_language, slug=slug)
The main view of the Django-CMS Articles! Takes a request and a slug, renders the article.
def recall(self, label=None): """ Returns recall or recall for a given label (category) if specified. """ if label is None: return self.call("recall") else: return self.call("recall", float(label))
Returns recall or recall for a given label (category) if specified.
def estimate_tx_gas_with_safe(self, safe_address: str, to: str, value: int, data: bytes, operation: int, block_identifier='pending') -> int: """ Estimate tx gas using safe `requiredTxGas` method :return: int: Estimated gas :raises: CannotEstimateGas: If gas cannot be estimated :raises: ValueError: Cannot decode received data """ data = data or b'' def parse_revert_data(result: bytes) -> int: # 4 bytes - error method id # 32 bytes - position # 32 bytes - length # Last 32 bytes - value of revert (if everything went right) gas_estimation_offset = 4 + 32 + 32 estimated_gas = result[gas_estimation_offset:] # Estimated gas must be 32 bytes if len(estimated_gas) != 32: logger.warning('Safe=%s Problem estimating gas, returned value is %s for tx=%s', safe_address, result.hex(), tx) raise CannotEstimateGas('Received %s for tx=%s' % (result.hex(), tx)) return int(estimated_gas.hex(), 16) # Add 10k, else we will fail in case of nested calls try: tx = self.get_contract(safe_address).functions.requiredTxGas( to, value, data, operation ).buildTransaction({ 'from': safe_address, 'gas': int(1e7), 'gasPrice': 0, }) # If we build the tx web3 will not try to decode it for us # Ganache 6.3.0 and Geth are working like this result: HexBytes = self.w3.eth.call(tx, block_identifier=block_identifier) return parse_revert_data(result) except ValueError as exc: # Parity """ Parity throws a ValueError, e.g. {'code': -32015, 'message': 'VM execution error.', 'data': 'Reverted 0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000 000000000000000000000000000000000000000000000002c4d6574686f642063616e206f6e6c792062652063616c6c656 42066726f6d207468697320636f6e74726163740000000000000000000000000000000000000000'} """ error_dict = exc.args[0] data = error_dict.get('data') if not data: raise exc elif isinstance(data, str) and 'Reverted ' in data: # Parity result = HexBytes(data.replace('Reverted ', '')) return parse_revert_data(result) key = list(data.keys())[0] result = data[key]['return'] if result == '0x0': raise exc else: # Ganache-Cli with no `--noVMErrorsOnRPCResponse` flag enabled logger.warning('You should use `--noVMErrorsOnRPCResponse` flag with Ganache-cli') estimated_gas_hex = result[138:] assert len(estimated_gas_hex) == 64 estimated_gas = int(estimated_gas_hex, 16) return estimated_gas
Estimate tx gas using safe `requiredTxGas` method :return: int: Estimated gas :raises: CannotEstimateGas: If gas cannot be estimated :raises: ValueError: Cannot decode received data
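A stand-alone illustration of the revert-data layout parsed above: a 4-byte error selector, a 32-byte offset, a 32-byte length, and finally the 32-byte gas value. The example bytes are fabricated purely to show the slicing.

def parse_required_tx_gas(result: bytes) -> int:
    gas_estimation_offset = 4 + 32 + 32
    estimated_gas = result[gas_estimation_offset:]
    if len(estimated_gas) != 32:
        raise ValueError('unexpected revert payload length')
    return int(estimated_gas.hex(), 16)

fake_result = bytes(4) + bytes(32) + bytes(32) + (54321).to_bytes(32, 'big')
print(parse_required_tx_gas(fake_result))  # -> 54321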
def decode(self, dataset_split=None, decode_from_file=False, checkpoint_path=None): """Decodes from dataset or file.""" if decode_from_file: decoding.decode_from_file(self._estimator, self._decode_hparams.decode_from_file, self._hparams, self._decode_hparams, self._decode_hparams.decode_to_file) else: decoding.decode_from_dataset( self._estimator, self._hparams.problem.name, self._hparams, self._decode_hparams, dataset_split=dataset_split, checkpoint_path=checkpoint_path)
Decodes from dataset or file.
def dispose(json_str): """Clear all comments in json_str. Clear JS-style comments like // and /**/ in json_str. Accept a str or unicode as input. Args: json_str: A json string of str or unicode to clean up comment Returns: str: The str without comments (or unicode if you pass in unicode) """ result_str = list(json_str) escaped = False normal = True sl_comment = False ml_comment = False quoted = False a_step_from_comment = False a_step_from_comment_away = False former_index = None for index, char in enumerate(json_str): if escaped: # We have just met a '\' escaped = False continue if a_step_from_comment: # We have just met a '/' if char != '/' and char != '*': a_step_from_comment = False normal = True continue if a_step_from_comment_away: # We have just met a '*' if char != '/': a_step_from_comment_away = False if char == '"': if normal and not escaped: # We are now in a string quoted = True normal = False elif quoted and not escaped: # We are now out of a string quoted = False normal = True elif char == '\\': # '\' should not take effect in comment if normal or quoted: escaped = True elif char == '/': if a_step_from_comment: # Now we are in single line comment a_step_from_comment = False sl_comment = True normal = False former_index = index - 1 elif a_step_from_comment_away: # Now we are out of comment a_step_from_comment_away = False normal = True ml_comment = False for i in range(former_index, index + 1): result_str[i] = "" elif normal: # Now we are just one step away from comment a_step_from_comment = True normal = False elif char == '*': if a_step_from_comment: # We are now in multi-line comment a_step_from_comment = False ml_comment = True normal = False former_index = index - 1 elif ml_comment: a_step_from_comment_away = True elif char == '\n': if sl_comment: sl_comment = False normal = True for i in range(former_index, index + 1): result_str[i] = "" elif char == ']' or char == '}': if normal: _remove_last_comma(result_str, index) # Show respect to original input if we are in python2 return ("" if isinstance(json_str, str) else u"").join(result_str)
Clear all comments in json_str. Clear JS-style comments like // and /**/ in json_str. Accept a str or unicode as input. Args: json_str: A json string of str or unicode to clean up comment Returns: str: The str without comments (or unicode if you pass in unicode)
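An illustrative round trip, assuming the full module (including its private _remove_last_comma helper) is importable so that dispose can be called as-is.

import json

raw = '''
{
    // single-line comment
    "a": 1, /* block comment */
    "b": [1, 2, 3]
}
'''
print(json.loads(dispose(raw)))  # -> {'a': 1, 'b': [1, 2, 3]}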
def get_best_span(span_start_logits: torch.Tensor, span_end_logits: torch.Tensor) -> torch.Tensor: """ This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()`` in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can directly import this function without the class. We call the inputs "logits" - they could either be unnormalized logits or normalized log probabilities. A log_softmax operation is a constant shifting of the entire logit vector, so taking an argmax over either one gives the same result. """ if span_start_logits.dim() != 2 or span_end_logits.dim() != 2: raise ValueError("Input shapes must be (batch_size, passage_length)") batch_size, passage_length = span_start_logits.size() device = span_start_logits.device # (batch_size, passage_length, passage_length) span_log_probs = span_start_logits.unsqueeze(2) + span_end_logits.unsqueeze(1) # Only the upper triangle of the span matrix is valid; the lower triangle has entries where # the span ends before it starts. span_log_mask = torch.triu(torch.ones((passage_length, passage_length), device=device)).log() valid_span_log_probs = span_log_probs + span_log_mask # Here we take the span matrix and flatten it, then find the best span using argmax. We # can recover the start and end indices from this flattened list using simple modular # arithmetic. # (batch_size, passage_length * passage_length) best_spans = valid_span_log_probs.view(batch_size, -1).argmax(-1) span_start_indices = best_spans // passage_length span_end_indices = best_spans % passage_length return torch.stack([span_start_indices, span_end_indices], dim=-1)
This acts the same as the static method ``BidirectionalAttentionFlow.get_best_span()`` in ``allennlp/models/reading_comprehension/bidaf.py``. We keep it here so that users can directly import this function without the class. We call the inputs "logits" - they could either be unnormalized logits or normalized log probabilities. A log_softmax operation is a constant shifting of the entire logit vector, so taking an argmax over either one gives the same result.
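A tiny self-contained demonstration of the flatten/argmax/recover trick above, using a batch of one and a passage of length four.

import torch

passage_length = 4
start_logits = torch.tensor([[0.1, 2.0, 0.3, 0.0]])   # best start at index 1
end_logits = torch.tensor([[0.0, 0.2, 0.1, 3.0]])     # best end at index 3

span_log_probs = start_logits.unsqueeze(2) + end_logits.unsqueeze(1)
span_log_mask = torch.triu(torch.ones(passage_length, passage_length)).log()
valid = (span_log_probs + span_log_mask).view(1, -1)

best = valid.argmax(-1)
print(best // passage_length, best % passage_length)  # tensor([1]) tensor([3])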
def validateElement(self, ctxt, elem): """Try to validate the subtree under an element """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o if elem is None: elem__o = None else: elem__o = elem._o ret = libxml2mod.xmlValidateElement(ctxt__o, self._o, elem__o) return ret
Try to validate the subtree under an element
async def create(gc: GroupControl, name, slaves): """Create new group""" click.echo("Creating group %s with slaves: %s" % (name, slaves)) click.echo(await gc.create(name, slaves))
Create new group
def main(argv=sys.argv, stream=sys.stderr): """Entry point for ``tappy`` command.""" args = parse_args(argv) suite = build_suite(args) runner = unittest.TextTestRunner(verbosity=args.verbose, stream=stream) result = runner.run(suite) return get_status(result)
Entry point for ``tappy`` command.
def find_next_candidate(self): """ Returns the next candidate Node for (potential) evaluation. The candidate list (really a stack) initially consists of all of the top-level (command line) targets provided when the Taskmaster was initialized. While we walk the DAG, visiting Nodes, all the children that haven't finished processing get pushed on to the candidate list. Each child can then be popped and examined in turn for whether *their* children are all up-to-date, in which case a Task will be created for their actual evaluation and potential building. Here is where we also allow candidate Nodes to alter the list of Nodes that should be examined. This is used, for example, when invoking SCons in a source directory. A source directory Node can return its corresponding build directory Node, essentially saying, "Hey, you really need to build this thing over here instead." """ try: return self.candidates.pop() except IndexError: pass try: node = self.top_targets_left.pop() except IndexError: return None self.current_top = node alt, message = node.alter_targets() if alt: self.message = message self.candidates.append(node) self.candidates.extend(self.order(alt)) node = self.candidates.pop() return node
Returns the next candidate Node for (potential) evaluation. The candidate list (really a stack) initially consists of all of the top-level (command line) targets provided when the Taskmaster was initialized. While we walk the DAG, visiting Nodes, all the children that haven't finished processing get pushed on to the candidate list. Each child can then be popped and examined in turn for whether *their* children are all up-to-date, in which case a Task will be created for their actual evaluation and potential building. Here is where we also allow candidate Nodes to alter the list of Nodes that should be examined. This is used, for example, when invoking SCons in a source directory. A source directory Node can return its corresponding build directory Node, essentially saying, "Hey, you really need to build this thing over here instead."
def reflect_filter(sources, model, cache=None):
    '''Returns the list of reflections of objects in the `sources` list to
    another class. Objects that are not found in the target table are
    silently discarded.
    '''
    targets = [reflect(source, model, cache=cache) for source in sources]
    # Some objects may not be available in target DB (not published), so we
    # have to exclude None from the list.
    return [target for target in targets if target is not None]
Returns the list of reflections of objects in the `sources` list to another class. Objects that are not found in the target table are silently discarded.
def upload_headimg(self, account, media_file):
    """
    Upload the avatar for a customer service account
    For details, see
    http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html

    :param account: the full customer service account
    :param media_file: the avatar file to upload, a file object
    :return: the returned JSON data
    """
    return self._post(
        'https://api.weixin.qq.com/customservice/kfaccount/uploadheadimg',
        params={
            'kf_account': account
        },
        files={
            'media': media_file
        }
    )
Upload the avatar for a customer service account. For details, see http://mp.weixin.qq.com/wiki/1/70a29afed17f56d537c833f89be979c9.html :param account: the full customer service account :param media_file: the avatar file to upload, a file object :return: the returned JSON data
def replicate_global_dbs(cloud_url=None, local_url=None): """ Set up replication of the global databases from the cloud server to the local server. :param str cloud_url: Used to override the cloud url from the global configuration in case the calling function is in the process of initializing the cloud server :param str local_url: Used to override the local url from the global configuration in case the calling function is in the process of initializing the local server """ local_url = local_url or config["local_server"]["url"] cloud_url = cloud_url or config["cloud_server"]["url"] server = Server(local_url) for db_name in global_dbs: server.replicate( db_name, urljoin(cloud_url, db_name), db_name, continuous=True, )
Set up replication of the global databases from the cloud server to the local server. :param str cloud_url: Used to override the cloud url from the global configuration in case the calling function is in the process of initializing the cloud server :param str local_url: Used to override the local url from the global configuration in case the calling function is in the process of initializing the local server
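A minimal sketch of calling this during server bootstrap; both URLs are placeholders and would normally come from the global configuration instead:

# Hypothetical bootstrap sequence with placeholder URLs.
replicate_global_dbs(
    cloud_url='https://couch.example.com:6984/',
    local_url='http://localhost:5984/',
)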
def frozen_stats_from_tree(tree): """Restores a statistics from the given flat members tree. :func:`make_frozen_stats_tree` makes a tree for this function. """ if not tree: raise ValueError('Empty tree') stats_index = [] for parent_offset, members in tree: stats = FrozenStatistics(*members) stats_index.append(stats) if parent_offset is not None: stats_index[parent_offset].children.append(stats) return stats_index[0]
Restores a statistics from the given flat members tree. :func:`make_frozen_stats_tree` makes a tree for this function.
def lock(self, session, lock_type, timeout, requested_key=None): """Establishes an access mode to the specified resources. Corresponds to viLock function of the VISA library. :param session: Unique logical identifier to a session. :param lock_type: Specifies the type of lock requested, either Constants.EXCLUSIVE_LOCK or Constants.SHARED_LOCK. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. :param requested_key: This parameter is not used and should be set to VI_NULL when lockType is VI_EXCLUSIVE_LOCK. :return: access_key that can then be passed to other sessions to share the lock, return value of the library call. :rtype: str, :class:`pyvisa.constants.StatusCode` """ try: sess = self.sessions[session] except KeyError: return StatusCode.error_invalid_object return sess.lock(lock_type, timeout, requested_key)
Establishes an access mode to the specified resources. Corresponds to viLock function of the VISA library. :param session: Unique logical identifier to a session. :param lock_type: Specifies the type of lock requested, either Constants.EXCLUSIVE_LOCK or Constants.SHARED_LOCK. :param timeout: Absolute time period (in milliseconds) that a resource waits to get unlocked by the locking session before returning an error. :param requested_key: This parameter is not used and should be set to VI_NULL when lockType is VI_EXCLUSIVE_LOCK. :return: access_key that can then be passed to other sessions to share the lock, return value of the library call. :rtype: str, :class:`pyvisa.constants.StatusCode`
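A hedged usage sketch, assuming `lib` is an instance of the library class this method belongs to, `session` is a valid session id, and the AccessModes enum from pyvisa.constants is the lock-type argument (the older Constants.EXCLUSIVE_LOCK spelling in the docstring maps onto it):

from pyvisa import constants

# Hypothetical call: request an exclusive lock and wait up to 2000 ms.
# `lib` and `session` are assumed to exist already.
access_key, status = lib.lock(session, constants.AccessModes.exclusive_lock, 2000)
if status == constants.StatusCode.success:
    print('locked, shareable key:', access_key)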
def to_phalf_from_pfull(arr, val_toa=0, val_sfc=0): """Compute data at half pressure levels from values at full levels. Could be the pressure array itself, but it could also be any other data defined at pressure levels. Requires specification of values at surface and top of atmosphere. """ phalf = np.zeros((arr.shape[0] + 1, arr.shape[1], arr.shape[2])) phalf[0] = val_toa phalf[-1] = val_sfc phalf[1:-1] = 0.5*(arr[:-1] + arr[1:]) return phalf
Compute data at half pressure levels from values at full levels. Could be the pressure array itself, but it could also be any other data defined at pressure levels. Requires specification of values at surface and top of atmosphere.
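A small worked example (values chosen arbitrarily) showing the averaging of adjacent full levels and the boundary values at the top of atmosphere and the surface:

import numpy as np

# Three full pressure levels on a 1x1 horizontal grid (values are arbitrary).
p_full = np.array([200.0, 500.0, 850.0]).reshape(3, 1, 1)
p_half = to_phalf_from_pfull(p_full, val_toa=0.0, val_sfc=1000.0)
print(p_half[:, 0, 0])  # [   0.  350.  675. 1000.]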
def list_running_zones(self):
        """
        Returns the currently active relay.

        :returns: Returns the running relay number or None if no relays are
                  active.
        :rtype: int
        """
        self.update_controller_info()

        if self.running is None or not self.running:
            return None
        return int(self.running[0]['relay'])
Returns the currently active relay.

:returns: Returns the running relay number or None if no relays are active.
:rtype: int
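A hedged usage sketch; `controller` stands in for an instance of the class this method belongs to:

# Hypothetical usage: report which relay (zone) is active, if any.
zone = controller.list_running_zones()
if zone is None:
    print('No zones are currently running')
else:
    print('Zone %d is currently running' % zone)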
def editors(self, value): """Update editors. DEPRECATED: use ``policy["roles/editors"] = value`` instead.""" warnings.warn( _ASSIGNMENT_DEPRECATED_MSG.format("editors", EDITOR_ROLE), DeprecationWarning, ) self[EDITOR_ROLE] = value
Update editors. DEPRECATED: use ``policy["roles/editors"] = value`` instead.
def unnest(c, elem, ignore_whitespace=False): """unnest the element from its parent within doc. MUTABLE CHANGES""" parent = elem.getparent() gparent = parent.getparent() index = parent.index(elem) # put everything up to elem into a new parent element right before the current parent preparent = etree.Element(parent.tag) preparent.text, parent.text = (parent.text or ''), '' for k in parent.attrib.keys(): preparent.set(k, parent.get(k)) if index > 0: for ch in parent.getchildren()[:index]: preparent.append(ch) gparent.insert(gparent.index(parent), preparent) XML.remove_if_empty(preparent, leave_tail=True, ignore_whitespace=ignore_whitespace) # put the element right before the current parent XML.remove(elem, leave_tail=True) gparent.insert(gparent.index(parent), elem) elem.tail = '' # if the original parent is empty, remove it XML.remove_if_empty(parent, leave_tail=True, ignore_whitespace=ignore_whitespace)
Unnest the element from its parent within the document. MUTABLE CHANGES (the tree is modified in place).
def create_venv_with_package(packages):
    """Create a venv with these packages in a temp dir and yield the env.

    packages should be an iterable of pip version instructions (e.g. package~=1.2.3)
    """
    with tempfile.TemporaryDirectory() as tempdir:
        myenv = create(tempdir, with_pip=True)
        pip_call = [
            myenv.env_exe,
            "-m",
            "pip",
            "install",
        ]
        subprocess.check_call(pip_call + ['-U', 'pip'])
        if packages:
            subprocess.check_call(pip_call + packages)
        yield myenv
Create a venv with these packages in a temp dir and yield the env.

packages should be an iterable of pip version instructions (e.g. package~=1.2.3)
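Because the function yields, it is presumably wrapped with contextlib.contextmanager in the original module; a hedged usage sketch under that assumption (the package pin is arbitrary):

import subprocess

# Hypothetical usage: build a throwaway venv with one pinned package and
# verify it is importable there before the temp dir is cleaned up.
with create_venv_with_package(['requests~=2.31.0']) as env:
    subprocess.check_call([env.env_exe, '-c', 'import requests'])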
def _get_parent(root): """Returns root element for a list. :Args: root (Element): lxml element of current location :Returns: lxml element representing list """ elem = root while True: elem = elem.getparent() if elem.tag in ['ul', 'ol']: return elem
Returns root element for a list. :Args: root (Element): lxml element of current location :Returns: lxml element representing list
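A minimal sketch with lxml showing how the walk climbs past intermediate elements and stops at the first enclosing ul/ol:

from lxml import etree

# Starting from a node nested inside a list item, the helper climbs until
# it reaches the surrounding <ul> (or <ol>) element.
root = etree.fromstring('<ul><li><span>item</span></li></ul>')
span = root.find('.//span')
print(_get_parent(span).tag)  # ul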
def _encrypt(self, dec, password=None): """ Internal encryption function Uses either the password argument for the encryption, or, if not supplied, the password field of the object :param dec: a byte string representing the to be encrypted data :rtype: bytes """ if AES is None: raise ImportError("PyCrypto required") if password is None: password = self.password if password is None: raise ValueError( "Password need to be provided to create encrypted archives") # generate the different encryption parts (non-secure!) master_key = Random.get_random_bytes(32) master_salt = Random.get_random_bytes(64) user_salt = Random.get_random_bytes(64) master_iv = Random.get_random_bytes(16) user_iv = Random.get_random_bytes(16) rounds = 10000 # create the PKCS#7 padding l = len(dec) pad = 16 - (l % 16) dec += bytes([pad] * pad) # encrypt the data cipher = AES.new(master_key, IV=master_iv, mode=AES.MODE_CBC) enc = cipher.encrypt(dec) # generate the master key checksum master_ck = PBKDF2(self.encode_utf8(master_key), master_salt, dkLen=256//8, count=rounds) # generate the user key from the given password user_key = PBKDF2(password, user_salt, dkLen=256//8, count=rounds) # encrypt the master key and iv master_dec = b"\x10" + master_iv + b"\x20" + master_key + b"\x20" + master_ck l = len(master_dec) pad = 16 - (l % 16) master_dec += bytes([pad] * pad) cipher = AES.new(user_key, IV=user_iv, mode=AES.MODE_CBC) master_enc = cipher.encrypt(master_dec) # put everything together enc = binascii.b2a_hex(user_salt).upper() + b"\n" + \ binascii.b2a_hex(master_salt).upper() + b"\n" + \ str(rounds).encode() + b"\n" + \ binascii.b2a_hex(user_iv).upper() + b"\n" + \ binascii.b2a_hex(master_enc).upper() + b"\n" + enc return enc
Internal encryption function Uses either the password argument for the encryption, or, if not supplied, the password field of the object :param dec: a byte string representing the to be encrypted data :rtype: bytes
def _rdumpq(q,size,value,encoding=None): """Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures. """ write = q.appendleft if value is None: write("0:~") return size + 3 if value is True: write("4:true!") return size + 7 if value is False: write("5:false!") return size + 8 if isinstance(value,(int,long)): data = str(value) ldata = len(data) span = str(ldata) write("#") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,(float,)): # Use repr() for float rather than str(). # It round-trips more accurately. # Probably unnecessary in later python versions that # use David Gay's ftoa routines. data = repr(value) ldata = len(data) span = str(ldata) write("^") write(data) write(":") write(span) return size + 2 + len(span) + ldata if isinstance(value,str): lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue if isinstance(value,(list,tuple,)): write("]") init_size = size = size + 1 for item in reversed(value): size = _rdumpq(q,size,item,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,dict): write("}") init_size = size = size + 1 for (k,v) in value.iteritems(): size = _rdumpq(q,size,v,encoding) size = _rdumpq(q,size,k,encoding) span = str(size - init_size) write(":") write(span) return size + 1 + len(span) if isinstance(value,unicode): if encoding is None: raise ValueError("must specify encoding to dump unicode strings") value = value.encode(encoding) lvalue = len(value) span = str(lvalue) write(",") write(value) write(":") write(span) return size + 2 + len(span) + lvalue raise ValueError("unserializable object")
Dump value as a tnetstring, to a deque instance, last chunks first. This function generates the tnetstring representation of the given value, pushing chunks of the output onto the given deque instance. It pushes the last chunk first, then recursively generates more chunks. When passed in the current size of the string in the queue, it will return the new size of the string in the queue. Operating last-chunk-first makes it easy to calculate the size written for recursive structures without having to build their representation as a string. This is measurably faster than generating the intermediate strings, especially on deeply nested structures.
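A minimal sketch of how the queue-based dumper is driven (this is Python 2-era code, matching the str/unicode handling above); the finished tnetstring is produced by joining the deque left to right:

from collections import deque

# Dump a small dict; chunks are pushed left-most-last, so joining the
# deque in order yields the complete tnetstring.
q = deque()
_rdumpq(q, 0, {'hello': 'world'})
print(''.join(q))  # 16:5:hello,5:world,}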
def get_channel_groups(self, channel_ids):
        '''This function returns the group of each channel specified by
        channel_ids

        Parameters
        ----------
        channel_ids: array_like
            The channel ids (ints) for which the groups will be returned

        Returns
        ----------
        groups: array_like
            Returns a list of corresponding groups (ints) for the given
            channel_ids
        '''
        groups = []
        for channel_id in channel_ids:
            group = self.get_channel_property(channel_id, 'group')
            groups.append(group)
        return groups
This function returns the group of each channel specified by channel_ids

Parameters
----------
channel_ids: array_like
    The channel ids (ints) for which the groups will be returned

Returns
----------
groups: array_like
    Returns a list of corresponding groups (ints) for the given channel_ids
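A hedged usage sketch; `recording` stands in for an instance of the extractor class this method belongs to, and the channel ids and printed groups are made up:

# Hypothetical usage: look up the group of the first three channels.
groups = recording.get_channel_groups(channel_ids=[0, 1, 2])
print(groups)  # e.g. [0, 0, 1]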
def has_descriptor(self, descriptor): """ Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool """ for p in self.descriptors: if p in descriptor: return True return False
Return ``True`` if the character has the given descriptor. :param IPADescriptor descriptor: the descriptor to be checked against :rtype: bool
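A hedged usage sketch; the character and descriptor objects are assumptions about the surrounding IPA-handling API:

# Hypothetical usage: test whether a character carries a given descriptor.
if consonant_char.has_descriptor(voiced_descriptor):
    print('the character is voiced')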
def makeFrequencyPanel(allFreqs, patientName): """ For a title, make a graph showing the frequencies. @param allFreqs: result from getCompleteFreqs @param patientName: A C{str}, title for the panel """ titles = sorted( iter(allFreqs.keys()), key=lambda title: (allFreqs[title]['bitScoreMax'], title)) origMaxY = 0 cols = 6 rows = len(allFreqs) figure, ax = plt.subplots(rows, cols, squeeze=False) substitutions = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'] colors = ['blue', 'black', 'red', 'yellow', 'green', 'orange'] for i, title in enumerate(titles): for index in range(6): for subst in allFreqs[str(title)]: substitution = substitutions[index] print(i, index, title, 'substitution', substitutions[index]) if substitution[0] == 'C': pattern = 'cPattern' else: pattern = 'tPattern' maxY = makeFrequencyGraph(allFreqs, title, substitution, pattern, color=colors[index], createFigure=False, showFigure=False, readsAx=ax[i][index]) if maxY > origMaxY: origMaxY = maxY # add title for individual plot. # if used for other viruses, this will have to be adapted. if index == 0: gi = title.split('|')[1] titles = title.split(' ') try: typeIndex = titles.index('type') except ValueError: typeNumber = 'gi: %s' % gi else: typeNumber = titles[typeIndex + 1] ax[i][index].set_ylabel(('Type %s \n maxBitScore: %s' % ( typeNumber, allFreqs[title]['bitScoreMax'])), fontsize=10) # add xAxis tick labels if i == 0: ax[i][index].set_title(substitution, fontsize=13) if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2: if index < 3: pat = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT'] else: pat = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT'] ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8) # make Y-axis equal for i, title in enumerate(allFreqs): for index in range(6): a = ax[i][index] a.set_ylim([0, origMaxY]) # add title of whole panel figure.suptitle('Mutation Signatures in %s' % patientName, fontsize=20) figure.set_size_inches(5 * cols, 3 * rows, forward=True) figure.show() return allFreqs
For a title, make a graph showing the frequencies. @param allFreqs: result from getCompleteFreqs @param patientName: A C{str}, title for the panel
def find_service_by_type(self, service_type): """ Get service for a given service type. :param service_type: Service type, ServiceType :return: Service """ for service in self._services: if service_type == service.type: return service return None
Get service for a given service type. :param service_type: Service type, ServiceType :return: Service
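A hedged usage sketch; the container object and the ServiceType member are assumptions:

# Hypothetical usage: look up a service by its type, handling the miss case.
service = registry.find_service_by_type(ServiceType.DISCOVERY)
if service is None:
    print('no service registered for this type')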