code (string, lengths 75–104k) · docstring (string, lengths 1–46.9k)
def gumbel_softmax_discrete_bottleneck(x, bottleneck_bits, beta=0.25, decay=0.999, epsilon=1e-5, temperature_warmup_steps=150000, hard=False, summary=True): """VQ-VAE using Gumbel-Softmax. Different from `gumbel_softmax()` function as this function calculates the KL by using the discrete entropy instead of taking the argmax, and it also uses an exponential moving average to update the codebook while the `gumbel_softmax()` function includes no codebook update. Args: x: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook, whose squared difference is used as the Gumbel-Softmax logits. bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`. beta: Beta factor for commitment loss (Default: 0.25). decay: Decay factor for exponential moving average (Default: 0.999). epsilon: Small value to avoid dividing by zero in EMA update (Default: 1e-5). temperature_warmup_steps: Number of steps it takes to decay temperature to 0 (Default: 150000). hard: When `True`, we use hard Gumbel-Softmax samples and force discrete latents by taking the argmax. When `False`, we use soft samples, which we treat as codebook weights (Default: False). summary: When `True`, we save histogram summaries of the KL term (Default: True). Returns: x_means_assignments: A `float`-like `Tensor` containing the codebook assignments. When `hard == True`, this is one-hot, containing the arg-max of the Gumbel-Softmax samples (and we use the straightthrough gradient). Otherwise, it contains the Gumbel-Softmax samples exactly, which are values from the `(K-1)`-simplex where `K` is the bottleneck size. loss: The loss, which is the sum of the KL between the Gumbel-Softmax and the uniform prior and the commitment loss multiplied by the beta factor. We approximate the KL by using the entropy of a categorical distribution instead of the Gumbel Softmax. """ bottleneck_size = 2**bottleneck_bits x_shape = common_layers.shape_list(x) hidden_size = x_shape[-1] means, ema_means, ema_count = get_vq_codebook(bottleneck_size, hidden_size) x = tf.reshape(x, [-1, hidden_size]) bottleneck_size = common_layers.shape_list(means)[0] x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) scalar_prod = tf.matmul(x, means, transpose_b=True) dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod class_probs = tf.nn.softmax(dist) log_class_probs = tf.nn.log_softmax(dist) gumbel_samples = gumbel_sample(common_layers.shape_list(dist)) steps = temperature_warmup_steps gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 temperature = 1.2 - common_layers.inverse_lin_decay(steps) # 10% of the time keep reasonably high temperature to keep learning. temperature = tf.cond( tf.less(tf.random_uniform([]), 0.9), lambda: temperature, lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) gumbel_softmax_samples = tf.nn.softmax( (log_class_probs + gumbel_samples) / temperature) # Calculate KL between q and a uniform prior. kl = tf.reduce_sum( class_probs * (log_class_probs - tf.log(1.0 / bottleneck_size)), -1) if summary: tf.summary.histogram("KL", tf.reshape(kl, [-1])) # Straight-through gradient estimation when we're using hard assignments. 
if hard: x_means_idx = tf.reshape(tf.argmax(gumbel_softmax_samples, axis=-1), [-1]) x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) x_means_assignments = gumbel_softmax_samples + tf.stop_gradient( x_means_hot - gumbel_softmax_samples) else: x_means_assignments = gumbel_softmax_samples x_means_assignments_flat = tf.reshape(x_means_assignments, [-1, bottleneck_size]) x_means = tf.matmul(x_means_assignments_flat, means) commitment_loss = tf.reduce_mean( tf.squared_difference(x, tf.stop_gradient(x_means))) # Update the ema variables. updated_ema_count = moving_averages.assign_moving_average( ema_count, tf.reduce_sum( tf.reshape(x_means_assignments, shape=[-1, bottleneck_size]), axis=0), decay, zero_debias=False) dw = tf.matmul(x_means_assignments, x, transpose_a=True) updated_ema_means = tf.identity( moving_averages.assign_moving_average( ema_means, dw, decay, zero_debias=False)) n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) updated_ema_count = ( (updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n) updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) with tf.control_dependencies([commitment_loss]): update_means = means.assign(updated_ema_means) with tf.control_dependencies([update_means]): loss = beta * commitment_loss # Add KL loss. loss += tf.reduce_mean(kl) x_means_assignments = tf.reshape(x_means_assignments, x_shape[:-1] + [bottleneck_size]) return x_means_assignments, loss
VQ-VAE using Gumbel-Softmax. Different from `gumbel_softmax()` function as this function calculates the KL by using the discrete entropy instead of taking the argmax, and it also uses an exponential moving average to update the codebook while the `gumbel_softmax()` function includes no codebook update. Args: x: A `float`-like `Tensor` containing the latent vectors to be compared to the codebook, whose squared difference is used as the Gumbel-Softmax logits. bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`. beta: Beta factor for commitment loss (Default: 0.25). decay: Decay factor for exponential moving average (Default: 0.999). epsilon: Small value to avoid dividing by zero in EMA update (Default: 1e-5). temperature_warmup_steps: Number of steps it takes to decay temperature to 0 (Default: 150000). hard: When `True`, we use hard Gumbel-Softmax samples and force discrete latents by taking the argmax. When `False`, we use soft samples, which we treat as codebook weights (Default: False). summary: When `True`, we save histogram summaries of the KL term (Default: True). Returns: x_means_assignments: A `float`-like `Tensor` containing the codebook assignments. When `hard == True`, this is one-hot, containing the arg-max of the Gumbel-Softmax samples (and we use the straightthrough gradient). Otherwise, it contains the Gumbel-Softmax samples exactly, which are values from the `(K-1)`-simplex where `K` is the bottleneck size. loss: The loss, which is the sum of the KL between the Gumbel-Softmax and the uniform prior and the commitment loss multiplied by the beta factor. We approximate the KL by using the entropy of a categorical distribution instead of the Gumbel Softmax.
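A minimal NumPy sketch of the KL term described above — KL(q ‖ uniform) = Σ_k q_k (log q_k − log(1/K)) — assuming `logits` holds the (negative squared-distance) Gumbel-Softmax logits; the names and data here are illustrative and are not part of the original tensor2tensor code.

import numpy as np

def kl_to_uniform(logits):
    # q is a categorical distribution over the K codebook entries
    q = np.exp(logits - logits.max(axis=-1, keepdims=True))
    q /= q.sum(axis=-1, keepdims=True)
    K = q.shape[-1]
    # KL(q || uniform) = sum_k q_k * (log q_k - log(1/K))
    return np.sum(q * (np.log(q + 1e-20) - np.log(1.0 / K)), axis=-1)

# example: a batch of 2 latents against a codebook of K=4 entries
print(kl_to_uniform(np.array([[0.1, 2.0, -1.0, 0.3],
                              [0.0, 0.0, 0.0, 0.0]])))  # second row -> 0 (already uniform)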
def plot_vxx(self, colorbar=True, cb_orientation='vertical', cb_label=None, ax=None, show=True, fname=None, **kwargs): """ Plot the Vxx component of the tensor. Usage ----- x.plot_vxx([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = False If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{xx}$' Text label for the colorbar.. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods. """ if cb_label is None: cb_label = self._vxx_label if ax is None: fig, axes = self.vxx.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=False, **kwargs) if show: fig.show() if fname is not None: fig.savefig(fname) return fig, axes else: self.vxx.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, ax=ax, **kwargs)
Plot the Vxx component of the tensor. Usage ----- x.plot_vxx([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{xx}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods.
def generate_cloudformation_args(stack_name, parameters, tags, template,
                                 capabilities=DEFAULT_CAPABILITIES,
                                 change_set_type=None, service_role=None,
                                 stack_policy=None, change_set_name=None):
    """Used to generate the args for common cloudformation API interactions.

    This is used for create_stack/update_stack/create_change_set calls in
    cloudformation.

    Args:
        stack_name (str): The fully qualified stack name in Cloudformation.
        parameters (list): A list of dictionaries that defines the parameter
            list to be applied to the Cloudformation stack.
        tags (list): A list of dictionaries that defines the tags that should
            be applied to the Cloudformation stack.
        template (:class:`stacker.provider.base.Template`): The template object.
        capabilities (list, optional): A list of capabilities to use when
            updating Cloudformation.
        change_set_type (str, optional): An optional change set type to use
            with create_change_set.
        service_role (str, optional): An optional service role to use when
            interacting with Cloudformation.
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.
        change_set_name (str, optional): An optional change set name to use
            with create_change_set.

    Returns:
        dict: A dictionary of arguments to be used in the Cloudformation API call.
    """
    args = {
        "StackName": stack_name,
        "Parameters": parameters,
        "Tags": tags,
        "Capabilities": capabilities,
    }

    if service_role:
        args["RoleARN"] = service_role

    if change_set_name:
        args["ChangeSetName"] = change_set_name

    if change_set_type:
        args["ChangeSetType"] = change_set_type

    if template.url:
        args["TemplateURL"] = template.url
    else:
        args["TemplateBody"] = template.body

    # When creating args for CreateChangeSet, don't include the stack policy,
    # since ChangeSets don't support it.
    if not change_set_name:
        args.update(generate_stack_policy_args(stack_policy))

    return args
Used to generate the args for common cloudformation API interactions. This is used for create_stack/update_stack/create_change_set calls in cloudformation. Args: stack_name (str): The fully qualified stack name in Cloudformation. parameters (list): A list of dictionaries that defines the parameter list to be applied to the Cloudformation stack. tags (list): A list of dictionaries that defines the tags that should be applied to the Cloudformation stack. template (:class:`stacker.provider.base.Template`): The template object. capabilities (list, optional): A list of capabilities to use when updating Cloudformation. change_set_type (str, optional): An optional change set type to use with create_change_set. service_role (str, optional): An optional service role to use when interacting with Cloudformation. stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy. change_set_name (str, optional): An optional change set name to use with create_change_set. Returns: dict: A dictionary of arguments to be used in the Cloudformation API call.
def equal_distribution_folds(y, folds=2):
    """Creates `folds` number of indices that have roughly balanced multi-label distribution.

    Args:
        y: The multi-label outputs.
        folds: The number of folds to create.

    Returns:
        `folds` number of indices that have roughly equal multi-label distributions.
    """
    n, classes = y.shape

    # Compute sample distribution over classes
    dist = y.sum(axis=0).astype('float')
    dist /= dist.sum()

    index_list = []
    fold_dist = np.zeros((folds, classes), dtype='float')
    for _ in range(folds):
        index_list.append([])
    for i in range(n):
        if i < folds:
            target_fold = i
        else:
            normed_folds = fold_dist.T / fold_dist.sum(axis=1)
            how_off = normed_folds.T - dist
            target_fold = np.argmin(
                np.dot((y[i] - .5).reshape(1, -1), how_off.T))
        fold_dist[target_fold] += y[i]
        index_list[target_fold].append(i)

    logger.debug("Fold distributions:")
    logger.debug(fold_dist)
    return index_list
Creates `folds` number of indices that have roughly balanced multi-label distribution. Args: y: The multi-label outputs. folds: The number of folds to create. Returns: `folds` number of indices that have roughly equal multi-label distributions.
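A short usage sketch for equal_distribution_folds above. It assumes `numpy` and a module-level `logger` are available, as the function expects; the random multi-hot data is illustrative only.

import logging
import numpy as np

logger = logging.getLogger(__name__)  # the function above logs fold distributions through this

# 100 samples, 5 labels, multi-hot targets
y = (np.random.rand(100, 5) > 0.7).astype(int)
folds = equal_distribution_folds(y, folds=3)
for k, idx in enumerate(folds):
    print("fold", k, "size", len(idx), "label counts", y[idx].sum(axis=0))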
def disconnect(self):
    """This method disconnects an IOM session to allow for reconnecting when switching networks.

    See the Advanced topics section of the doc for details.
    """
    if self.sascfg.mode != 'IOM':
        res = "This method is only available with the IOM access method"
    else:
        res = self._io.disconnect()
    return res
This method disconnects an IOM session to allow for reconnecting when switching networks. See the Advanced topics section of the doc for details.
def chown(hdfs_path, user=None, group=None, hdfs_user=None):
    """See :meth:`fs.hdfs.chown`."""
    user = user or ''
    group = group or ''
    host, port, path_ = path.split(hdfs_path, hdfs_user)
    with hdfs(host, port, hdfs_user) as fs:
        return fs.chown(path_, user=user, group=group)
See :meth:`fs.hdfs.chown`.
def delete(self, cls, rid, user='undefined'):
    """Delete a record by id. `user` currently unused. Would be used with soft deletes.

    >>> s = teststore()
    >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'})
    >>> len(s.list('tstoretest'))
    1
    >>> s.delete('tstoretest', '1')
    >>> len(s.list('tstoretest'))
    0
    >>> s.delete('tstoretest', '1')
    Traceback (most recent call last):
        ...
    KeyError: 'No record tstoretest/1'
    """
    self.validate_record_type(cls)
    deletedcount = self.db.delete(cls, {ID: rid})
    if deletedcount < 1:
        raise KeyError('No record {}/{}'.format(cls, rid))
Delete a record by id. `user` currently unused. Would be used with soft deletes. >>> s = teststore() >>> s.create('tstoretest', {'id': '1', 'name': 'Toto'}) >>> len(s.list('tstoretest')) 1 >>> s.delete('tstoretest', '1') >>> len(s.list('tstoretest')) 0 >>> s.delete('tstoretest', '1') Traceback (most recent call last): ... KeyError: 'No record tstoretest/1'
def get_matching_multiplex_port(self, name):
    """
    Given a name, figure out if a multiplex port prefixes this name and
    return it. Otherwise return none.
    """
    # short circuit: if the attribute name already exists return none
    if name in self._portnames:
        return None
    if not len([p for p in self._portnames if name.startswith(p) and name != p]):
        return None

    matching_multiplex_ports = [
        self.__getattribute__(p) for p in self._portnames
        if name.startswith(p) and name != p
        and hasattr(self, p)
        and self.__getattribute__(p).is_multiplex
    ]
    for port in matching_multiplex_ports:
        return port
    return None
Given a name, figure out if a multiplex port prefixes this name and return it. Otherwise return none.
def exists(self, value=None):
    """
    Return True if the given pk value exists for the given class.
    If no value is given, we use the value of the current field, which
    is the value of the "_pk" attribute of its instance.
    """
    try:
        if not value:
            value = self.get()
    except (AttributeError, DoesNotExist):
        # If the instance is deleted, the _pk attribute doesn't exist
        # anymore. So we catch the AttributeError to return False (this pk
        # field doesn't exist anymore) in this specific case
        return False
    else:
        return self.connection.sismember(self.collection_key, value)
Return True if the given pk value exists for the given class. If no value is given, we use the value of the current field, which is the value of the "_pk" attribute of its instance.
def _osquery_cmd(table, attrs=None, where=None, format='json'):
    '''
    Helper function to run osquery queries
    '''
    ret = {
        'result': True,
    }

    if attrs:
        if isinstance(attrs, list):
            valid_attrs = _table_attrs(table)
            if valid_attrs:
                for a in attrs:
                    if a not in valid_attrs:
                        ret['result'] = False
                        ret['comment'] = '{0} is not a valid attribute for table {1}'.format(a, table)
                        return ret
                _attrs = ','.join(attrs)
            else:
                ret['result'] = False
                ret['comment'] = 'Invalid table {0}.'.format(table)
                return ret
        else:
            ret['comment'] = 'attrs must be specified as a list.'
            ret['result'] = False
            return ret
    else:
        _attrs = '*'

    sql = 'select {0} from {1}'.format(_attrs, table)
    if where:
        sql = '{0} where {1}'.format(sql, where)
    sql = '{0};'.format(sql)

    res = _osquery(sql)
    if res['result']:
        ret['data'] = res['data']
    else:
        ret['comment'] = res['error']
    return ret
Helper function to run osquery queries
def toProtocolElement(self):
    """
    Returns the representation of this ContinuousSet as the corresponding
    ProtocolElement.
    """
    gaContinuousSet = protocol.ContinuousSet()
    gaContinuousSet.id = self.getId()
    gaContinuousSet.dataset_id = self.getParentContainer().getId()
    gaContinuousSet.reference_set_id = pb.string(self._referenceSet.getId())
    gaContinuousSet.name = self._name
    gaContinuousSet.source_uri = self._sourceUri
    attributes = self.getAttributes()
    for key in attributes:
        gaContinuousSet.attributes.attr[key] \
            .values.extend(protocol.encodeValue(attributes[key]))
    return gaContinuousSet
Returns the representation of this ContinuousSet as the corresponding ProtocolElement.
def entropy(s):
    """Calculate the Entropy Impurity for a list of samples."""
    return -sum(
        p * np.log(p)
        for i in range(len(s))
        for p in [prop(s[i], s)]
    )
Calculate the Entropy Impurity for a list of samples.
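For reference, a self-contained way to compute the usual entropy impurity over class proportions with collections.Counter. Note that the function above iterates over samples via `prop(s[i], s)` (assumed to return the fraction of `s` equal to `s[i]`), so it weights each class by its count rather than summing once per class; this sketch shows the per-class form.

from collections import Counter
import numpy as np

def entropy_impurity(samples):
    # proportion of each distinct label in the sample list
    counts = Counter(samples)
    n = len(samples)
    return -sum((c / n) * np.log(c / n) for c in counts.values())

print(entropy_impurity(['a', 'a', 'b', 'b']))  # log(2) ~= 0.693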
def _plot_data_to_ax( data_all, ax1, e_unit=None, sed=True, ylabel=None, ulim_opts={}, errorbar_opts={}, ): """ Plots data errorbars and upper limits onto ax. X label is left to plot_data and plot_fit because they depend on whether residuals are plotted. """ if e_unit is None: e_unit = data_all["energy"].unit f_unit, sedf = sed_conversion( data_all["energy"], data_all["flux"].unit, sed ) if "group" not in data_all.keys(): data_all["group"] = np.zeros(len(data_all)) groups = np.unique(data_all["group"]) for g in groups: data = data_all[np.where(data_all["group"] == g)] _, sedfg = sed_conversion(data["energy"], data["flux"].unit, sed) # wrap around color and marker cycles color = color_cycle[int(g) % len(color_cycle)] marker = marker_cycle[int(g) % len(marker_cycle)] ul = data["ul"] notul = ~ul # Hack to show y errors compatible with 0 in loglog plot yerr_lo = data["flux_error_lo"][notul] y = data["flux"][notul].to(yerr_lo.unit) bad_err = np.where((y - yerr_lo) <= 0.0) yerr_lo[bad_err] = y[bad_err] * (1.0 - 1e-7) yerr = u.Quantity((yerr_lo, data["flux_error_hi"][notul])) xerr = u.Quantity((data["energy_error_lo"], data["energy_error_hi"])) opts = dict( zorder=100, marker=marker, ls="", elinewidth=2, capsize=0, mec=color, mew=0.1, ms=5, color=color, ) opts.update(**errorbar_opts) ax1.errorbar( data["energy"][notul].to(e_unit).value, (data["flux"][notul] * sedfg[notul]).to(f_unit).value, yerr=(yerr * sedfg[notul]).to(f_unit).value, xerr=xerr[:, notul].to(e_unit).value, **opts ) if np.any(ul): if "elinewidth" in errorbar_opts: ulim_opts["elinewidth"] = errorbar_opts["elinewidth"] _plot_ulims( ax1, data["energy"][ul].to(e_unit).value, (data["flux"][ul] * sedfg[ul]).to(f_unit).value, (xerr[:, ul]).to(e_unit).value, color, **ulim_opts ) ax1.set_xscale("log") ax1.set_yscale("log") xmin = 10 ** np.floor( np.log10( np.min(data["energy"] - data["energy_error_lo"]).to(e_unit).value ) ) xmax = 10 ** np.ceil( np.log10( np.max(data["energy"] + data["energy_error_hi"]).to(e_unit).value ) ) ax1.set_xlim(xmin, xmax) # avoid autoscaling to errorbars to 0 notul = ~data_all["ul"] if np.any(data_all["flux_error_lo"][notul] >= data_all["flux"][notul]): elo = (data_all["flux"][notul] * sedf[notul]).to(f_unit).value - ( data_all["flux_error_lo"][notul] * sedf[notul] ).to(f_unit).value gooderr = np.where( data_all["flux_error_lo"][notul] < data_all["flux"][notul] ) ymin = 10 ** np.floor(np.log10(np.min(elo[gooderr]))) ax1.set_ylim(bottom=ymin) if ylabel is None: if sed: ax1.set_ylabel( r"$E^2\mathrm{{d}}N/\mathrm{{d}}E$" " [{0}]".format(u.Unit(f_unit).to_string("latex_inline")) ) else: ax1.set_ylabel( r"$\mathrm{{d}}N/\mathrm{{d}}E$" " [{0}]".format(u.Unit(f_unit).to_string("latex_inline")) ) else: ax1.set_ylabel(ylabel)
Plots data errorbars and upper limits onto ax. X label is left to plot_data and plot_fit because they depend on whether residuals are plotted.
def to_python(self, value):
    """
    B64decode and unpickle the object, optionally decompressing it.

    If an error is raised in de-pickling and we're sure the value is
    a definite pickle, the error is allowed to propagate. If we aren't sure
    if the value is a pickle or not, then we catch the error and return the
    original value instead.
    """
    if value is not None:
        try:
            value = dbsafe_decode(value, self.compress)
        except:
            # If the value is a definite pickle; and an error is raised in
            # de-pickling it should be allowed to propagate.
            if isinstance(value, PickledObject):
                raise
    return value
B64decode and unpickle the object, optionally decompressing it. If an error is raised in de-pickling and we're sure the value is a definite pickle, the error is allowed to propagate. If we aren't sure if the value is a pickle or not, then we catch the error and return the original value instead.
def obj2unicode(obj):
    """Return a unicode representation of a python object"""
    if isinstance(obj, unicode_type):
        return obj
    elif isinstance(obj, bytes_type):
        try:
            return unicode_type(obj, 'utf-8')
        except UnicodeDecodeError as strerror:
            sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
            return unicode_type(obj, 'utf-8', 'replace')
    else:
        return unicode_type(obj)
Return a unicode representation of a python object
def adjoint(self):
    """Return the adjoint operator.

    The laplacian is self-adjoint, so this returns ``self``.
    """
    return Laplacian(self.range, self.domain, pad_mode=self.pad_mode,
                     pad_const=0)
Return the adjoint operator. The laplacian is self-adjoint, so this returns ``self``.
def update_handler(Model, name=None, **kwds):
    """
    This factory returns an action handler that updates an instance of the
    specified model when an update action is received, assuming the action
    follows nautilus conventions.

    Args:
        Model (nautilus.BaseModel): The model to update when the action is received.

    Returns:
        function(type, payload): The action handler for this model
    """
    async def action_handler(service, action_type, payload, props, notify=True, **kwds):
        # if the payload represents a new instance of `Model`
        if action_type == get_crud_action('update', name or Model):
            try:
                # the props of the message
                message_props = {}
                # if there was a correlation id in the request
                if 'correlation_id' in props:
                    # make sure it ends up in the reply
                    message_props['correlation_id'] = props['correlation_id']

                # grab the name of the primary key for the model
                pk_field = Model.primary_key()

                # make sure there is a primary key to id the model
                if not pk_field.name in payload:
                    # yell loudly
                    raise ValueError("Must specify the pk of the model when updating")

                # grab the matching model
                model = Model.select().where(pk_field == payload[pk_field.name]).get()

                # remove the key from the payload
                payload.pop(pk_field.name, None)

                # for every key,value pair
                for key, value in payload.items():
                    # TODO: add protection for certain fields from being
                    # changed by the api
                    setattr(model, key, value)

                # save the updates
                model.save()

                # if we need to tell someone about what happened
                if notify:
                    # publish the success event
                    await service.event_broker.send(
                        payload=ModelSerializer().serialize(model),
                        action_type=change_action_status(action_type, success_status()),
                        **message_props
                    )

            # if something goes wrong
            except Exception as err:
                # if we need to tell someone about what happened
                if notify:
                    # publish the error as an event
                    await service.event_broker.send(
                        payload=str(err),
                        action_type=change_action_status(action_type, error_status()),
                        **message_props
                    )
                # otherwise we aren't supposed to notify
                else:
                    # raise the exception normally
                    raise err

    # return the handler
    return action_handler
This factory returns an action handler that updates an instance of the specified model when an update action is received, assuming the action follows nautilus conventions. Args: Model (nautilus.BaseModel): The model to update when the action is received. Returns: function(type, payload): The action handler for this model
def check(self, dsm, **kwargs):
    """
    Check layered architecture.

    Args:
        dsm (:class:`DesignStructureMatrix`): the DSM to check.

    Returns:
        bool, str: True if layered architecture else False, messages
    """
    layered_architecture = True
    messages = []
    categories = dsm.categories
    dsm_size = dsm.size[0]

    if not categories:
        categories = ['appmodule'] * dsm_size

    for i in range(0, dsm_size - 1):
        for j in range(i + 1, dsm_size):
            if (categories[i] != 'broker' and categories[j] != 'broker' and
                    dsm.entities[i].split('.')[0] != dsm.entities[j].split('.')[0]):  # noqa
                if dsm.data[i][j] > 0:
                    layered_architecture = False
                    messages.append(
                        'Dependency from %s to %s breaks the '
                        'layered architecture.' % (
                            dsm.entities[i], dsm.entities[j]))

    return layered_architecture, '\n'.join(messages)
Check layered architecture. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. Returns: bool, str: True if layered architecture else False, messages
def _call_command(self, name, *args, **kwargs):
    """
    If a command is called for the main field, without dynamic part, an
    ImplementationError is raised: commands can only be applied on dynamic
    versions. On dynamic versions, if the command is a modifier, we add the
    version in the inventory.
    """
    if self.dynamic_version_of is None:
        raise ImplementationError('The main version of a dynamic field cannot accept commands')
    try:
        result = super(DynamicFieldMixin, self)._call_command(name, *args, **kwargs)
    except:
        raise
    else:
        if name in self.available_modifiers and name not in ('delete', 'hdel'):
            self._inventory.sadd(self.dynamic_part)
        return result
If a command is called for the main field, without dynamic part, an ImplementationError is raised: commands can only be applied on dynamic versions. On dynamic versions, if the command is a modifier, we add the version in the inventory.
def render(gpg_data, saltenv='base', sls='', argline='', **kwargs):
    '''
    Create a gpg object given a gpg_keydir, and then use it to try to decrypt
    the data to be rendered.
    '''
    if not _get_gpg_exec():
        raise SaltRenderError('GPG unavailable')
    log.debug('Reading GPG keys from: %s', _get_key_dir())

    translate_newlines = kwargs.get('translate_newlines', False)
    return _decrypt_object(gpg_data, translate_newlines=translate_newlines)
Create a gpg object given a gpg_keydir, and then use it to try to decrypt the data to be rendered.
def flush(self):
    """
    This only needs to be called manually from unit tests
    """
    self.logger.debug('Flush joining')
    self.queue.join()
    self.logger.debug('Flush joining ready')
This only needs to be called manually from unit tests
def doc_inherit(parent, style="parent"):
    """ Returns a function/method decorator that, given `parent`, updates the
    docstring of the decorated function/method based on the specified style
    and the corresponding attribute of `parent`.

    Parameters
    ----------
    parent : Union[str, Any]
        The docstring, or object of which the docstring is utilized as the
        parent docstring during the docstring merge.

    style : Union[Any, Callable[[str, str], str]], optional (default: "parent")
        A valid inheritance-scheme style ID or function that merges two docstrings.

    Returns
    -------
    custom_inherit.DocInheritDecorator

    Notes
    -----
    `doc_inherit` should always be used as the inner-most decorator when being
    used in conjunction with other decorators, such as `@property`,
    `@staticmethod`, etc."""
    merge_func = store[style]
    decorator = _DocInheritDecorator
    decorator.doc_merger = staticmethod(merge_func)
    return decorator(parent)
Returns a function/method decorator that, given `parent`, updates the docstring of the decorated function/method based on the specified style and the corresponding attribute of `parent`. Parameters ---------- parent : Union[str, Any] The docstring, or object of which the docstring is utilized as the parent docstring during the docstring merge. style : Union[Any, Callable[[str, str], str]], optional (default: "parent") A valid inheritance-scheme style ID or function that merges two docstrings. Returns ------- custom_inherit.DocInheritDecorator Notes ----- `doc_inherit` should always be used as the inner-most decorator when being used in conjunction with other decorators, such as `@property`, `@staticmethod`, etc.
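A brief usage sketch for the decorator above, assuming the `custom_inherit` package is importable; the class and method names are illustrative.

from custom_inherit import doc_inherit

class Parent:
    def meth(self, x):
        """Parent docstring.

        Parameters
        ----------
        x : int
        """

class Child(Parent):
    @doc_inherit(Parent.meth, style="numpy")
    def meth(self, x):
        """Child docstring (merged with the parent's numpy-style sections)."""

print(Child.meth.__doc__)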
def read_plain_double(file_obj, count):
    """Read `count` 64-bit float (double) using the plain encoding."""
    return struct.unpack("<{}d".format(count).encode("utf-8"), file_obj.read(8 * count))
Read `count` 64-bit float (double) using the plain encoding.
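A self-contained round-trip check of the helper above: pack three doubles into an in-memory buffer and read them back with the plain little-endian encoding.

import io
import struct

buf = io.BytesIO(struct.pack("<3d", 1.0, 2.5, -3.25))
print(read_plain_double(buf, 3))  # (1.0, 2.5, -3.25)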
def gen_signature(priv, pub, signature_path, auto_create=False, keysize=None):
    '''
    Generate master public-key-signature
    '''
    skey = get_key(__opts__)
    return skey.gen_keys_signature(priv, pub, signature_path, auto_create, keysize)
Generate master public-key-signature
def common_bootsrap_payload(self):
    """Common data always sent to the client"""
    messages = get_flashed_messages(with_categories=True)
    locale = str(get_locale())
    return {
        'flash_messages': messages,
        'conf': {k: conf.get(k) for k in FRONTEND_CONF_KEYS},
        'locale': locale,
        'language_pack': get_language_pack(locale),
        'feature_flags': get_feature_flags(),
    }
Common data always sent to the client
def get_by_id(self, symbol: str) -> SymbolMap:
    """ Finds the map by in-symbol """
    return self.query.filter(SymbolMap.in_symbol == symbol).first()
Finds the map by in-symbol
def setup_log(name):
    '''Returns a logging instance for the provided name. The returned object
    is an instance of logging.Logger. Logged messages will be printed to
    stderr when running in the CLI, or forwarded to XBMC's log when running
    in XBMC mode.
    '''
    _log = logging.getLogger(name)
    _log.setLevel(GLOBAL_LOG_LEVEL)
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - [%(name)s] %(message)s')
    handler.setFormatter(formatter)
    _log.addHandler(handler)
    _log.addFilter(XBMCFilter('[%s] ' % name))
    return _log
Returns a logging instance for the provided name. The returned object is an instance of logging.Logger. Logged messages will be printed to stderr when running in the CLI, or forwarded to XBMC's log when running in XBMC mode.
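A usage sketch for the helper above, assuming the module-level GLOBAL_LOG_LEVEL and XBMCFilter it references are defined; the addon name is illustrative.

log = setup_log('myaddon.scraper')
log.debug('fetching listing page')
log.error('failed to parse response')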
def linear_set_layer(layer_size, inputs, context=None, activation_fn=tf.nn.relu, dropout=0.0, name=None): """Basic layer type for doing funky things with sets. Applies a linear transformation to each element in the input set. If a context is supplied, it is concatenated with the inputs. e.g. One can use global_pool_1d to get a representation of the set which can then be used as the context for the next layer. TODO: Add bias add (or control the biases used). Args: layer_size: Dimension to transform the input vectors to. inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. context: A tensor of shape [batch_size, context_dims] containing a global statistic about the set. activation_fn: The activation function to use. dropout: Dropout probability. name: name. Returns: Tensor of shape [batch_size, sequence_length, output_dims] containing the sequences of transformed vectors. """ with tf.variable_scope( name, default_name="linear_set_layer", values=[inputs]): # Apply 1D convolution to apply linear filter to each element # along the 2nd dimension. outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv") # Apply the context if it exists. if context is not None: # Unfortunately tf doesn't support broadcasting via concat, but we can # simply add the transformed context to get the same effect. if len(context.get_shape().as_list()) == 2: context = tf.expand_dims(context, axis=1) cont_tfm = conv1d( context, layer_size, 1, activation=None, name="cont_conv") outputs += cont_tfm if activation_fn is not None: outputs = activation_fn(outputs) if dropout != 0.0: outputs = tf.nn.dropout(outputs, 1.0 - dropout) return outputs
Basic layer type for doing funky things with sets. Applies a linear transformation to each element in the input set. If a context is supplied, it is concatenated with the inputs. e.g. One can use global_pool_1d to get a representation of the set which can then be used as the context for the next layer. TODO: Add bias add (or control the biases used). Args: layer_size: Dimension to transform the input vectors to. inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. context: A tensor of shape [batch_size, context_dims] containing a global statistic about the set. activation_fn: The activation function to use. dropout: Dropout probability. name: name. Returns: Tensor of shape [batch_size, sequence_length, output_dims] containing the sequences of transformed vectors.
def _create_dock(self):
    """Create dockwidget and tabify it with the legend."""
    # Import dock here as it needs to be imported AFTER i18n is set up
    from safe.gui.widgets.dock import Dock
    self.dock_widget = Dock(self.iface)
    self.dock_widget.setObjectName('InaSAFE-Dock')
    self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dock_widget)
    legend_tab = self.iface.mainWindow().findChild(QApplication, 'Legend')
    if legend_tab:
        self.iface.mainWindow().tabifyDockWidget(
            legend_tab, self.dock_widget)
        self.dock_widget.raise_()
Create dockwidget and tabify it with the legend.
def insert(self, **fields):
    """Creates a new record in the database.

    This allows specifying custom conflict behavior using .on_conflict().
    If no special behavior was specified, this uses the normal Django create(..)

    Arguments:
        fields:
            The fields of the row to create.

    Returns:
        The primary key of the record that was created.
    """
    if self.conflict_target or self.conflict_action:
        compiler = self._build_insert_compiler([fields])
        rows = compiler.execute_sql(return_id=True)

        pk_field_name = self.model._meta.pk.name
        return rows[0][pk_field_name]

    # no special action required, use the standard Django create(..)
    return super().create(**fields).pk
Creates a new record in the database. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: fields: The fields of the row to create. Returns: The primary key of the record that was created.
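A hedged usage sketch in the style of django-postgres-extra, which this queryset method appears to come from; the model and field names are illustrative, not from the original.

from psqlextra.types import ConflictAction

# Insert a row, or update `name` if a row with the same `slug` already exists.
pk = (
    Tag.objects
    .on_conflict(['slug'], ConflictAction.UPDATE)
    .insert(slug='python', name='Python')
)
print(pk)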
async def serialize_properties(inputs: 'Inputs',
                               property_deps: Dict[str, List['Resource']],
                               input_transformer: Optional[Callable[[str], str]] = None) -> struct_pb2.Struct:
    """
    Serializes an arbitrary Input bag into a Protobuf structure, keeping track
    of the list of dependent resources in the `deps` list. Serializing
    properties is inherently async because it awaits any futures that are
    contained transitively within the input bag.
    """
    struct = struct_pb2.Struct()
    for k, v in inputs.items():
        deps = []
        result = await serialize_property(v, deps, input_transformer)

        # We treat properties that serialize to None as if they don't exist.
        if result is not None:
            # While serializing to a pb struct, we must "translate" all key names to be what the
            # engine is going to expect. Resources provide the "transform" function for doing this.
            translated_name = k
            if input_transformer is not None:
                translated_name = input_transformer(k)
                log.debug(f"top-level input property translated: {k} -> {translated_name}")
            # pylint: disable=unsupported-assignment-operation
            struct[translated_name] = result
            property_deps[translated_name] = deps

    return struct
Serializes an arbitrary Input bag into a Protobuf structure, keeping track of the list of dependent resources in the `deps` list. Serializing properties is inherently async because it awaits any futures that are contained transitively within the input bag.
def atime(self):
    """ Get most recent access time in timestamp. """
    try:
        return self._stat.st_atime
    except:  # pragma: no cover
        self._stat = self.stat()
        return self.atime
Get most recent access time in timestamp.
def read_xml(cls, url, markup, game):
    """
    read xml object
    :param url: contents url
    :param markup: markup provider
    :param game: MLBAM Game object
    :return: pitchpx.game.players.Players object
    """
    return Players._read_objects(
        MlbamUtil.find_xml("".join([url, cls.FILENAME]), markup), game)
read xml object :param url: contents url :param markup: markup provider :param game: MLBAM Game object :return: pitchpx.game.players.Players object
def IsTemplateParameterList(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is the end of template<>. Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is end of a template parameter list, False otherwise. """ (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, column) if (startpos > -1 and Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): return True return False
Check if the token ending on (linenum, column) is the end of template<>. Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is end of a template parameter list, False otherwise.
def plan(self):
    """ Gets the associated plan for this invoice.

    In order to provide a consistent view of invoices, the plan object
    should be taken from the first invoice item that has one, rather than
    using the plan associated with the subscription.

    Subscriptions (and their associated plan) are updated by the customer
    and represent what is current, but invoice items are immutable within
    the invoice and stay static/unchanged.

    In other words, a plan retrieved from an invoice item will represent
    the plan as it was at the time an invoice was issued. The plan
    retrieved from the subscription will be the currently active plan.

    :returns: The associated plan for the invoice.
    :rtype: ``djstripe.Plan``
    """
    for invoiceitem in self.invoiceitems.all():
        if invoiceitem.plan:
            return invoiceitem.plan

    if self.subscription:
        return self.subscription.plan
Gets the associated plan for this invoice. In order to provide a consistent view of invoices, the plan object should be taken from the first invoice item that has one, rather than using the plan associated with the subscription. Subscriptions (and their associated plan) are updated by the customer and represent what is current, but invoice items are immutable within the invoice and stay static/unchanged. In other words, a plan retrieved from an invoice item will represent the plan as it was at the time an invoice was issued. The plan retrieved from the subscription will be the currently active plan. :returns: The associated plan for the invoice. :rtype: ``djstripe.Plan``
def find_aliases(self, seq_id=None, namespace=None, alias=None, current_only=True, translate_ncbi_namespace=None): """returns iterator over alias annotation records that match criteria The arguments, all optional, restrict the records that are returned. Without arguments, all aliases are returned. If arguments contain %, the `like` comparison operator is used. Otherwise arguments must match exactly. """ clauses = [] params = [] def eq_or_like(s): return "like" if "%" in s else "=" if translate_ncbi_namespace is None: translate_ncbi_namespace = self.translate_ncbi_namespace if alias is not None: clauses += ["alias {} ?".format(eq_or_like(alias))] params += [alias] if namespace is not None: # Switch to using RefSeq for RefSeq accessions # issue #38: translate "RefSeq" to "NCBI" to enable RefSeq lookups # issue #31: later breaking change, translate database if namespace == "RefSeq": namespace = "NCBI" clauses += ["namespace {} ?".format(eq_or_like(namespace))] params += [namespace] if seq_id is not None: clauses += ["seq_id {} ?".format(eq_or_like(seq_id))] params += [seq_id] if current_only: clauses += ["is_current = 1"] cols = ["seqalias_id", "seq_id", "alias", "added", "is_current"] if translate_ncbi_namespace: cols += ["case namespace when 'NCBI' then 'RefSeq' else namespace end as namespace"] else: cols += ["namespace"] sql = "select {cols} from seqalias".format(cols=", ".join(cols)) if clauses: sql += " where " + " and ".join("(" + c + ")" for c in clauses) sql += " order by seq_id, namespace, alias" _logger.debug("Executing: " + sql) return self._db.execute(sql, params)
returns iterator over alias annotation records that match criteria The arguments, all optional, restrict the records that are returned. Without arguments, all aliases are returned. If arguments contain %, the `like` comparison operator is used. Otherwise arguments must match exactly.
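A hedged usage sketch against a biocommons.seqrepo-style aliases database, which this method appears to belong to; the local path and accession are illustrative only.

from biocommons.seqrepo import SeqRepo

sr = SeqRepo("/usr/local/share/seqrepo/latest")  # path is an assumption, adjust to your install
for rec in sr.aliases.find_aliases(namespace="RefSeq", alias="NM_000551.3"):
    print(rec["namespace"], rec["alias"], rec["seq_id"])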
def get_tokens(self, node, include_extra=False):
    """
    Yields all tokens making up the given node. If include_extra is True,
    includes non-coding tokens such as tokenize.NL and .COMMENT.
    """
    return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
Yields all tokens making up the given node. If include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
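A small usage sketch with the asttokens library, which this method appears to come from; the source snippet is illustrative.

import asttokens

source = "total = price * qty\n"
atok = asttokens.ASTTokens(source, parse=True)
assign = atok.tree.body[0]
for tok in atok.get_tokens(assign, include_extra=True):
    print(tok.type, repr(tok.string))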
def update_image(self, name):  # The `_image` is to avoid conflicts with MutableMapping.update.
    """
    Update (i.e., rename) the image

    :param str name: the new name for the image
    :return: an updated `Image` object
    :rtype: Image
    :raises DOAPIError: if the API endpoint replies with an error
    """
    api = self.doapi_manager
    return api._image(api.request(self.url, method='PUT',
                                  data={"name": name})["image"])
Update (i.e., rename) the image :param str name: the new name for the image :return: an updated `Image` object :rtype: Image :raises DOAPIError: if the API endpoint replies with an error
def WriteSignedBinaryReferences(self, binary_id, references, cursor=None): """Writes blob references for a signed binary to the DB.""" args = { "binary_type": binary_id.binary_type.SerializeToDataStore(), "binary_path": binary_id.path, "binary_path_hash": mysql_utils.Hash(binary_id.path), "blob_references": references.SerializeToString() } query = """ INSERT INTO signed_binary_references {cols} VALUES {vals} ON DUPLICATE KEY UPDATE blob_references = VALUES(blob_references) """.format( cols=mysql_utils.Columns(args), vals=mysql_utils.NamedPlaceholders(args)) cursor.execute(query, args)
Writes blob references for a signed binary to the DB.
def _spa_python_import(how): """Compile spa.py appropriately""" from pvlib import spa # check to see if the spa module was compiled with numba using_numba = spa.USE_NUMBA if how == 'numpy' and using_numba: # the spa module was compiled to numba code, so we need to # reload the module without compiling # the PVLIB_USE_NUMBA env variable is used to tell the module # to not compile with numba warnings.warn('Reloading spa to use numpy') os.environ['PVLIB_USE_NUMBA'] = '0' spa = reload(spa) del os.environ['PVLIB_USE_NUMBA'] elif how == 'numba' and not using_numba: # The spa module was not compiled to numba code, so set # PVLIB_USE_NUMBA so it does compile to numba on reload. warnings.warn('Reloading spa to use numba') os.environ['PVLIB_USE_NUMBA'] = '1' spa = reload(spa) del os.environ['PVLIB_USE_NUMBA'] elif how != 'numba' and how != 'numpy': raise ValueError("how must be either 'numba' or 'numpy'") return spa
Compile spa.py appropriately
def execute_loaders(self, env=None, silent=None, key=None, filename=None):
    """Execute all internal and registered loaders

    :param env: The environment to load
    :param silent: If loading errors are silenced
    :param key: if provided load a single key
    :param filename: optional custom filename to load
    """
    if key is None:
        default_loader(self, self._defaults)
    env = (env or self.current_env).upper()
    silent = silent or self.SILENT_ERRORS_FOR_DYNACONF
    settings_loader(
        self, env=env, silent=silent, key=key, filename=filename
    )
    self.load_extra_yaml(env, silent, key)  # DEPRECATED
    enable_external_loaders(self)
    for loader in self.loaders:
        self.logger.debug("Dynaconf executing: %s", loader.__name__)
        loader.load(self, env, silent=silent, key=key)
    self.load_includes(env, silent=silent, key=key)
    self.logger.debug("Loaded Files: %s", deduplicate(self._loaded_files))
Execute all internal and registered loaders :param env: The environment to load :param silent: If loading errors are silenced :param key: if provided load a single key :param filename: optional custom filename to load
def _easy_facetgrid(data, plotfunc, kind, x=None, y=None, row=None, col=None, col_wrap=None, sharex=True, sharey=True, aspect=None, size=None, subplot_kws=None, **kwargs): """ Convenience method to call xarray.plot.FacetGrid from 2d plotting methods kwargs are the arguments to 2d plotting method """ ax = kwargs.pop('ax', None) figsize = kwargs.pop('figsize', None) if ax is not None: raise ValueError("Can't use axes when making faceted plots.") if aspect is None: aspect = 1 if size is None: size = 3 elif figsize is not None: raise ValueError('cannot provide both `figsize` and `size` arguments') g = FacetGrid(data=data, col=col, row=row, col_wrap=col_wrap, sharex=sharex, sharey=sharey, figsize=figsize, aspect=aspect, size=size, subplot_kws=subplot_kws) if kind == 'line': return g.map_dataarray_line(plotfunc, x, y, **kwargs) if kind == 'dataarray': return g.map_dataarray(plotfunc, x, y, **kwargs)
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods kwargs are the arguments to 2d plotting method
def create_module(clear_target, target):
    """Creates a new template HFOS plugin module"""
    if os.path.exists(target):
        if clear_target:
            shutil.rmtree(target)
        else:
            log("Target exists! Use --clear to delete it first.",
                emitter='MANAGE')
            sys.exit(2)

    done = False
    info = None

    while not done:
        info = _ask_questionnaire()
        pprint(info)
        done = _ask('Is the above correct', default='y', data_type='bool')

    augmented_info = _augment_info(info)

    log("Constructing module %(plugin_name)s" % info)
    _construct_module(augmented_info, target)
Creates a new template HFOS plugin module
def calculate_signatures(self):
    """Calculate the signatures for this MAR file.

    Returns:
        A list of signature tuples: [(algorithm_id, signature_data), ...]
    """
    if not self.signing_algorithm:
        return []

    algo_id = {'sha1': 1, 'sha384': 2}[self.signing_algorithm]
    hashers = [(algo_id, make_hasher(algo_id))]
    for block in get_signature_data(self.fileobj, self.filesize):
        [h.update(block) for (_, h) in hashers]

    signatures = [(algo_id, sign_hash(self.signing_key, h.finalize(), h.algorithm.name))
                  for (algo_id, h) in hashers]
    return signatures
Calculate the signatures for this MAR file. Returns: A list of signature tuples: [(algorithm_id, signature_data), ...]
def parse_parameter_group(self, global_params, region, parameter_group):
    """
    Parse a single Redshift parameter group and fetch all of its parameters

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param parameter_group: Parameter group
    """
    pg_name = parameter_group.pop('ParameterGroupName')
    pg_id = self.get_non_aws_id(pg_name)  # Name could be used as only letters digits or hyphens
    parameter_group['name'] = pg_name
    parameter_group['parameters'] = {}
    api_client = api_clients[region]
    parameters = handle_truncated_response(api_client.describe_cluster_parameters,
                                           {'ParameterGroupName': pg_name},
                                           ['Parameters'])['Parameters']
    for parameter in parameters:
        param = {}
        param['value'] = parameter['ParameterValue']
        param['source'] = parameter['Source']
        parameter_group['parameters'][parameter['ParameterName']] = param
    self.parameter_groups[pg_id] = parameter_group
Parse a single Redshift parameter group and fetch all of its parameters :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param parameter_group: Parameter group
def set_status(self, status, msg): """ Set and return the status of the task. Args: status: Status object or string representation of the status msg: string with human-readable message used in the case of errors. """ # truncate string if it's long. msg will be logged in the object and we don't want to waste memory. if len(msg) > 2000: msg = msg[:2000] msg += "\n... snip ...\n" # Locked files must be explicitly unlocked if self.status == self.S_LOCKED or status == self.S_LOCKED: err_msg = ( "Locked files must be explicitly unlocked before calling set_status but\n" "task.status = %s, input status = %s" % (self.status, status)) raise RuntimeError(err_msg) status = Status.as_status(status) changed = True if hasattr(self, "_status"): changed = (status != self._status) self._status = status if status == self.S_RUN: # Set datetimes.start when the task enters S_RUN if self.datetimes.start is None: self.datetimes.start = datetime.datetime.now() # Add new entry to history only if the status has changed. if changed: if status == self.S_SUB: self.datetimes.submission = datetime.datetime.now() self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % ( self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg)) elif status == self.S_OK: self.history.info("Task completed %s", msg) elif status == self.S_ABICRITICAL: self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg) else: self.history.info("Status changed to %s. msg: %s", status, msg) ####################################################### # The section belows contains callbacks that should not # be executed if we are in spectator_mode ####################################################### if status == self.S_DONE: # Execute the callback self._on_done() if status == self.S_OK: # Finalize the task. if not self.finalized: self._on_ok() # here we remove the output files of the task and of its parents. if self.gc is not None and self.gc.policy == "task": self.clean_output_files() if self.status == self.S_OK: # Because _on_ok might have changed the status. self.send_signal(self.S_OK) return status
Set and return the status of the task. Args: status: Status object or string representation of the status msg: string with human-readable message used in the case of errors.
def load_heartrate(as_series=False): """Uniform heart-rate data. A sample of heartrate data borrowed from an `MIT database <http://ecg.mit.edu/time-series/>`_. The sample consists of 150 evenly spaced (0.5 seconds) heartrate measurements. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The heartrate vector. Examples -------- >>> from pmdarima.datasets import load_heartrate >>> load_heartrate() array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904 , 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389 , 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972 , 91.7821, 91.7911, 90.807 , 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243 , 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771 , 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963 , 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899 , 95.1592, 95.2415, 95.5414, 95.0971, 94.528 , 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044 , 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077 , 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052 , 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 ]) >>> load_heartrate(True).head() 0 84.2697 1 84.2697 2 84.0619 3 85.6542 4 87.2093 dtype: float64 References ---------- .. [1] Goldberger AL, Rigney DR. Nonlinear dynamics at the bedside. In: Glass L, Hunter P, McCulloch A, eds. Theory of Heart: Biomechanics, Biophysics, and Nonlinear Dynamics of Cardiac Function. New York: Springer-Verlag, 1991, pp. 583-605. 
""" rslt = np.array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904, 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389, 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972, 91.7821, 91.7911, 90.807, 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243, 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771, 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963, 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899, 95.1592, 95.2415, 95.5414, 95.0971, 94.528, 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044, 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077, 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052, 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645, 82.645]) if as_series: return pd.Series(rslt) return rslt
Uniform heart-rate data. A sample of heartrate data borrowed from an `MIT database <http://ecg.mit.edu/time-series/>`_. The sample consists of 150 evenly spaced (0.5 seconds) heartrate measurements. Parameters ---------- as_series : bool, optional (default=False) Whether to return a Pandas series. If False, will return a 1d numpy array. Returns ------- rslt : array-like, shape=(n_samples,) The heartrate vector. Examples -------- >>> from pmdarima.datasets import load_heartrate >>> load_heartrate() array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726, 86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605, 93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904 , 98.3437, 98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471, 97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389 , 95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026, 96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603, 93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211, 95.8501, 94.6703, 93.0993, 91.972 , 91.7821, 91.7911, 90.807 , 89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238, 93.243 , 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952, 91.771 , 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716, 94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819, 93.4731, 92.7997, 92.963 , 92.6996, 91.9648, 91.2417, 91.9312, 93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065, 92.1588, 93.6376, 94.899 , 95.1592, 95.2415, 95.5414, 95.0971, 94.528 , 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947, 97.4291, 98.1767, 97.0148, 96.044 , 95.9581, 96.4814, 96.5211, 95.3629, 93.5741, 92.077 , 90.4094, 90.1751, 91.3312, 91.2883, 89.0592, 87.052 , 86.6226, 85.7889, 85.6348, 85.3911, 83.8064, 82.8729, 82.6266, 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 , 82.645 ]) >>> load_heartrate(True).head() 0 84.2697 1 84.2697 2 84.0619 3 85.6542 4 87.2093 dtype: float64 References ---------- .. [1] Goldberger AL, Rigney DR. Nonlinear dynamics at the bedside. In: Glass L, Hunter P, McCulloch A, eds. Theory of Heart: Biomechanics, Biophysics, and Nonlinear Dynamics of Cardiac Function. New York: Springer-Verlag, 1991, pp. 583-605.
def hardware_connector_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
    connector = ET.SubElement(hardware, "connector")
    name = ET.SubElement(connector, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def listdir_matches(match): """Returns a list of filenames contained in the named directory. Only filenames which start with `match` will be returned. Directories will have a trailing slash. """ import os last_slash = match.rfind('/') if last_slash == -1: dirname = '.' match_prefix = match result_prefix = '' else: match_prefix = match[last_slash + 1:] if last_slash == 0: dirname = '/' result_prefix = '/' else: dirname = match[0:last_slash] result_prefix = dirname + '/' def add_suffix_if_dir(filename): try: if (os.stat(filename)[0] & 0x4000) != 0: return filename + '/' except FileNotFoundError: # This can happen when a symlink points to a non-existent file. pass return filename matches = [add_suffix_if_dir(result_prefix + filename) for filename in os.listdir(dirname) if filename.startswith(match_prefix)] return matches
Returns a list of filenames contained in the named directory. Only filenames which start with `match` will be returned. Directories will have a trailing slash.
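A small usage sketch (not part of the original source): the temporary directory and file names below are hypothetical, and the function above is assumed to be in scope.

import os
import tempfile

tmp = tempfile.mkdtemp()
open(os.path.join(tmp, 'main.py'), 'w').close()
open(os.path.join(tmp, 'boot.py'), 'w').close()
os.mkdir(os.path.join(tmp, 'lib'))

print(listdir_matches(tmp + '/ma'))  # ['<tmp>/main.py']
print(listdir_matches(tmp + '/l'))   # ['<tmp>/lib/'] -- the trailing slash marks a directory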
def totext(self) -> str: """ Return blob content from a StorageBlobModel instance as a string. """ sreturn = '' if self.properties.content_settings.content_encoding is None: raise AzureStorageWrapException(self, 'can not convert blob {!s} to text because content_encoding is not given'.format(self.name)) else: sreturn = self.content.decode(self.properties.content_settings.content_encoding, 'ignore') return sreturn
Return blob content from a StorageBlobModel instance as a string.
def patch_namespaced_pod_preset(self, name, namespace, body, **kwargs): # noqa: E501 """patch_namespaced_pod_preset # noqa: E501 partially update the specified PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_pod_preset(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1alpha1PodPreset If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.patch_namespaced_pod_preset_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
patch_namespaced_pod_preset # noqa: E501 partially update the specified PodPreset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_pod_preset(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodPreset (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1alpha1PodPreset If the method is called asynchronously, returns the request thread.
def _set_property(xml_root, name, value, properties=None): """Sets property to specified value.""" if properties is None: properties = xml_root.find("properties") for prop in properties: if prop.get("name") == name: prop.set("value", utils.get_unicode_str(value)) break else: etree.SubElement( properties, "property", {"name": name, "value": utils.get_unicode_str(value)} )
Sets property to specified value.
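A hedged usage sketch: it assumes `etree` in the function above is lxml.etree and that `utils.get_unicode_str` simply returns its argument as unicode text; the testcase XML and property names are hypothetical.

from lxml import etree

testcase = etree.fromstring(
    '<testcase><properties>'
    '<property name="polarion-testrun-id" value="old-run"/>'
    '</properties></testcase>'
)
_set_property(testcase, "polarion-testrun-id", "RUN-123")  # overwrites the existing value
_set_property(testcase, "polarion-project-id", "PROJ")     # appends a new <property> element
print(etree.tostring(testcase, pretty_print=True).decode())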
def stream(self, actor_sid=values.unset, event_type=values.unset, resource_sid=values.unset, source_ip_address=values.unset, start_date=values.unset, end_date=values.unset, limit=None, page_size=None): """ Streams EventInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode actor_sid: Only include Events initiated by this Actor :param unicode event_type: Only include Events of this EventType :param unicode resource_sid: Only include Events referring to this resource :param unicode source_ip_address: Only include Events that originated from this IP address :param datetime start_date: Only show events on or after this date :param datetime end_date: Only show events on or before this date :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.monitor.v1.event.EventInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( actor_sid=actor_sid, event_type=event_type, resource_sid=resource_sid, source_ip_address=source_ip_address, start_date=start_date, end_date=end_date, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
Streams EventInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode actor_sid: Only include Events initiated by this Actor :param unicode event_type: Only include Events of this EventType :param unicode resource_sid: Only include Events referring to this resource :param unicode source_ip_address: Only include Events that originated from this IP address :param datetime start_date: Only show events on or after this date :param datetime end_date: Only show events on or before this date :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.monitor.v1.event.EventInstance]
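A hedged usage sketch with the Twilio REST client; the credentials are placeholders and the resource path (client.monitor.events) is inferred from the :rtype: above.

from datetime import datetime
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
for event in client.monitor.events.stream(event_type="account.updated",
                                          start_date=datetime(2024, 1, 1),
                                          limit=20):
    print(event.sid, event.event_type)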
def fit(self, bbox, max_zoom=MAX_ZOOM, force_zoom=None): """ Fits the projector to a BoundingBox :param bbox: BoundingBox :param max_zoom: max zoom allowed :param force_zoom: force this specific zoom value even if the whole bbox does not completely fit """ BUFFER_FACTOR = 1.1 if force_zoom is not None: self.zoom = force_zoom else: for zoom in range(max_zoom, MIN_ZOOM-1, -1): self.zoom = zoom left, top = self.lonlat_to_screen([bbox.west], [bbox.north]) right, bottom = self.lonlat_to_screen([bbox.east], [bbox.south]) if (top - bottom < SCREEN_H*BUFFER_FACTOR) and (right - left < SCREEN_W*BUFFER_FACTOR): break west_tile, north_tile = self.deg2num(bbox.north, bbox.west, self.zoom) east_tile, south_tile = self.deg2num(bbox.south, bbox.east, self.zoom) self.xtile = west_tile - self.tiles_horizontally/2. + (east_tile - west_tile)/2 self.ytile = north_tile - self.tiles_vertically/2. + (south_tile - north_tile)/2 self.calculate_viewport_size()
Fits the projector to a BoundingBox :param bbox: BoundingBox :param max_zoom: max zoom allowed :param force_zoom: force this specific zoom value even if the whole bbox does not completely fit
def iter_links_link_element(self, element): '''Iterate a ``link`` for URLs. This function handles stylesheets and icons in addition to standard scraping rules. ''' rel = element.attrib.get('rel', '') stylesheet = 'stylesheet' in rel icon = 'icon' in rel inline = stylesheet or icon if stylesheet: link_type = LinkType.css elif icon: link_type = LinkType.media else: link_type = None for attrib_name, link in self.iter_links_by_attrib(element): yield LinkInfo( element=element, tag=element.tag, attrib=attrib_name, link=link, inline=inline, linked=not inline, base_link=None, value_type='plain', link_type=link_type )
Iterate a ``link`` for URLs. This function handles stylesheets and icons in addition to standard scraping rules.
def _build_request_url(self, secure, api_method, version): """Build a URL for an API method request """ if secure: proto = ANDROID.PROTOCOL_SECURE else: proto = ANDROID.PROTOCOL_INSECURE req_url = ANDROID.API_URL.format( protocol=proto, api_method=api_method, version=version ) return req_url
Build a URL for an API method request
def page_factory(request): """ Page factory. Config models example: .. code-block:: python models = { '': [WebPage, CatalogResource], 'catalogue': CatalogResource, 'news': NewsResource, } """ prefix = request.matchdict['prefix'] # /{prefix}/page1/page2/page3... settings = request.registry.settings dbsession = settings[CONFIG_DBSESSION] config = settings[CONFIG_MODELS] if prefix not in config: # prepend {prefix} to *traverse request.matchdict['traverse'] =\ tuple([prefix] + list(request.matchdict['traverse'])) prefix = None # Get all resources and models from config with the same prefix. resources = config.get( prefix, config.get( # 1. get resources with prefix same as URL prefix '', config.get( # 2. if not, then try to get empty prefix '/', None))) # 3. else try to get prefix '/' otherwise None if not hasattr(resources, '__iter__'): resources = (resources, ) tree = {} if not resources: return tree # Add top level nodes of resources in the tree for resource in resources: table = None if not hasattr(resource, '__table__')\ and hasattr(resource, 'model'): table = resource.model else: table = resource if not hasattr(table, 'slug'): continue nodes = dbsession.query(table) if hasattr(table, 'parent_id'): nodes = nodes.filter(or_( table.parent_id == None, # noqa table.parent.has(table.slug == '/') )) for node in nodes: if not node.slug: continue resource = resource_of_node(resources, node) tree[node.slug] = resource(node, prefix=prefix) return tree
Page factory. Config models example: .. code-block:: python models = { '': [WebPage, CatalogResource], 'catalogue': CatalogResource, 'news': NewsResource, }
def get_balance(self): """ Get balance with provider. """ if not SMSGLOBAL_CHECK_BALANCE_COUNTRY: raise Exception('SMSGLOBAL_CHECK_BALANCE_COUNTRY setting must be set to check balance.') params = { 'user' : self.get_username(), 'password' : self.get_password(), 'country' : SMSGLOBAL_CHECK_BALANCE_COUNTRY, } req = urllib2.Request(SMSGLOBAL_API_URL_CHECKBALANCE, urllib.urlencode(params)) response = urllib2.urlopen(req).read() # CREDITS:8658.44;COUNTRY:AU;SMS:3764.54; if response.startswith('ERROR'): raise Exception('Error retrieving balance: %s' % response.replace('ERROR:', '')) return dict([(p.split(':')[0].lower(), p.split(':')[1]) for p in response.split(';') if len(p) > 0])
Get balance with provider.
def add_orbit(self, component=None, **kwargs): """ Shortcut to :meth:`add_component` but with kind='orbit' """ kwargs.setdefault('component', component) return self.add_component('orbit', **kwargs)
Shortcut to :meth:`add_component` but with kind='orbit'
def _build_word(syl, vowels): """Builds a Pinyin word re pattern from a Pinyin syllable re pattern. A word is defined as a series of consecutive valid Pinyin syllables with optional hyphens and apostrophes interspersed. Hyphens must be followed immediately by another valid Pinyin syllable. Apostrophes must be followed by another valid Pinyin syllable that starts with an 'a', 'e', or 'o'. """ return "(?:{syl}(?:-(?={syl})|'(?=[{a}{e}{o}])(?={syl}))?)+".format( syl=syl, a=vowels['a'], e=vowels['e'], o=vowels['o'])
Builds a Pinyin word re pattern from a Pinyin syllable re pattern. A word is defined as a series of consecutive valid Pinyin syllables with optional hyphens and apostrophes interspersed. Hyphens must be followed immediately by another valid Pinyin syllable. Apostrophes must be followed by another valid Pinyin syllable that starts with an 'a', 'e', or 'o'.
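A small, hedged illustration with a toy syllable pattern and abbreviated vowel classes (the real library derives both from its full Pinyin tables).

import re

syl = "(?:xi|an|ni|hao)"
vowels = {'a': 'aāáǎà', 'e': 'eēéěè', 'o': 'oōóǒò'}
word = _build_word(syl, vowels)

print(bool(re.fullmatch(word, "ni-hao")))  # True: hyphen is followed by a valid syllable
print(bool(re.fullmatch(word, "xi'an")))   # True: apostrophe is followed by an 'a' syllable
print(bool(re.fullmatch(word, "ni-")))     # False: trailing hyphen has no syllable after it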
def _datalog(self, parameter, run, maxrun, det_id): "Extract data from database" values = { 'parameter_name': parameter, 'minrun': run, 'maxrun': maxrun, 'detid': det_id, } data = urlencode(values) content = self._get_content('streamds/datalognumbers.txt?' + data) if content.startswith('ERROR'): log.error(content) return None try: dataframe = read_csv(content) except ValueError: log.warning( "Empty dataset" ) # ...probably. Waiting for more info return make_empty_dataset() else: add_datetime(dataframe) try: self._add_converted_units(dataframe, parameter) except KeyError: log.warning( "Could not add converted units for {0}".format(parameter) ) return dataframe
Extract data from database
def is_contradictory(self, other): """ Returns True if the two DictCells are unmergeable. """ if not isinstance(other, DictCell): raise Exception("Incomparable") for key, val in self: if key in other.__dict__['p'] \ and val.is_contradictory(other.__dict__['p'][key]): return True return False
Returns True if the two DictCells are unmergeable.
def warn_if_detached(func): """ Warn if self / cls is detached. """ @wraps(func) def wrapped(this, *args, **kwargs): # Check for _detached in __dict__ instead of using hasattr # to avoid infinite loop in __getattr__ if '_detached' in this.__dict__ and this._detached: warnings.warn('here') return func(this, *args, **kwargs) return wrapped
Warn if self / cls is detached.
def check_for_eni_source(): ''' Juju removes the source line when setting up interfaces; replace it if missing ''' with open('/etc/network/interfaces', 'r') as eni: for line in eni: if line.strip() == 'source /etc/network/interfaces.d/*': return with open('/etc/network/interfaces', 'a') as eni: eni.write('\nsource /etc/network/interfaces.d/*')
Juju removes the source line when setting up interfaces; replace it if missing
def export_task_info(node_params, output_element): """ Adds Task node attributes to exported XML element :param node_params: dictionary with given task parameters, :param output_element: object representing BPMN XML 'task' element. """ if consts.Consts.default in node_params and node_params[consts.Consts.default] is not None: output_element.set(consts.Consts.default, node_params[consts.Consts.default])
Adds Task node attributes to exported XML element :param node_params: dictionary with given task parameters, :param output_element: object representing BPMN XML 'task' element.
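A hypothetical illustration, assuming `consts.Consts.default` holds the string 'default' (the BPMN attribute the exporter writes); the ids and names are made up.

import xml.etree.ElementTree as ET

task_params = {'default': 'flow_42', 'node_name': 'Review order'}
task_el = ET.Element('task', id='task_1', name=task_params['node_name'])
export_task_info(task_params, task_el)
print(ET.tostring(task_el).decode())  # roughly: <task id="task_1" name="Review order" default="flow_42" />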
def quit(self): """Remove this user from all channels and reinitialize the user's list of joined channels. """ for c in self.channels: c.users.remove(self.nick) self.channels = []
Remove this user from all channels and reinitialize the user's list of joined channels.
def backup(schema, uuid, export_filter, export_format, filename, pretty, export_all, omit): """Exports all collections to (JSON-) files.""" export_format = export_format.upper() if pretty: indent = 4 else: indent = 0 f = None if filename: try: f = open(filename, 'w') except (IOError, PermissionError) as e: backup_log('Could not open output file for writing:', exc=True, lvl=error) return def output(what, convert=False): """Output the backup in a specified format.""" if convert: if export_format == 'JSON': data = json.dumps(what, indent=indent) else: data = "" else: data = what if not filename: print(data) else: f.write(data) if schema is None: if export_all is False: backup_log('No schema given.', lvl=warn) return else: schemata = objectmodels.keys() else: schemata = [schema] all_items = {} for schema_item in schemata: model = objectmodels[schema_item] if uuid: obj = model.find({'uuid': uuid}) elif export_filter: obj = model.find(literal_eval(export_filter)) else: obj = model.find() items = [] for item in obj: fields = item.serializablefields() for field in omit: try: fields.pop(field) except KeyError: pass items.append(fields) all_items[schema_item] = items # if pretty is True: # output('\n// Objectmodel: ' + schema_item + '\n\n') # output(schema_item + ' = [\n') output(all_items, convert=True) if f is not None: f.flush() f.close()
Exports all collections to (JSON-) files.
def grantxml2json(self, grant_xml): """Convert OpenAIRE grant XML into JSON.""" tree = etree.fromstring(grant_xml) # XML harvested from OAI-PMH has a different format/structure if tree.prefix == 'oai': ptree = self.get_subtree( tree, '/oai:record/oai:metadata/oaf:entity/oaf:project')[0] header = self.get_subtree(tree, '/oai:record/oai:header')[0] oai_id = self.get_text_node(header, 'oai:identifier') modified = self.get_text_node(header, 'oai:datestamp') else: ptree = self.get_subtree( tree, '/record/result/metadata/oaf:entity/oaf:project')[0] header = self.get_subtree(tree, '/record/result/header')[0] oai_id = self.get_text_node(header, 'dri:objIdentifier') modified = self.get_text_node(header, 'dri:dateOfTransformation') url = self.get_text_node(ptree, 'websiteurl') code = self.get_text_node(ptree, 'code') title = self.get_text_node(ptree, 'title') acronym = self.get_text_node(ptree, 'acronym') startdate = self.get_text_node(ptree, 'startdate') enddate = self.get_text_node(ptree, 'enddate') funder = self.fundertree2json(ptree, oai_id) internal_id = "{0}::{1}".format(funder['doi'], code) eurepo_id = \ "info:eu-repo/grantAgreement/{funder}/{program}/{code}/".format( funder=quote_plus(funder['name'].encode('utf8')), program=quote_plus(funder['program'].encode('utf8')), code=quote_plus(code.encode('utf8')), ) ret_json = { '$schema': self.schema_formatter.schema_url, 'internal_id': internal_id, 'identifiers': { 'oaf': oai_id, 'eurepo': eurepo_id, 'purl': url if url.startswith("http://purl.org/") else None, }, 'code': code, 'title': title, 'acronym': acronym, 'startdate': startdate, 'enddate': enddate, 'funder': {'$ref': funder['url']}, 'program': funder['program'], 'url': url, 'remote_modified': modified, } return ret_json
Convert OpenAIRE grant XML into JSON.
def r_bergomi(H,T,eta,xi,rho,S0,r,N,M,dW=None,dW_orth=None,cholesky = False,return_v=False): ''' Return M Euler-Maruyama sample paths with N time steps of (S_t,v_t), where (S_t,v_t) follows the rBergomi model of mathematical finance :rtype: M x N x d array ''' times = np.linspace(0, T, N) dt = T/(N-1) times = np.reshape(times,(-1,1)) if dW is None: dW = np.sqrt(dt)*np.random.normal(size=(N-1,M)) if dW_orth is None: dW_orth = np.sqrt(dt)*np.random.normal(size=(N-1,M)) dZ = rho*dW+np.sqrt(1-rho**2)*dW_orth Y = eta*np.sqrt(2*H)*fBrown(H,T,N,M,dW =dW,cholesky = cholesky) v = xi*np.exp(Y-0.5*(eta**2)*times**(2*H)) S = S0*np.exp(integral(np.sqrt(v),dF = dZ,axis=0,cumulative = True)+integral(r - 0.5*v,F = times,axis=0,trapez=False,cumulative = True)) if return_v: return np.array([S,v]).T else: return np.array([S]).T
Return M Euler-Maruyama sample paths with N time steps of (S_t,v_t), where (S_t,v_t) follows the rBergomi model of mathematical finance :rtype: M x N x d array
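A hedged usage sketch; fBrown and integral are helpers assumed to live in the same module as r_bergomi, and the parameter values are illustrative only.

import numpy as np

np.random.seed(0)
paths = r_bergomi(H=0.1, T=1.0, eta=1.9, xi=0.235**2, rho=-0.9,
                  S0=100.0, r=0.0, N=256, M=1000)
print(paths.shape)      # (1000, 256, 1): M paths, N time steps, d=1 (price only)
terminal = paths[:, -1, 0]
print(terminal.mean())  # should sit near S0 * exp(r*T) up to Monte Carlo error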
def transform_coords(self, width, height): """Return the current absolute (x, y) coordinates of the tablet tool event, transformed to screen coordinates and whether they have changed in this event. Note: On some devices, returned value may be negative or larger than the width of the device. See `Out-of-bounds motion events`_ for more details. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: ((float, float), bool): The current absolute (x, y) coordinates transformed to screen coordinates and whether they have changed. """ x = self._libinput.libinput_event_tablet_tool_get_x_transformed( self._handle, width) y = self._libinput.libinput_event_tablet_tool_get_y_transformed( self._handle, height) x_changed = self._libinput.libinput_event_tablet_tool_x_has_changed( self._handle) y_changed = self._libinput.libinput_event_tablet_tool_y_has_changed( self._handle) return (x, y), x_changed or y_changed
Return the current absolute (x, y) coordinates of the tablet tool event, transformed to screen coordinates and whether they have changed in this event. Note: On some devices, returned value may be negative or larger than the width of the device. See `Out-of-bounds motion events`_ for more details. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: ((float, float), bool): The current absolute (x, y) coordinates transformed to screen coordinates and whether they have changed.
def get_draft_url(url): """ Return the given URL with a draft mode HMAC in its querystring. """ if verify_draft_url(url): # Nothing to do. Already a valid draft URL. return url # Parse querystring and add draft mode HMAC. url = urlparse.urlparse(url) salt = get_random_string(5) # QueryDict requires a bytestring as its first argument query = QueryDict(force_bytes(url.query), mutable=True) query['preview'] = '%s:%s' % (salt, get_draft_hmac(salt, url.path)) # Reconstruct URL. parts = list(url) parts[4] = query.urlencode(safe=':') return urlparse.urlunparse(parts)
Return the given URL with a draft mode HMAC in its querystring.
def run_file(name, database, query_file=None, output=None, grain=None, key=None, overwrite=True, saltenv=None, check_db_exists=True, **connection_args): ''' Execute an arbitrary query on the specified database .. versionadded:: 2017.7.0 name Used only as an ID database The name of the database to execute the query_file on query_file The file of mysql commands to run output grain: output in a grain other: the file to store results None: output to the result comment (default) grain: grain to store the output (need output=grain) key: the specified grain will be treated as a dictionary, the result of this state will be stored under the specified key. overwrite: The file or grain will be overwritten if it already exists (default) saltenv: The saltenv to pull the query_file from check_db_exists: The state run will check that the specified database exists (default=True) before running any queries ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Database {0} is already present'.format(database)} if any([query_file.startswith(proto) for proto in ['http://', 'https://', 'salt://', 's3://', 'swift://']]): query_file = __salt__['cp.cache_file'](query_file, saltenv=saltenv or __env__) if not os.path.exists(query_file): ret['comment'] = 'File {0} does not exist'.format(query_file) ret['result'] = False return ret # check if database exists if check_db_exists and not __salt__['mysql.db_exists'](database, **connection_args): err = _get_mysql_error() if err is not None: ret['comment'] = err ret['result'] = False return ret ret['result'] = None ret['comment'] = ('Database {0} is not present' ).format(database) return ret # Check if execution needed if output == 'grain': if grain is not None and key is None: if not overwrite and grain in __salt__['grains.ls'](): ret['comment'] = 'No execution needed. Grain ' + grain\ + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'grain: ' + grain return ret elif grain is not None: if grain in __salt__['grains.ls'](): grain_value = __salt__['grains.get'](grain) else: grain_value = {} if not overwrite and key in grain_value: ret['comment'] = 'No execution needed. Grain ' + grain\ + ':' + key + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'grain: ' + grain + ':' + key return ret else: ret['result'] = False ret['comment'] = "Error: output type 'grain' needs the grain "\ + "parameter\n" return ret elif output is not None: if not overwrite and os.path.isfile(output): ret['comment'] = 'No execution needed. 
File ' + output\ + ' already set' return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, storing result in '\ + 'file: ' + output return ret elif __opts__['test']: ret['result'] = None ret['comment'] = 'Query would execute, not storing result' return ret # The database is present, execute the query query_result = __salt__['mysql.file_query'](database, query_file, **connection_args) if query_result is False: ret['result'] = False return ret mapped_results = [] if 'results' in query_result: for res in query_result['results']: mapped_line = {} for idx, col in enumerate(query_result['columns']): mapped_line[col] = res[idx] mapped_results.append(mapped_line) query_result['results'] = mapped_results ret['comment'] = six.text_type(query_result) if output == 'grain': if grain is not None and key is None: __salt__['grains.setval'](grain, query_result) ret['changes']['query'] = "Executed. Output into grain: "\ + grain elif grain is not None: if grain in __salt__['grains.ls'](): grain_value = __salt__['grains.get'](grain) else: grain_value = {} grain_value[key] = query_result __salt__['grains.setval'](grain, grain_value) ret['changes']['query'] = "Executed. Output into grain: "\ + grain + ":" + key elif output is not None: ret['changes']['query'] = "Executed. Output into " + output with salt.utils.files.fopen(output, 'w') as output_file: if 'results' in query_result: for res in query_result['results']: for col, val in six.iteritems(res): output_file.write( salt.utils.stringutils.to_str( col + ':' + val + '\n' ) ) else: output_file.write( salt.utils.stringutils.to_str(query_result) ) else: ret['changes']['query'] = "Executed" return ret
Execute an arbitrary query on the specified database .. versionadded:: 2017.7.0 name Used only as an ID database The name of the database to execute the query_file on query_file The file of mysql commands to run output grain: output in a grain other: the file to store results None: output to the result comment (default) grain: grain to store the output (need output=grain) key: the specified grain will be treated as a dictionary, the result of this state will be stored under the specified key. overwrite: The file or grain will be overwritten if it already exists (default) saltenv: The saltenv to pull the query_file from check_db_exists: The state run will check that the specified database exists (default=True) before running any queries
def run(self, resources): """Sets the RTC timestamp to UTC. Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step. """ hwman = resources['connection'] con = hwman.hwman.controller() test_interface = con.test_interface() try: test_interface.synchronize_clock() print('Time currently set at %s' % test_interface.current_time_str()) except: raise ArgumentError('Error setting RTC time, check if controller actually has RTC or if iotile-support-lib-controller-3 is updated')
Sets the RTC timestamp to UTC. Args: resources (dict): A dictionary containing the required resources that we needed access to in order to perform this step.
def get_column_metadata(gctx_file_path, convert_neg_666=True): """ Opens .gctx file and returns only column metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - col_meta (pandas DataFrame): a DataFrame of all column metadata values. """ full_path = os.path.expanduser(gctx_file_path) # open file gctx_file = h5py.File(full_path, "r") col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) gctx_file.close() return col_meta
Opens .gctx file and returns only column metadata Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to num Output: - col_meta (pandas DataFrame): a DataFrame of all column metadata values.
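A minimal, hypothetical call (the file path is a placeholder); the result is a pandas DataFrame of per-column annotations.

col_meta = get_column_metadata("my_data.gctx")
print(col_meta.shape)             # (n_columns, n_metadata_fields)
print(col_meta.columns.tolist())  # the column annotation headers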
def users_feature(app): """ Add users feature Allows to register users and assign groups, instantiates flask login, flask principal and oauth integration """ # check we have jwt secret configures if not app.config.get('USER_JWT_SECRET', None): raise x.JwtSecretMissing('Please set USER_JWT_SECRET in config') # use custom session interface app.session_interface = BoilerSessionInterface() # init user service user_service.init(app) # init login manager login_manager.init_app(app) login_manager.login_view = 'user.login' login_manager.login_message = None @login_manager.user_loader def load_user(id): return user_service.get(id) # init OAuth oauth.init_app(app) registry = OauthProviders(app) providers = registry.get_providers() with app.app_context(): for provider in providers: if provider not in oauth.remote_apps: oauth.remote_app(provider, **providers[provider]) registry.register_token_getter(provider) # init principal principal.init_app(app) @principal.identity_loader def load_identity(): if current_user.is_authenticated: return Identity(current_user.id) session.pop('identity.name', None) session.pop('identity.auth_type', None) return AnonymousIdentity() @identity_loaded.connect_via(app) def on_identity_loaded(sender, identity): identity.user = current_user if not current_user.is_authenticated: return identity.provides.add(UserNeed(current_user.id)) for role in current_user.roles: identity.provides.add(RoleNeed(role.handle))
Add users feature Allows to register users and assign groups, instantiates flask login, flask principal and oauth integration
def covariance_matrix(self): """ Constructs the covariance matrix of input data from the singular value decomposition. Note that this is different than a covariance matrix of residuals, which is what we want for calculating fit errors. Using SVD output to compute the covariance matrix: X = UΣV⊤, so XX⊤ = (UΣV⊤)(UΣV⊤)⊤ = (UΣV⊤)(VΣU⊤). Since V is an orthogonal matrix (V⊤V = I), the covariance matrix of the input data is XX⊤ = UΣ²U⊤. Because the axes represent identity in the PCA coordinate system, the PCA major axes themselves represent an affine transformation matrix from PCA to Cartesian space """ a = N.dot(self.U, self.sigma) # This yields the covariance matrix in Cartesian coordinates cv = N.dot(a, a.T) return cv
Constructs the covariance matrix of input data from the singular value decomposition. Note that this is different than a covariance matrix of residuals, which is what we want for calculating fit errors. Using SVD output to compute the covariance matrix: X = UΣV⊤, so XX⊤ = (UΣV⊤)(UΣV⊤)⊤ = (UΣV⊤)(VΣU⊤). Since V is an orthogonal matrix (V⊤V = I), the covariance matrix of the input data is XX⊤ = UΣ²U⊤. Because the axes represent identity in the PCA coordinate system, the PCA major axes themselves represent an affine transformation matrix from PCA to Cartesian space
def dispatch_request(self, *args, **kwargs): """ If validation=True perform validation """ if self.validation: specs = {} attrs = flasgger.constants.OPTIONAL_FIELDS + [ 'parameters', 'definitions', 'responses', 'summary', 'description' ] for attr in attrs: specs[attr] = getattr(self, attr) definitions = {} specs.update(convert_schemas(specs, definitions)) specs['definitions'] = definitions flasgger.utils.validate( specs=specs, validation_function=self.validation_function) return super(SwaggerView, self).dispatch_request(*args, **kwargs)
If validation=True perform validation
def get_urls(self): """ Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model """ urls = super(CompetitionEntryAdmin, self).get_urls() csv_urls = patterns('', url( r'^exportcsv/$', self.admin_site.admin_view(self.csv_export), name='competition-csv-export' ) ) return csv_urls + urls
Extend the admin urls for the CompetitionEntryAdmin model to be able to invoke a CSV export view on the admin model
def getPage(url, contextFactory=None, *args, **kwargs): """Download a web page as a string. Download a page. Return a deferred, which will callback with a page (as a string) or errback with a description of the error. See HTTPClientFactory to see what extra args can be passed. """ scheme, host, port, path = client._parse(url) factory = client.HTTPClientFactory(url, *args, **kwargs) if scheme == 'https': if contextFactory is None: raise RuntimeError, 'must provide a contextFactory' conn = reactor.connectSSL(host, port, factory, contextFactory) else: conn = reactor.connectTCP(host, port, factory) return factory
Download a web page as a string. Download a page. Return a deferred, which will callback with a page (as a string) or errback with a description of the error. See HTTPClientFactory to see what extra args can be passed.
def describe_instances(self, xml_bytes): """ Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. In instances reservation is available on the instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS. """ root = XML(xml_bytes) results = [] # May be a more elegant way to do this: for reservation_data in root.find("reservationSet"): # Create a reservation object with the parsed data. reservation = model.Reservation( reservation_id=reservation_data.findtext("reservationId"), owner_id=reservation_data.findtext("ownerId")) # Get the list of instances. instances = self.instances_set( reservation_data, reservation) results.extend(instances) return results
Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. The reservation is available on each instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS.
def remove_hook(self, name, func): ''' Remove a callback from a hook. ''' if name in self._hooks and func in self._hooks[name]: self._hooks[name].remove(func) return True
Remove a callback from a hook.
def perform_experiment(self, engine_list): """ Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns self.result contains list of (recall, precision, search_time) tuple. All are the averaged values over all request vectors. search_time is the average retrieval/search time compared to the average exact search time. """ # We will fill this array with measures for all the engines. result = [] # For each engine, first index vectors and then retrieve neighbours for endine_idx, engine in enumerate(engine_list): print('Engine %d / %d' % (endine_idx, len(engine_list))) # Clean storage engine.clean_all_buckets() # Use this to compute average recall avg_recall = 0.0 # Use this to compute average precision avg_precision = 0.0 # Use this to compute average search time avg_search_time = 0.0 # Index all vectors and store them for index, v in enumerate(self.vectors): engine.store_vector(v, 'data_%d' % index) # Look for N nearest neighbours for query vectors for index in self.query_indices: # Get indices of the real nearest as set real_nearest = set(self.closest[index]) # We have to time the search search_time_start = time.time() # Get nearest N according to engine nearest = engine.neighbours(self.vectors[index]) # Get search time search_time = time.time() - search_time_start # For comparance we need their indices (as set) nearest = set([self.__index_of_vector(x[0]) for x in nearest]) # Remove query index from search result to make sure that # recall and precision make sense in terms of "neighbours". # If ONLY the query vector is retrieved, we want recall to be # zero! nearest.remove(index) # If the result list is empty, recall and precision are 0.0 if len(nearest) == 0: recall = 0.0 precision = 0.0 else: # Get intersection count inter_count = float(len(real_nearest & nearest)) # Normalize recall for this vector recall = inter_count/float(len(real_nearest)) # Normalize precision for this vector precision = inter_count/float(len(nearest)) # Add to accumulator avg_recall += recall # Add to accumulator avg_precision += precision # Add to accumulator avg_search_time += search_time # Normalize recall over query set avg_recall /= float(len(self.query_indices)) # Normalize precision over query set avg_precision /= float(len(self.query_indices)) # Normalize search time over query set avg_search_time = avg_search_time / float(len(self.query_indices)) # Normalize search time with respect to exact search avg_search_time /= self.exact_search_time_per_vector print(' recall=%f, precision=%f, time=%f' % (avg_recall, avg_precision, avg_search_time)) result.append((avg_recall, avg_precision, avg_search_time)) # Return (recall, precision, search_time) tuple return result
Performs nearest neighbour recall experiments with custom vector data for all engines in the specified list. Returns a list of (recall, precision, search_time) tuples, one per engine. All values are averaged over all query vectors; search_time is the average retrieval/search time relative to the average exact search time.
def add_cmd_method(self, name, method, argc=None, complete=None): """Adds a command to the command line interface loop. Parameters ---------- name : string The command. method : function(args) The function to execute when this command is issued. The argument of the function is a list of space separated arguments to the command. argc : int, optional (default=None) The number of expected further arguments. If None arguments are not restricted. complete : function(args, text), optional (default=None) A function that is called to complete further arguments. If None no suggestions are made. The function gets the arguments up to the incomplete argument (args). text contains the to be completed argument. The function must returns a list of suggestions or None if text is valid already and there are no further suggestions. """ if ' ' in name: raise ValueError("' ' cannot be in command name {0}".format(name)) self._cmd_methods[name] = method self._cmd_argc[name] = argc self._cmd_complete[name] = complete
Adds a command to the command line interface loop. Parameters ---------- name : string The command. method : function(args) The function to execute when this command is issued. The argument of the function is a list of space separated arguments to the command. argc : int, optional (default=None) The number of expected further arguments. If None, arguments are not restricted. complete : function(args, text), optional (default=None) A function that is called to complete further arguments. If None, no suggestions are made. The function gets the arguments up to the incomplete argument (args). text contains the to be completed argument. The function must return a list of suggestions or None if text is already valid and there are no further suggestions.
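A hedged registration example: `cli` stands in for an instance of the class that defines add_cmd_method, and the command name, handler, and completion candidates are hypothetical.

def do_load(args):
    print("loading", args[0])

def complete_load(args, text):
    # suggest files whose names start with the partial argument
    return [f for f in ("model.bin", "model.json") if f.startswith(text)]

cli.add_cmd_method("load", do_load, argc=1, complete=complete_load)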
def _filter_by_zoom(element=None, conf_string=None, zoom=None): """Return element only if zoom condition matches with config string.""" for op_str, op_func in [ # order of operators is important: # prematurely return in cases of "<=" or ">=", otherwise # _strip_zoom() cannot parse config strings starting with "<" # or ">" ("=", operator.eq), ("<=", operator.le), (">=", operator.ge), ("<", operator.lt), (">", operator.gt), ]: if conf_string.startswith(op_str): return element if op_func(zoom, _strip_zoom(conf_string, op_str)) else None
Return element only if zoom condition matches with config string.
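Hedged examples of the comparison strings; `_strip_zoom` is assumed to drop the operator prefix and return the remaining zoom bound as a number.

print(_filter_by_zoom(element="raster", conf_string="<=10", zoom=8))   # 'raster'
print(_filter_by_zoom(element="raster", conf_string="<=10", zoom=12))  # None
print(_filter_by_zoom(element="raster", conf_string="=5", zoom=5))     # 'raster'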
def _dump(self, tag, x, lo, hi): """Generate comparison results for a same-tagged range.""" for i in xrange(lo, hi): yield '%s %s' % (tag, x[i])
Generate comparison results for a same-tagged range.
def _get_offset(cmd): """Return the offset into the cmd based on whether it's a dictionary page or a data page.""" dict_offset = cmd.dictionary_page_offset data_offset = cmd.data_page_offset if dict_offset is None or data_offset < dict_offset: return data_offset return dict_offset
Return the offset into the cmd based on whether it's a dictionary page or a data page.
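A tiny stand-in for the parquet column-chunk metadata object this helper expects (the real object has many more fields); the attribute names mirror what the function reads.

from types import SimpleNamespace

chunk = SimpleNamespace(dictionary_page_offset=100, data_page_offset=250)
print(_get_offset(chunk))  # 100: the dictionary page precedes the data pages

chunk = SimpleNamespace(dictionary_page_offset=None, data_page_offset=4)
print(_get_offset(chunk))  # 4: no dictionary page, so start at the first data page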
def retrieveAcknowledge(): """RETRIEVE ACKNOWLEDGE Section 9.3.21""" a = TpPd(pd=0x3) b = MessageType(mesType=0x1d) # 00011101 packet = a / b return packet
RETRIEVE ACKNOWLEDGE Section 9.3.21
def downloadFile(self, filename, ispickle=False, athome=False): """ Downloads a single file from Redunda. :param str filename: The name of the file you want to download :param bool ispickle: Optional variable which tells if the file to be downloaded is a pickle; default is False. :returns: returns nothing """ print("Downloading file {} from Redunda.".format(filename)) _, tail = os.path.split(filename) url = "https://redunda.sobotics.org/bots/data/{}?key={}".format(tail, self.key) requestToMake = request.Request(url) #Make the request. response = request.urlopen(requestToMake) if response.code != 200: print("Error occured while downloading file '{}' with error code {}.".format(filename,response.code)) if athome: filename = str(os.path.expanduser("~")) + filename filedata = response.read().decode("utf-8") try: if filename.endswith (".pickle") or ispickle: data = json.loads(filedata) try: with open(filename, "wb") as fileToWrite: pickle.dump (data, fileToWrite) except pickle.PickleError as perr: print("Pickling error occurred: {}".format(perr)) return else: with open (filename, "w") as fileToWrite: fileToWrite.write(filedata) except IOError as ioerr: print("IOError occurred: {}".format(ioerr)) return
Downloads a single file from Redunda. :param str filename: The name of the file you want to download :param bool ispickle: Optional flag telling whether the file to be downloaded is a pickle; default is False. :returns: nothing
def get_vnetwork_hosts_output_vnetwork_hosts_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_hosts = ET.Element("get_vnetwork_hosts") config = get_vnetwork_hosts output = ET.SubElement(get_vnetwork_hosts, "output") vnetwork_hosts = ET.SubElement(output, "vnetwork-hosts") name = ET.SubElement(vnetwork_hosts, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def occurrences(coll, value=None, **options): """Return the occurrences of the elements in the collection :param coll: a collection :param value: a value in the collection :param options: an optional keyword used as a criterion to filter the values in the collection :returns: the frequency of the values in the collection as a dictionary >>> occurrences((1, 1, 2, 3)) {1: 2, 2: 1, 3: 1} >>> occurrences((1, 1, 2, 3), 1) 2 Filter the values of the occurrences that are <, <=, >, >=, == or != than a given number:: >>> occurrences((1, 1, 2, 3), lt=3) {1: 2, 2: 1, 3: 1} >>> occurrences((1, 1, 2, 3), gt=1) {1: 2} >>> occurrences((1, 1, 2, 3), ne=1) {1: 2} """ count = {} for element in coll: count[element] = count.get(element, 0) + 1 if options: count = _filter_occurrences(count, options) if value: count = count.get(value, 0) return count
Return the occurrences of the elements in the collection :param coll: a collection :param value: a value in the collection :param options: an optional keyword used as a criterion to filter the values in the collection :returns: the frequency of the values in the collection as a dictionary >>> occurrences((1, 1, 2, 3)) {1: 2, 2: 1, 3: 1} >>> occurrences((1, 1, 2, 3), 1) 2 Filter the values of the occurrences that are <, <=, >, >=, == or != than a given number:: >>> occurrences((1, 1, 2, 3), lt=3) {1: 2, 2: 1, 3: 1} >>> occurrences((1, 1, 2, 3), gt=1) {1: 2} >>> occurrences((1, 1, 2, 3), ne=1) {1: 2}
def url_for(**options): '''Returns the url for the specified options''' url_parts = get_url_parts(**options) image_hash = hashlib.md5(b(options['image_url'])).hexdigest() url_parts.append(image_hash) return "/".join(url_parts)
Returns the url for the specified options
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False): """Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document. """ if search_entire_document: xml_endpos = html_endpos = len(markup) else: xml_endpos = 1024 html_endpos = max(2048, int(len(markup) * 0.05)) declared_encoding = None declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos) if not declared_encoding_match and is_html: declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos) if declared_encoding_match is not None: declared_encoding = declared_encoding_match.groups()[0].decode( 'ascii', 'replace') if declared_encoding: return declared_encoding.lower() return None
Given a document, tries to find its declared encoding. An XML encoding is declared at the beginning of the document. An HTML encoding is declared in a <meta> tag, hopefully near the beginning of the document.
def process_match(match, fixed_text, cur, cur_end): """Processes a single match in rules""" # Set our tools # -- Initial/default value for replace replace = True # -- Set check cursor depending on match['type'] if match['type'] == 'prefix': chk = cur - 1 else: # suffix chk = cur_end # -- Set scope based on whether scope is negative if match['scope'].startswith('!'): scope = match['scope'][1:] negative = True else: scope = match['scope'] negative = False # Let the matching begin # -- Punctuations if scope == 'punctuation': # Conditions: XORd with negative if (not ((chk < 0 and match['type'] == 'prefix') or (chk >= len(fixed_text) and match['type'] == 'suffix') or validate.is_punctuation(fixed_text[chk])) ^ negative): replace = False # -- Vowels -- Checks: 1. Cursor should not be at first character # -- if prefix or last character if suffix, 2. Character at chk # -- should be a vowel. 3. 'negative' will invert the value of 1 # -- AND 2 elif scope == 'vowel': if (not (((chk >= 0 and match['type'] == 'prefix') or (chk < len(fixed_text) and match['type'] == 'suffix')) and validate.is_vowel(fixed_text[chk])) ^ negative): replace = False # -- Consonants -- Checks: 1. Cursor should not be at first # -- character if prefix or last character if suffix, 2. Character # -- at chk should be a consonant. 3. 'negative' will invert the # -- value of 1 AND 2 elif scope == 'consonant': if (not (((chk >= 0 and match['type'] == 'prefix') or (chk < len(fixed_text) and match['type'] == 'suffix')) and validate.is_consonant(fixed_text[chk])) ^ negative): replace = False # -- Exacts elif scope == 'exact': # Prepare cursor for exact search if match['type'] == 'prefix': exact_start = cur - len(match['value']) exact_end = cur else: # suffix exact_start = cur_end exact_end = cur_end + len(match['value']) # Validate exact find. if not validate.is_exact(match['value'], fixed_text, exact_start, exact_end, negative): replace = False # Return replace, which will be true if none of the checks above match return replace
Processes a single match in rules
def get_forces(self, a): """Calculate atomic forces.""" f = np.zeros( [ len(a), 3 ], dtype=float ) for c in self.calcs: f += c.get_forces(a) return f
Calculate atomic forces.
def check_pypi_exists(dependencies): """Check if the indicated dependencies actually exist in PyPI.""" for dependency in dependencies.get('pypi', []): logger.debug("Checking if %r exists in PyPI", dependency) try: exists = _pypi_head_package(dependency) except Exception as error: logger.error("Error checking %s in PyPI: %r", dependency, error) raise FadesError("Could not check if dependency exists in PyPI") else: if not exists: logger.error("%s doesn't exist in PyPI.", dependency) return False return True
Check if the indicated dependencies actually exist in PyPI.
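A hypothetical call; the expected shape of `dependencies` (a dict keyed by repository name) is inferred from the .get('pypi', []) lookup above.

deps = {'pypi': ['requests', 'some-package-that-should-not-exist-12345']}
if not check_pypi_exists(deps):
    print("at least one dependency is not published on PyPI")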
def _non_blocking_wrapper(self, method, *args, **kwargs): """Runs given method on every task in the job. Blocks until all tasks finish. Propagates exception from first failed task.""" exceptions = [] def task_run(task): try: getattr(task, method)(*args, **kwargs) except Exception as e: exceptions.append(e) threads = [threading.Thread(name=f'task_{method}_{i}', target=task_run, args=[t]) for i, t in enumerate(self.tasks)] for thread in threads: thread.start() for thread in threads: thread.join() if exceptions: raise exceptions[0]
Runs given method on every task in the job. Blocks until all tasks finish. Propagates exception from first failed task.
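A standalone sketch of the same fan-out/join pattern with fake tasks; it treats the function above as if it were available at module level and reuses it as a method (in the real source it is defined on the job class itself).

import threading  # required by the wrapper above

class _FakeTask:
    def __init__(self, n):
        self.n = n
    def setup(self, prefix):
        print(f"{prefix} task {self.n}")

class _FakeJob:
    def __init__(self):
        self.tasks = [_FakeTask(i) for i in range(3)]
    _non_blocking_wrapper = _non_blocking_wrapper  # reuse the wrapper as a bound method

_FakeJob()._non_blocking_wrapper('setup', 'starting')  # all three setups run in parallel; joins before returning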
def work(options): """The work functions""" # pylint: disable=too-many-locals record = get_record(options) _, mainv, dailyv, _, _, _, safebrowsingv, bytecodev = record.split(':') versions = {'main': mainv, 'daily': dailyv, 'safebrowsing': safebrowsingv, 'bytecode': bytecodev} dqueue = Queue(maxsize=0) dqueue_workers = 3 info("[+] \033[92mStarting workers\033[0m") for index in range(dqueue_workers): info("=> Starting diff download worker: %d" % (index + 1)) worker = Thread(target=download_diffs, args=(dqueue,)) worker.setDaemon(True) worker.start() mqueue = Queue(maxsize=0) mqueue_workers = 4 for index in range(mqueue_workers): info("=> Starting signature download worker: %d" % (index + 1)) worker = Thread(target=update_sig, args=(mqueue,)) worker.setDaemon(True) worker.start() for signature_type in ['main', 'daily', 'bytecode', 'safebrowsing']: if signature_type in ['daily', 'bytecode', 'safebrowsing']: # cdiff downloads localver = get_local_version(options.mirrordir, signature_type) remotever = versions[signature_type] if localver is not None: dqueue.put( ( options, signature_type, localver, remotever ) ) mqueue.put((options, signature_type, versions)) info("=> Waiting on workers to complete tasks") dqueue.join() mqueue.join() info("=> Workers done processing queues") create_dns_file(options, record) sys.exit(0)
The work functions
def toggle_deriv(self, evt=None, value=None): "toggle derivative of data" if value is None: self.conf.data_deriv = not self.conf.data_deriv expr = self.conf.data_expr or '' if self.conf.data_deriv: expr = "deriv(%s)" % expr self.write_message("plotting %s" % expr, panel=0) self.conf.process_data()
toggle derivative of data
def _set_tunnel(self, v, load=False): """ Setter method for tunnel, mapped from YANG variable /interface/tunnel (list) If this variable is read-only (config: false) in the source YANG file, then _set_tunnel is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tunnel() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """tunnel must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("identifier",tunnel.tunnel, yang_name="tunnel", rest_name="tunnel", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='identifier', extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}), is_container='list', yang_name="tunnel", rest_name="tunnel", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel <identifier>', u'cli-full-command': None, u'callpoint': u'GreVxlanTunnelCallpoint', u'cli-suppress-list-no': None, u'cli-mode-name': u'config-intf-tunnel-$(identifier)'}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='list', is_config=True)""", }) self.__tunnel = t if hasattr(self, '_set'): self._set()
Setter method for tunnel, mapped from YANG variable /interface/tunnel (list) If this variable is read-only (config: false) in the source YANG file, then _set_tunnel is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_tunnel() directly.
def build(self, root, schema): """ Build the syntax tree for kubectl command line """ if schema.get("subcommands") and schema["subcommands"]: for subcmd, childSchema in schema["subcommands"].items(): child = CommandTree(node=subcmd) child = self.build(child, childSchema) root.children.append(child) # {args: {}, options: {}, help: ""} root.help = schema.get("help") for name, desc in schema.get("options").items(): if root.node == "kubectl": # register global flags self.globalFlags.append(Option(name, desc["help"])) root.localFlags.append(Option(name, desc["help"])) for arg in schema.get("args"): node = CommandTree(node=arg) root.children.append(node) return root
Build the syntax tree for kubectl command line