code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def configure(self, options, conf):
    """Configure the plugin from parsed command-line options.

    The plugin is enabled by default; this records the nose config
    object and the ``browser_closer_when`` option for later use.
    """
    self.conf = conf
    self.when = options.browser_closer_when
Configure plugin. Plugin is enabled by default.
def update_container(self, container, metadata, **kwargs):
    """Update container metadata.

    :param container: container name (Container is equivalent to
        Bucket term in Amazon).
    :param metadata(dict): additional metadata to include in the request.
    :param **kwargs(dict): extend args for specific driver.
    """
    # BUG FIX: the debug message previously said 'update_object()',
    # which mislabelled this operation in the logs.
    LOG.debug('update_container() with %s is success.', self.driver)
    return self.driver.update_container(container, metadata, **kwargs)
Update container metadata :param container: container name (Container is equivalent to Bucket term in Amazon). :param metadata(dict): additional metadata to include in the request. :param **kwargs(dict): extend args for specific driver.
def _update_config_tags(self, directory, files=None):
    """
    Loads tags information from file and updates on flickr, only reads
    first line. Format is comma separated eg. travel, 2010, South Africa,
    Pretoria.

    If files is None, will update all files in DB, otherwise will only
    update files that are in the flickr DB and files list.

    Returns True when at least one photo was updated and every attempted
    update succeeded; False on connection failure, on any flickr error,
    or when nothing was updated.
    """
    if not self._connectToFlickr():
        print("%s - Couldn't connect to flickr" % (directory))
        return False
    logger.debug("Updating tags in %s" % (directory))
    _tags = self._load_tags(directory)
    # --- Load DB of photos, and update them all with new tags
    db = self._loadDB(directory)
    updated = False
    for fn in db:
        # --- If file list provided, skip files not in the list
        if files and fn not in files:
            logger.debug('%s [flickr] Skipping, tag update', fn)
            continue
        logger.info("%s [flickr] Updating tags [%s]" % (fn, _tags))
        pid = db[fn]['photoid']
        resp = self.flickr.photos_setTags(photo_id=pid, tags=_tags)
        if resp.attrib['stat'] != 'ok':
            # BUG FIX: the format string had two %s placeholders but only
            # one argument was supplied; pass the filename as well.
            logger.error("%s - flickr: photos_setTags failed with status: %s",
                         fn, resp.attrib['stat'])
            return False
        updated = True
    # BUG FIX: the original returned from inside the loop after the first
    # photo, so only one file was ever updated despite the docstring
    # promising all files would be processed.
    return updated
Loads tags information from file and updates on flickr, only reads first line. Format is comma separated eg. travel, 2010, South Africa, Pretoria If files is None, will update all files in DB, otherwise will only update files that are in the flickr DB and files list
def loader():
    """Load image from URL, and preprocess for Resnet."""
    image_url = request.args.get('url')  # image URL comes in as a query param
    resp = requests.get(image_url)       # fetch the static image file
    return resp.content
Load image from URL, and preprocess for Resnet.
def hostedzone_from_element(zone):
    """
    Construct a L{HostedZone} instance from a I{HostedZone} XML element.
    """
    raw_name = maybe_bytes_to_unicode(zone.find("Name").text)
    raw_id = maybe_bytes_to_unicode(zone.find("Id").text)
    return HostedZone(
        # Zone names arrive IDNA-encoded; decode to the unicode form.
        name=raw_name.encode("ascii").decode("idna"),
        # The Id element carries a "/hostedzone/" prefix; strip it.
        identifier=raw_id.replace(u"/hostedzone/", u""),
        rrset_count=int(zone.find("ResourceRecordSetCount").text),
        reference=maybe_bytes_to_unicode(zone.find("CallerReference").text),
    )
Construct a L{HostedZone} instance from a I{HostedZone} XML element.
def maybe_convert_platform(values):
    """ try to do platform conversion, allow ndarray or list here """
    # Box listlikes into a 1-d object ndarray first so the dtype check
    # below applies uniformly.
    if isinstance(values, (list, tuple)):
        values = construct_1d_object_array_from_listlike(list(values))
    if getattr(values, 'dtype', None) == np.object_:
        # Unwrap pandas containers to their underlying ndarray before
        # attempting object -> native dtype inference.
        if hasattr(values, '_values'):
            values = values._values
        values = lib.maybe_convert_objects(values)
    return values
try to do platform conversion, allow ndarray or list here
def disable_constant(parameterized):
    """
    Temporarily set parameters on Parameterized object to constant=False.

    Generator-based context manager: saves each parameter's ``constant``
    flag, clears it, yields, and always restores the saved flags —
    even if the managed body raises.
    """
    params = parameterized.params().values()
    constants = [p.constant for p in params]
    for p in params:
        p.constant = False
    try:
        yield
    # Removed a redundant `except: raise` clause — re-raising a bare
    # exception unchanged is a no-op; `finally` already guarantees cleanup.
    finally:
        for (p, const) in zip(params, constants):
            p.constant = const
Temporarily set parameters on Parameterized object to constant=False.
def str2dn(dn, flags=0):
    """
    This function takes a DN as string as parameter and returns a
    decomposed DN. It's the inverse to dn2str().

    flags describes the format of the dn (only 0 is supported).

    See also the OpenLDAP man-page ldap_str2dn(3)
    """
    # if python2, we need unicode string
    if not isinstance(dn, six.text_type):
        dn = dn.decode("utf_8")
    assert flags == 0
    parsed, consumed = _distinguishedName(dn, 0)
    if parsed is None:
        raise tldap.exceptions.InvalidDN("Cannot parse dn")
    if consumed != len(dn):
        raise tldap.exceptions.InvalidDN(
            "Cannot parse dn past %s" % dn[consumed:])
    return parsed
This function takes a DN as string as parameter and returns a decomposed DN. It's the inverse to dn2str(). flags describes the format of the dn See also the OpenLDAP man-page ldap_str2dn(3)
def make_coord_dict(subs, subscript_dict, terse=True):
    """
    Build the coordinate dict needed to index an xarray quantity,
    i.e. the %s in the expression `variable.loc[%s]`.

    Parameters
    ----------
    subs: list of strings
        coordinates, either as names of dimensions, or positions within
        a dimension
    subscript_dict: dict
        the full dictionary of subscript names and values
    terse: Binary Flag
        - If true, includes only elements that do not cover the full
          range of values in their respective dimension
        - If false, returns all dimensions

    Returns
    -------
    coordinates: dictionary
        Coordinates needed to access the xarray quantities we're
        interested in.

    Examples
    --------
    >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']})
    {'Dim2': ['D']}
    >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2':['D', 'E', 'F']},
    >>> terse=False)
    {'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']}
    """
    # Flatten all subscript elements for O(1) membership tests.
    all_elements = {elem for values in subscript_dict.values()
                    for elem in values}
    coordinates = {}
    for sub in subs:
        if sub in all_elements:
            # A single element pins its parent dimension to one value.
            coordinates[find_subscript_name(subscript_dict, sub)] = [sub]
        elif not terse:
            # A dimension name covers its full range of values.
            coordinates[sub] = subscript_dict[sub]
    return coordinates
This is for assisting with the lookup of a particular element, such that the output of this function would take the place of %s in this expression `variable.loc[%s]` Parameters ---------- subs: list of strings coordinates, either as names of dimensions, or positions within a dimension subscript_dict: dict the full dictionary of subscript names and values terse: Binary Flag - If true, includes only elements that do not cover the full range of values in their respective dimension - If false, returns all dimensions Returns ------- coordinates: dictionary Coordinates needed to access the xarray quantities we're interested in. Examples -------- >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2': ['D', 'E', 'F']}) {'Dim2': ['D']} >>> make_coord_dict(['Dim1', 'D'], {'Dim1': ['A', 'B', 'C'], 'Dim2':['D', 'E', 'F']}, >>> terse=False) {'Dim2': ['D'], 'Dim1': ['A', 'B', 'C']}
def identityRequest():
    """IDENTITY REQUEST Section 9.2.10"""
    protocol = TpPd(pd=0x5)
    message = MessageType(mesType=0x8)  # 00001000
    identity = IdentityTypeAndSpareHalfOctets()
    return protocol / message / identity
IDENTITY REQUEST Section 9.2.10
def clean_folder_path(path, expected=None):
    '''
    :param path: A folder path to sanitize and parse
    :type path: string
    :param expected: Whether a folder ("folder"), a data object ("entity"), or either (None) is expected
    :type expected: string or None
    :returns: *folderpath*, *name*

    Unescape and parse *path* as a folder path to possibly an entity name.

    Consecutive unescaped forward slashes "/" are collapsed to a single
    forward slash.

    If *expected* is "folder", *name* is always returned as None. Otherwise,
    the string to the right of the last unescaped "/" is considered a
    possible data object name and returned as such.
    '''
    folders = split_unescaped('/', path)

    # Empty path (or all slashes) collapses to the root folder.
    if len(folders) == 0:
        return '/', None

    # The last component is an entity name only when we are not explicitly
    # expecting a folder, it is not a '.'/'..' pseudo-component, and the
    # path does not end with an unescaped '/'.
    if expected == 'folder' or folders[-1] == '.' or folders[-1] == '..' or get_last_pos_of_char('/', path) == len(path) - 1:
        entity_name = None
    else:
        entity_name = unescape_name_str(folders.pop())

    sanitized_folders = []

    for folder in folders:
        if folder == '.':
            # '.' means the current folder; drop it.
            pass
        elif folder == '..':
            # '..' pops one level but never climbs above the root.
            if len(sanitized_folders) > 0:
                sanitized_folders.pop()
        else:
            sanitized_folders.append(unescape_folder_str(folder))

    return ('/' + '/'.join(sanitized_folders)), entity_name
:param path: A folder path to sanitize and parse :type path: string :param expected: Whether a folder ("folder"), a data object ("entity"), or either (None) is expected :type expected: string or None :returns: *folderpath*, *name* Unescape and parse *path* as a folder path to possibly an entity name. Consecutive unescaped forward slashes "/" are collapsed to a single forward slash. If *expected* is "folder", *name* is always returned as None. Otherwise, the string to the right of the last unescaped "/" is considered a possible data object name and returned as such.
def get(feature, obj, **kwargs):
    '''Obtain a feature from a set of morphology objects

    Parameters:
        feature(string): feature to extract
        obj: a neuron, population or neurite tree
        **kwargs: parameters to forward to underlying worker functions

    Returns:
        features as a 1D or 2D numpy array.
    '''
    # Neurite features take precedence over neuron features.
    if feature in NEURITEFEATURES:
        worker = NEURITEFEATURES[feature]
    else:
        worker = NEURONFEATURES[feature]
    return _np.array(list(worker(obj, **kwargs)))
Obtain a feature from a set of morphology objects Parameters: feature(string): feature to extract obj: a neuron, population or neurite tree **kwargs: parameters to forward to underlying worker functions Returns: features as a 1D or 2D numpy array.
def model_fn(features, labels, mode, params, config):
  """Builds the model function for use in an estimator.

  Arguments:
    features: The input features for the estimator.
    labels: The labels, unused here.
    mode: Signifies whether it is train or test or predict.
    params: Some hyperparameters as a dictionary.
    config: The RunConfig, unused here.

  Returns:
    EstimatorSpec: A tf.estimator.EstimatorSpec instance.
  """
  del labels, config

  if params["analytic_kl"] and params["mixture_components"] != 1:
    raise NotImplementedError(
        "Using `analytic_kl` is only supported when `mixture_components = 1` "
        "since there's no closed form otherwise.")

  # Build the encoder/decoder networks and the latent prior.
  encoder = make_encoder(params["activation"],
                         params["latent_size"],
                         params["base_depth"])
  decoder = make_decoder(params["activation"],
                         params["latent_size"],
                         IMAGE_SHAPE,
                         params["base_depth"])
  latent_prior = make_mixture_prior(params["latent_size"],
                                    params["mixture_components"])

  image_tile_summary(
      "input", tf.cast(features, dtype=tf.float32), rows=1, cols=16)

  # Sample n_samples latents per input and decode them.
  approx_posterior = encoder(features)
  approx_posterior_sample = approx_posterior.sample(params["n_samples"])
  decoder_likelihood = decoder(approx_posterior_sample)
  image_tile_summary(
      "recon/sample",
      tf.cast(decoder_likelihood.sample()[:3, :16], dtype=tf.float32),
      rows=3,
      cols=16)
  image_tile_summary(
      "recon/mean",
      decoder_likelihood.mean()[:3, :16],
      rows=3,
      cols=16)

  # `distortion` is just the negative log likelihood.
  distortion = -decoder_likelihood.log_prob(features)
  avg_distortion = tf.reduce_mean(input_tensor=distortion)
  tf.compat.v1.summary.scalar("distortion", avg_distortion)

  # `rate` is KL(q(z|x) || p(z)): analytic when available, otherwise a
  # Monte-Carlo estimate from the posterior samples.
  if params["analytic_kl"]:
    rate = tfd.kl_divergence(approx_posterior, latent_prior)
  else:
    rate = (approx_posterior.log_prob(approx_posterior_sample) -
            latent_prior.log_prob(approx_posterior_sample))
  avg_rate = tf.reduce_mean(input_tensor=rate)
  tf.compat.v1.summary.scalar("rate", avg_rate)

  elbo_local = -(rate + distortion)

  elbo = tf.reduce_mean(input_tensor=elbo_local)
  loss = -elbo
  tf.compat.v1.summary.scalar("elbo", elbo)

  # Importance-weighted ELBO (IWAE-style bound) over the n_samples draws.
  importance_weighted_elbo = tf.reduce_mean(
      input_tensor=tf.reduce_logsumexp(input_tensor=elbo_local, axis=0) -
      tf.math.log(tf.cast(params["n_samples"], dtype=tf.float32)))
  tf.compat.v1.summary.scalar("elbo/importance_weighted",
                              importance_weighted_elbo)

  # Decode samples from the prior for visualization.
  random_image = decoder(latent_prior.sample(16))
  image_tile_summary(
      "random/sample",
      tf.cast(random_image.sample(), dtype=tf.float32),
      rows=4,
      cols=4)
  image_tile_summary("random/mean", random_image.mean(), rows=4, cols=4)

  # Perform variational inference by minimizing the -ELBO.
  global_step = tf.compat.v1.train.get_or_create_global_step()
  learning_rate = tf.compat.v1.train.cosine_decay(
      params["learning_rate"], global_step, params["max_steps"])
  tf.compat.v1.summary.scalar("learning_rate", learning_rate)
  optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)

  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metric_ops={
          "elbo": tf.compat.v1.metrics.mean(elbo),
          "elbo/importance_weighted":
              tf.compat.v1.metrics.mean(importance_weighted_elbo),
          "rate": tf.compat.v1.metrics.mean(avg_rate),
          "distortion": tf.compat.v1.metrics.mean(avg_distortion),
      },
  )
Builds the model function for use in an estimator. Arguments: features: The input features for the estimator. labels: The labels, unused here. mode: Signifies whether it is train or test or predict. params: Some hyperparameters as a dictionary. config: The RunConfig, unused here. Returns: EstimatorSpec: A tf.estimator.EstimatorSpec instance.
def assertDateTimesBefore(self, sequence, target, strict=True, msg=None):
    '''Fail if any elements in ``sequence`` are not before ``target``.

    If ``target`` is iterable, it must have the same length as
    ``sequence``

    If ``strict=True``, fail unless all elements in ``sequence``
    are strictly less than ``target``. If ``strict=False``, fail
    unless all elements in ``sequence`` are less than or equal to
    ``target``.

    Parameters
    ----------
    sequence : iterable
    target : datetime, date, iterable
    strict : bool
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    ------
    TypeError
        If ``sequence`` is not iterable.
    ValueError
        If ``target`` is iterable but does not have the same length
        as ``sequence``.
    TypeError
        If ``target`` is not a datetime or date object and is not
        iterable.
    '''
    # BUG FIX: collections.Iterable was deprecated in Python 3.3 and
    # removed in 3.10; the ABC lives in collections.abc.
    if not isinstance(sequence, collections.abc.Iterable):
        raise TypeError('First argument is not iterable')

    if strict:
        standardMsg = '%s is not strictly less than %s' % (sequence, target)
        op = operator.lt
    else:
        standardMsg = '%s is not less than %s' % (sequence, target)
        op = operator.le

    # Null date(time)s will always compare False, but
    # we want to know about null date(time)s
    if isinstance(target, collections.abc.Iterable):
        if len(target) != len(sequence):
            raise ValueError(('Length mismatch: '
                              'first argument contains %s elements, '
                              'second argument contains %s elements' % (
                                  len(sequence), len(target))))
        if not all(op(i, j) for i, j in zip(sequence, target)):
            self.fail(self._formatMessage(msg, standardMsg))
    elif isinstance(target, (date, datetime)):
        if not all(op(element, target) for element in sequence):
            self.fail(self._formatMessage(msg, standardMsg))
    else:
        raise TypeError(
            'Second argument is not a datetime or date object or iterable')
Fail if any elements in ``sequence`` are not before ``target``. If ``target`` is iterable, it must have the same length as ``sequence`` If ``strict=True``, fail unless all elements in ``sequence`` are strictly less than ``target``. If ``strict=False``, fail unless all elements in ``sequence`` are less than or equal to ``target``. Parameters ---------- sequence : iterable target : datetime, date, iterable strict : bool msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used. Raises ------ TypeError If ``sequence`` is not iterable. ValueError If ``target`` is iterable but does not have the same length as ``sequence``. TypeError If ``target`` is not a datetime or date object and is not iterable.
def create_defaults_for(session, user, only_for=None, detail_values=None):
    """ Create a sizable amount of defaults for a new user. """

    detail_values = detail_values or {}

    # Defaults only make sense for Fedora accounts.
    if not user.openid.endswith('.fedoraproject.org'):
        log.warn("New user not from fedoraproject.org. No defaults set.")
        return

    # the openid is of the form USERNAME.id.fedoraproject.org
    nick = user.openid.split('.')[0]

    # TODO -- make the root here configurable.
    valid_paths = fmn.lib.load_rules(root='fmn.rules')

    # NOTE(review): rule_maker appears unused in this function — confirm
    # before removing.
    def rule_maker(path, **kw):
        """ Shorthand function, used inside loops below. """
        return fmn.lib.models.Rule.create_from_code_path(
            session, valid_paths, path, **kw)

    def contexts():
        # Yield each requested messaging context that exists in the DB.
        names = ['email', 'irc', 'sse']
        if only_for:
            names = [only_for.name]

        for name in names:
            context = fmn.lib.models.Context.get(session, name)
            if context:
                yield context
            else:
                log.warn("No such context %r is in the DB." % name)

    # For each context, build one little and two big filters
    for context in contexts():
        pref = fmn.lib.models.Preference.load(session, user, context)
        if not pref:
            value = detail_values.get(context.name)
            pref = fmn.lib.models.Preference.create(
                session, user, context, detail_value=value)

        # Add a filter that looks for packages of this user
        filt = fmn.lib.models.Filter.create(
            session, "Events on packages that I own")
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_package_filter", fasnick=nick)

        # If this is a message about a package of mine, **and** i'm responsible
        # for it, then don't trigger this filter. Rely on the previous one.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_filter", fasnick=nick, negated=True)

        # Right off the bat, ignore all messages from non-primary kojis.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:koji_instance",
                      instance="ppc,s390,arm", negated=True)

        # And furthermore, exclude lots of message types
        for code_path in exclusion_packages + exclusion_mutual:
            filt.add_rule(
                session, valid_paths,
                "fmn.rules:%s" % code_path, negated=True)

        pref.add_filter(session, filt, notify=True)
        # END "packages I own"

        # Add a filter that looks for this user
        filt = fmn.lib.models.Filter.create(
            session, "Events referring to my username")
        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_filter", fasnick=nick)

        # Right off the bat, ignore all messages from non-primary kojis.
        filt.add_rule(session, valid_paths,
                      "fmn.rules:koji_instance",
                      instance="ppc,s390,arm", negated=True)

        # And furthermore exclude lots of message types
        for code_path in exclusion_username + exclusion_mutual:
            filt.add_rule(
                session, valid_paths,
                "fmn.rules:%s" % code_path, negated=True)

        pref.add_filter(session, filt, notify=True)
        # END "events references my username"

        # Add a taskotron filter
        filt = fmn.lib.models.Filter.create(
            session, "Critical taskotron tasks on my packages")

        filt.add_rule(session, valid_paths,
                      "fmn.rules:user_package_filter", fasnick=nick)
        filt.add_rule(session, valid_paths,
                      "fmn.rules:taskotron_release_critical_task")
        filt.add_rule(session, valid_paths,
                      "fmn.rules:taskotron_task_particular_or_changed_outcome",
                      outcome='FAILED')

        pref.add_filter(session, filt, notify=True)
Create a sizable amount of defaults for a new user.
def equilibrium_transition_matrix(Xi, omega, sigma, reversible=True, return_lcc=True):
    """
    Compute equilibrium transition matrix from OOM components:

    Parameters
    ----------
    Xi : ndarray(M, N, M)
        matrix of set-observable operators
    omega: ndarray(M,)
        information state vector of OOM
    sigma : ndarray(M,)
        evaluator of OOM
    reversible : bool, optional, default=True
        symmetrize corrected count matrix in order to obtain a reversible
        transition matrix.
    return_lcc: bool, optional, default=True
        return indices of largest connected set.

    Returns
    -------
    Tt_Eq : ndarray(N, N)
        equilibrium transition matrix
    lcc : ndarray(M,)
        the largest connected set of the transition matrix.
    """
    import msmtools.estimation as me

    # Equilibrium count matrix assembled from the OOM components.
    count_matrix = np.einsum('j,jkl,lmn,n->km', omega, Xi, Xi, sigma)
    # Clip spurious negative entries.
    count_matrix[count_matrix < 0.0] = 0.0

    row_sums = np.sum(count_matrix, axis=1)
    if reversible:
        # Symmetrize before normalizing so the result is reversible.
        col_sums = np.sum(count_matrix, axis=0)
        norm = row_sums + col_sums
        # Avoid zero row-sums. States with zero row-sums will be
        # eliminated by the active set update below.
        norm[np.where(norm == 0.0)[0]] = 1.0
        transition_matrix = (count_matrix + count_matrix.T) / norm[:, None]
    else:
        # Avoid zero row-sums (eliminated by the active set update below).
        row_sums[np.where(row_sums == 0.0)[0]] = 1.0
        transition_matrix = count_matrix / row_sums[:, None]

    # Perform active set update: restrict to the largest connected set.
    lcc = me.largest_connected_set(transition_matrix)
    transition_matrix = me.largest_connected_submatrix(transition_matrix, lcc=lcc)

    if return_lcc:
        return transition_matrix, lcc
    return transition_matrix
Compute equilibrium transition matrix from OOM components: Parameters ---------- Xi : ndarray(M, N, M) matrix of set-observable operators omega: ndarray(M,) information state vector of OOM sigma : ndarray(M,) evaluator of OOM reversible : bool, optional, default=True symmetrize corrected count matrix in order to obtain a reversible transition matrix. return_lcc: bool, optional, default=True return indices of largest connected set. Returns ------- Tt_Eq : ndarray(N, N) equilibrium transition matrix lcc : ndarray(M,) the largest connected set of the transition matrix.
def toggle_sensor(request, sensorname):
    """ This is used only if websocket fails """
    # Writes are rejected outright in read-only mode.
    if service.read_only:
        service.logger.warning("Could not perform operation: read only mode enabled")
        raise Http404
    sensor = service.system.namespace[sensorname]
    sensor.status = not sensor.status
    service.system.flush()
    # Bounce back to whichever view triggered the toggle.
    source = request.GET.get('source', 'main')
    return HttpResponseRedirect(reverse(source))
This is used only if websocket fails
def KL_divergence(P, Q):
    '''
    Compute the KL divergence between distributions P and Q.

    P and Q should be dictionaries linking symbols to probabilities;
    the keys to P and Q must be the same.

    Raises
    ------
    ValueError
        If P and Q are not defined over the same symbols.
    '''
    # A real exception instead of `assert`, which vanishes under `python -O`.
    if P.keys() != Q.keys():
        raise ValueError("P and Q must be defined over the same symbols")
    # sum() over items avoids the manual accumulator and repeated lookups.
    return sum(p * log(p / Q[k]) for k, p in P.items())
Compute the KL divergence between distributions P and Q P and Q should be dictionaries linking symbols to probabilities. the keys to P and Q should be the same.
def database_path(self):
    """
    Full database path: the default data directory plus the configured
    database filename, or ":memory:" for an in-memory database.
    """
    filename = self.database_filename
    if filename == ":memory:":
        return ":memory:"
    # On-disk databases live in <package root>/data/<filename>.
    return path.abspath(
        path.join(__file__, "../..", "..", "data", filename))
Full database path. Includes the default location + the database filename.
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
  """The slicenet model, main step used for training.

  Note: returns a single encoded tensor when run_decoder is False,
  otherwise a (decoder_output, similarity_loss) pair.
  """
  with tf.variable_scope("slicenet"):
    # Project to hidden size if necessary
    if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
      inputs = common_layers.conv_block(
          inputs,
          hparams.hidden_size, [((1, 1), (3, 3))],
          first_relu=False,
          padding="SAME",
          force2d=True)

    # Flatten inputs and encode.
    inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
    inputs_mask = 1.0 - embedding_to_padding(inputs)
    inputs = common_layers.add_timing_signal(inputs)  # Add position info.
    target_space_emb = embed_target_space(target_space, hparams.hidden_size)
    # Encoder gets 50% more layers than configured for the decoder.
    extra_layers = int(hparams.num_hidden_layers * 1.5)
    inputs_encoded = multi_conv_res(
        inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
    if not run_decoder:
      return inputs_encoded
    # Do the middle part.
    decoder_start, similarity_loss = slicenet_middle(
        inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
    # Decode with left ("causal") padding so the decoder is autoregressive.
    decoder_final = multi_conv_res(
        decoder_start,
        "LEFT",
        "decoder",
        hparams.num_hidden_layers,
        hparams,
        mask=inputs_mask,
        source=inputs_encoded)
    return decoder_final, tf.reduce_mean(similarity_loss)
The slicenet model, main step used for training.
def element_wise_op(array, other, op, ty):
    """
    Operation of series and other, element-wise (binary operator add)

    Args:
        array (WeldObject / Numpy.ndarray): Input array
        other (WeldObject / Numpy.ndarray): Second Input array
        op (str): Op string used to compute element-wise operation (+ / *)
        ty (WeldType): Type of each element in the input array

    Returns:
        A WeldObject representing this computation
    """
    weld_obj = WeldObject(encoder_, decoder_)

    def register(operand):
        # Register an operand with the WeldObject; lazy WeldObject
        # operands are tracked as dependencies under their obj_id.
        var = weld_obj.update(operand)
        if isinstance(operand, WeldObject):
            var = operand.obj_id
            weld_obj.dependencies[var] = operand
        return var

    array_var = register(array)
    other_var = register(other)

    weld_template = """
       map(
         zip(%(array)s, %(other)s),
         |a| a.$0 %(op)s a.$1
       )
    """
    weld_obj.weld_code = weld_template % {"array": array_var,
                                          "other": other_var,
                                          "ty": ty,
                                          "op": op}
    return weld_obj
Operation of series and other, element-wise (binary operator add) Args: array (WeldObject / Numpy.ndarray): Input array other (WeldObject / Numpy.ndarray): Second Input array op (str): Op string used to compute element-wise operation (+ / *) ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
def GetPythonLibraryDirectoryPath():
  """Retrieves the Python library directory path, relative to the prefix."""
  lib_path = sysconfig.get_python_lib(True)
  # Keep only the part after the installation prefix.
  _, _, relative_path = lib_path.rpartition(sysconfig.PREFIX)
  # Drop a single leading separator so the result is a relative path.
  if relative_path.startswith(os.sep):
    relative_path = relative_path[1:]
  return relative_path
Retrieves the Python library directory path.
def update_name(self):
    """Update the name of the Plan in Stripe and in the db.

    - Assumes the object being called has the name attribute already
      reset, but has not been saved.
    - Stripe does not allow for update of any other Plan attributes
      besides name.
    """
    stripe_plan = self.api_retrieve()
    # Push the new name to Stripe first, then persist locally.
    stripe_plan.name = self.name
    stripe_plan.save()
    self.save()
Update the name of the Plan in Stripe and in the db. Assumes the object being called has the name attribute already reset, but has not been saved. Stripe does not allow for update of any other Plan attributes besides name.
def insert_paths(self):
    """Inserts a base path into the sys.path list if one is specified in
    the configuration.
    """
    # A path given on the command line takes effect first.
    if self.args.path:
        sys.path.insert(0, self.args.path)
    # Then the configured application base path, when present.
    app = self.config.application
    if hasattr(app, config.PATHS) and hasattr(app.paths, config.BASE):
        sys.path.insert(0, app.paths.base)
Inserts a base path into the sys.path list if one is specified in the configuration.
async def power(source, exponent):
    """Raise each element of an asynchronous sequence to *exponent*."""
    async with streamcontext(source) as stream:
        async for value in stream:
            yield value ** exponent
Raise the elements of an asynchronous sequence to the given power.
def set_fluxinfo(self):
    """ Uses list of known flux calibrators (with models in CASA) to find full name given in scan. """

    knowncals = ['3C286', '3C48', '3C147', '3C138']

    # find scans with knowncals in the name
    sourcenames = [self.sources[source]['source'] for source in self.sources]
    # calsources holds the short calibrator names, calsources_full the
    # full source names as they appear in the scan list (same ordering).
    calsources = [cal for src in sourcenames for cal in knowncals if cal in src]
    calsources_full = [src for src in sourcenames for cal in knowncals if cal in src]
    if len(calsources):
        # if cal found, set band name from first spw
        self.band = self.sdm['Receiver'][0].frequencyBand.split('_')[1]
        if len(calsources) > 1:
            # More than one calibrator found; the first one wins below.
            # (Python 2 print statement.)
            print 'Found multiple flux calibrators:', calsources
        self.fluxname = calsources[0]
        self.fluxname_full = calsources_full[0]
        print 'Set flux calibrator to %s and band to %s.' % (self.fluxname_full, self.band)
    else:
        # No known calibrator in this scan set; clear the fields.
        self.fluxname = ''
        self.fluxname_full = ''
        self.band = ''
Uses list of known flux calibrators (with models in CASA) to find full name given in scan.
def create(cls, **kw):
    """
    Create an instance of this class, first cleaning up the keyword
    arguments so they will fill in any required values.

    @return: an instance of C{cls}
    """
    # BUG FIX: iterate over a snapshot of the items. Mutating kw (pop /
    # update) while iterating kw.items() raises RuntimeError on Python 3.
    for k, v in list(kw.items()):
        attr = getattr(cls, k, None)
        if isinstance(attr, RecordAttribute):
            kw.pop(k)
            kw.update(attr._decompose(v))
    return cls(**kw)
Create an instance of this class, first cleaning up the keyword arguments so they will fill in any required values. @return: an instance of C{cls}
def _get_populate_values(self, instance) -> Tuple[str, str]:
    """Gets all values (for each language) from the specified
    instance's `populate_from` field.

    Arguments:
        instance:
            The instance to get the values from.

    Returns:
        A list of (lang_code, value) tuples.
    """
    values = []
    for lang_code, _ in settings.LANGUAGES:
        # Resolve the populate_from value for this specific language.
        value = self._get_populate_from_value(
            instance, self.populate_from, lang_code)
        values.append((lang_code, value))
    return values
Gets all values (for each language) from the specified's instance's `populate_from` field. Arguments: instance: The instance to get the values from. Returns: A list of (lang_code, value) tuples.
def _import_model(models, crumbs):
    """
    Change the nested items of the paleoModel data. Overwrite the data in-place.

    :param list models: Metadata
    :param str crumbs: Crumbs
    :return dict _models: Metadata
    """
    # BUG FIX: the original logged "enter import_model".format(crumbs),
    # which has no placeholder and silently dropped the crumbs; log it
    # the same way as the exit message.
    logger_jsons.info("enter import_model: {}".format(crumbs))
    _models = OrderedDict()
    try:
        for _idx, model in enumerate(models):
            # Keep the original dictionary, but replace the three main
            # entries below.

            # Do a direct replacement of summaryTable columns. No table
            # name, no table work needed.
            if "summaryTable" in model:
                model["summaryTable"] = _idx_table_by_name(
                    model["summaryTable"],
                    "{}{}{}".format(crumbs, _idx, "summary"))
            # Do a direct replacement of ensembleTable columns. No table
            # name, no table work needed.
            if "ensembleTable" in model:
                model["ensembleTable"] = _idx_table_by_name(
                    model["ensembleTable"],
                    "{}{}{}".format(crumbs, _idx, "ensemble"))
            if "distributionTable" in model:
                model["distributionTable"] = _idx_table_by_name(
                    model["distributionTable"],
                    "{}{}{}".format(crumbs, _idx, "distribution"))

            _table_name = "{}{}".format(crumbs, _idx)
            _models[_table_name] = model
    except Exception as e:
        logger_jsons.error("import_model: {}".format(e))
        print("Error: import_model: {}".format(e))
    logger_jsons.info("exit import_model: {}".format(crumbs))
    return _models
Change the nested items of the paleoModel data. Overwrite the data in-place. :param list models: Metadata :param str crumbs: Crumbs :return dict _models: Metadata
def UpdateWorkerStatus(
    self, identifier, status, pid, used_memory, display_name,
    number_of_consumed_sources, number_of_produced_sources,
    number_of_consumed_events, number_of_produced_events,
    number_of_consumed_event_tags, number_of_produced_event_tags,
    number_of_consumed_reports, number_of_produced_reports,
    number_of_consumed_warnings, number_of_produced_warnings):
  """Updates the status of a worker.

  Args:
    identifier (str): worker identifier.
    status (str): human readable status of the worker e.g. 'Idle'.
    pid (int): process identifier (PID).
    used_memory (int): size of used memory in bytes.
    display_name (str): human readable of the file entry currently being
        processed by the worker.
    number_of_consumed_sources (int): total number of event sources consumed
        by the worker.
    number_of_produced_sources (int): total number of event sources produced
        by the worker.
    number_of_consumed_events (int): total number of events consumed by
        the worker.
    number_of_produced_events (int): total number of events produced by
        the worker.
    number_of_consumed_event_tags (int): total number of event tags consumed
        by the worker.
    number_of_produced_event_tags (int): total number of event tags produced
        by the worker.
    number_of_consumed_reports (int): total number of event reports consumed
        by the process.
    number_of_produced_reports (int): total number of event reports produced
        by the process.
    number_of_consumed_warnings (int): total number of warnings consumed by
        the worker.
    number_of_produced_warnings (int): total number of warnings produced by
        the worker.
  """
  # Lazily create a status entry the first time this worker reports in.
  if identifier not in self._workers_status:
    self._workers_status[identifier] = ProcessStatus()

  process_status = self._workers_status[identifier]
  # All counters are recorded via the shared process-status updater.
  self._UpdateProcessStatus(
      process_status, identifier, status, pid, used_memory, display_name,
      number_of_consumed_sources, number_of_produced_sources,
      number_of_consumed_events, number_of_produced_events,
      number_of_consumed_event_tags, number_of_produced_event_tags,
      number_of_consumed_reports, number_of_produced_reports,
      number_of_consumed_warnings, number_of_produced_warnings)
Updates the status of a worker. Args: identifier (str): worker identifier. status (str): human readable status of the worker e.g. 'Idle'. pid (int): process identifier (PID). used_memory (int): size of used memory in bytes. display_name (str): human readable of the file entry currently being processed by the worker. number_of_consumed_sources (int): total number of event sources consumed by the worker. number_of_produced_sources (int): total number of event sources produced by the worker. number_of_consumed_events (int): total number of events consumed by the worker. number_of_produced_events (int): total number of events produced by the worker. number_of_consumed_event_tags (int): total number of event tags consumed by the worker. number_of_produced_event_tags (int): total number of event tags produced by the worker. number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. number_of_consumed_warnings (int): total number of warnings consumed by the worker. number_of_produced_warnings (int): total number of warnings produced by the worker.
def write_branch_data(self, file, padding=" "):
    """ Writes branch data in Graphviz DOT language.
    """
    # Every edge carries the same attribute string, so build it once.
    attr_str = ", ".join(
        '%s="%s"' % pair for pair in self.branch_attr.iteritems())
    for branch in self.case.branches:
        file.write("%s%s -> %s [%s];\n" % (
            padding, branch.from_bus.name, branch.to_bus.name, attr_str))
Writes branch data in Graphviz DOT language.
def _AbortJoin(self, timeout=None):
    """Aborts all registered processes by joining with the parent process.

    Args:
      timeout (int): number of seconds to wait for processes to join, where
          None represents no timeout.
    """
    for pid, process in iter(self._processes_per_pid.items()):
        logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
            process.name, pid))
        process.join(timeout=timeout)
        if process.is_alive():
            # Still running after the timeout; nothing more to report.
            continue
        logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
            process.name, pid))
Aborts all registered processes by joining with the parent process. Args: timeout (int): number of seconds to wait for processes to join, where None represents no timeout.
def serve_doc(app, url):
    """
    Serve API documentation extracted from request handler docstrings

    Parameters:

      * app: Grole application object
      * url: URL to serve at
    """
    @app.route(url, doc=False)
    def index(env, req):
        # env['doc'] holds one dict per registered route with the keys
        # 'url', 'methods' and 'doc'.
        lines = [
            'URL: {url}, supported methods: {methods}{doc}\n'.format(**entry)
            for entry in env['doc']
        ]
        return ''.join(lines)
Serve API documentation extracted from request handler docstrings Parameters: * app: Grole application object * url: URL to serve at
def get_output(self, idx=-1):
    """
    Return an additional output of the instruction

    :rtype: string
    """
    data = self.get_data()
    # Start with the repr of the raw data, then append each byte as a
    # \xNN escape sequence.
    parts = [repr(data), " | "]
    for byte in data:
        parts.append("\\x%02x" % byte)
    return "".join(parts)
Return an additional output of the instruction :rtype: string
def find_file(self, path, saltenv, back=None):
    '''
    Find the path and return the fnd structure, this structure is passed
    to other backend interfaces.

    :param path: relative file path; may carry ``?key=value`` query
        arguments unless it is an escaped URL.
    :param saltenv: salt environment name; a ``saltenv`` query argument
        in ``path`` overrides it.
    :param back: optional backend restriction passed to ``self.backends``.
    :return: dict with at least ``path`` and ``rel`` keys; a ``back`` key
        is added when a backend found the file.
    '''
    path = salt.utils.stringutils.to_unicode(path)
    saltenv = salt.utils.stringutils.to_unicode(saltenv)
    back = self.backends(back)
    kwargs = {}
    # Default (empty) result returned when nothing matches.
    fnd = {'path': '',
           'rel': ''}
    if os.path.isabs(path):
        # Absolute paths are never served by the fileserver.
        return fnd
    if '../' in path:
        # Reject directory traversal attempts.
        return fnd
    if salt.utils.url.is_escaped(path):
        # don't attempt to find URL query arguments in the path
        path = salt.utils.url.unescape(path)
    else:
        if '?' in path:
            # Split off the query string and collect key=value pairs.
            hcomps = path.split('?')
            path = hcomps[0]
            comps = hcomps[1].split('&')
            for comp in comps:
                if '=' not in comp:
                    # Invalid option, skip it
                    continue
                args = comp.split('=', 1)
                kwargs[args[0]] = args[1]
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')
    if 'saltenv' in kwargs:
        saltenv = kwargs.pop('saltenv')
    if not isinstance(saltenv, six.string_types):
        saltenv = six.text_type(saltenv)
    # Ask each backend in order; the first one reporting a path wins.
    for fsb in back:
        fstr = '{0}.find_file'.format(fsb)
        if fstr in self.servers:
            fnd = self.servers[fstr](path, saltenv, **kwargs)
            if fnd.get('path'):
                fnd['back'] = fsb
                return fnd
    return fnd
Find the path and return the fnd structure, this structure is passed to other backend interfaces.
def makeAggShkHist(self):
    '''
    Make simulated histories of aggregate transitory and permanent shocks.
    Histories are of length self.act_T, for use in the general equilibrium
    simulation.  This replicates the same method for CobbDouglasEconomy;
    future version should create parent class.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    sim_periods = self.act_T
    # AggShkDstn is (probabilities, permanent shocks, transitory shocks);
    # draw event indices according to the probabilities, then look up the
    # corresponding shock values.
    event_indices = np.arange(self.AggShkDstn[0].size)
    draws = drawDiscrete(N=sim_periods, P=self.AggShkDstn[0],
                         X=event_indices, seed=0)
    self.PermShkAggHist = self.AggShkDstn[1][draws]
    self.TranShkAggHist = self.AggShkDstn[2][draws]
Make simulated histories of aggregate transitory and permanent shocks. Histories are of length self.act_T, for use in the general equilibrium simulation. This replicates the same method for CobbDouglasEconomy; future version should create parent class. Parameters ---------- None Returns ------- None
def log_message(self, format, *args):
    """
    overrides the ``log_message`` method from the wsgiref server so that
    normal logging works with whatever configuration the application has
    been set to.

    Levels are inferred from the HTTP status code, 4XX codes are treated
    as warnings, 5XX as errors and everything else as INFO level.
    """
    # args[1] is the HTTP status code string; its first digit picks the
    # logging level.
    status_class = args[1][0]
    if status_class == '4':
        level = 'warning'
    elif status_class == '5':
        level = 'error'
    else:
        level = 'info'
    getattr(logger, level)(format % args)
overrides the ``log_message`` method from the wsgiref server so that normal logging works with whatever configuration the application has been set to. Levels are inferred from the HTTP status code, 4XX codes are treated as warnings, 5XX as errors and everything else as INFO level.
def get_pb_ids(self) -> List[str]:
    """Return the list of PB ids associated with the SBI.

    Returns:
        list, Processing block ids
    """
    # The ids are stored as the string repr of a Python list; parse it
    # back with ast.literal_eval.
    raw = DB.get_hash_value(self._key, 'processing_block_ids')
    return ast.literal_eval(raw)
Return the list of PB ids associated with the SBI. Returns: list, Processing block ids
def find_extensions_in(path: typing.Union[str, pathlib.Path]) -> list:
    """
    Tries to find things that look like bot extensions in a directory.
    """
    if not isinstance(path, pathlib.Path):
        path = pathlib.Path(path)
    if not path.is_dir():
        return []

    def dotted(parts):
        # Drop a leading '.' component before joining into a module path.
        if parts and parts[0] == '.':
            parts = parts[1:]
        return '.'.join(parts)

    # Single-file extensions directly in this folder.
    names = [dotted(p.with_suffix('').parts) for p in path.glob('*.py')]
    # Package extensions (subfolders containing __init__.py).
    names.extend(dotted(p.parent.parts) for p in path.glob('*/__init__.py'))
    return names
Tries to find things that look like bot extensions in a directory.
async def dispatch(self, request, view=None, **kwargs): """Process request.""" # Authorization endpoint self.auth = await self.authorize(request, **kwargs) # noqa # Load collection self.collection = await self.get_many(request, **kwargs) if request.method == 'POST' and view is None: return await super(RESTHandler, self).dispatch(request, **kwargs) # Load resource resource = await self.get_one(request, **kwargs) headers = {} if request.method == 'GET' and resource is None: # Filter resources if VAR_WHERE in request.query: self.collection = await self.filter(request, **kwargs) # Sort resources if VAR_SORT in request.query: sorting = [(name.strip('-'), name.startswith('-')) for name in request.query[VAR_SORT].split(',')] self.collection = await self.sort(*sorting, **kwargs) # Paginate resources per_page = request.query.get(VAR_PER_PAGE, self.meta.per_page) if per_page: try: per_page = int(per_page) if per_page: page = int(request.query.get(VAR_PAGE, 0)) offset = page * per_page self.collection, total = await self.paginate(request, offset, per_page) headers = make_pagination_headers( request, per_page, page, total, self.meta.page_links) except ValueError: raise RESTBadRequest(reason='Pagination params are invalid.') response = await super(RESTHandler, self).dispatch( request, resource=resource, view=view, **kwargs) response.headers.update(headers) return response
Process request.
def checkCytoscapeVersion(host=cytoscape_host, port=cytoscape_port):
    """
    Checks cytoscape version

    :param host: cytoscape host address, default=cytoscape_host
    :param port: cytoscape port, default=1234

    :returns: cytoscape and api version
    """
    version_url = "http://" + str(host) + ":" + str(port) + "/v1/version/"
    response = requests.get(url=version_url)
    info = json.loads(response.content)
    # Print each reported version component (e.g. apiVersion,
    # cytoscapeVersion).
    for key, value in info.items():
        print(key, value)
Checks cytoscape version :param host: cytoscape host address, default=cytoscape_host :param port: cytoscape port, default=1234 :returns: cytoscape and api version
def render(self, *args, **kwargs):
    """This function accepts either a dict or some keyword arguments which
    will then be the context the template is evaluated in.  The return
    value will be the rendered template.

    :param context: the function accepts the same arguments as the
                    :class:`dict` constructor.
    :return: the rendered template as string
    """
    # Start from the template's default context so callers only supply
    # overrides.
    ns = self.default_context.copy()
    if len(args) == 1 and isinstance(args[0], MultiDict):
        # A MultiDict is flattened to its first values.
        ns.update(args[0].to_dict(flat=True))
    else:
        ns.update(dict(*args))
    if kwargs:
        ns.update(kwargs)
    context = Context(ns, self.charset, self.errors)
    # Python 2 exec statement: run the compiled template code with the
    # context's runtime dict as globals and the context as locals.
    exec self.code in context.runtime, context
    return context.get_value(self.unicode_mode)
This function accepts either a dict or some keyword arguments which will then be the context the template is evaluated in. The return value will be the rendered template. :param context: the function accepts the same arguments as the :class:`dict` constructor. :return: the rendered template as string
def swap(self, kaxes, vaxes, size="150"):
    """
    Swap axes from keys to values.

    This is the core operation underlying shape manipulation
    on the Spark bolt array. It exchanges an arbitrary set of axes
    between the keys and the values. If either is None, will only
    move axes in one direction (from keys to values, or values to keys).
    Keys moved to values will be placed immediately after the split;
    values moved to keys will be placed immediately before the split.

    Parameters
    ----------
    kaxes : tuple
        Axes from keys to move to values

    vaxes : tuple
        Axes from values to move to keys

    size : tuple or int, optional, default = "150"
        Can either provide a string giving the size in kilobytes,
        or a tuple with the number of chunks along each
        value dimension being moved

    Returns
    -------
    BoltArraySpark
    """
    kaxes = asarray(tupleize(kaxes), 'int')
    vaxes = asarray(tupleize(vaxes), 'int')
    if type(size) is not str:
        size = tupleize(size)

    if len(kaxes) == self.keys.ndim and len(vaxes) == 0:
        # Moving every key axis to values with nothing coming back would
        # leave a single key holding all data.
        raise ValueError('Cannot perform a swap that would '
                         'end up with all data on a single key')

    if len(kaxes) == 0 and len(vaxes) == 0:
        # Nothing to move.
        return self

    # NOTE(review): this import appears unused in this method; presumably
    # kept for its side effects or historical reasons — confirm.
    from bolt.spark.chunk import ChunkedArray

    chunks = self.chunk(size)
    # Value axis indices shift by the number of key axes being moved in.
    swapped = chunks.keys_to_values(kaxes).values_to_keys(
        [v+len(kaxes) for v in vaxes])
    barray = swapped.unchunk()

    return barray
Swap axes from keys to values. This is the core operation underlying shape manipulation on the Spark bolt array. It exchanges an arbitrary set of axes between the keys and the valeus. If either is None, will only move axes in one direction (from keys to values, or values to keys). Keys moved to values will be placed immediately after the split; values moved to keys will be placed immediately before the split. Parameters ---------- kaxes : tuple Axes from keys to move to values vaxes : tuple Axes from values to move to keys size : tuple or int, optional, default = "150" Can either provide a string giving the size in kilobytes, or a tuple with the number of chunks along each value dimension being moved Returns ------- BoltArraySpark
def hint_width(self):
    """Width of a column segment."""
    delim_len = len(self.style.delimiter)
    # delimiter + value + delimiter + space + codepoint text + space + name.
    return (delim_len + self.wide + delim_len + len(u' ')
            + UCS_PRINTLEN + 2 + len(u' ') + self.style.name_len)
Width of a column segment.
def _split_indices(self, concat_inds): """Take indices in 'concatenated space' and return as pairs of (traj_i, frame_i) """ clengths = np.append([0], np.cumsum(self.__lengths)) mapping = np.zeros((clengths[-1], 2), dtype=int) for traj_i, (start, end) in enumerate(zip(clengths[:-1], clengths[1:])): mapping[start:end, 0] = traj_i mapping[start:end, 1] = np.arange(end - start) return mapping[concat_inds]
Take indices in 'concatenated space' and return as pairs of (traj_i, frame_i)
def unacknowledge_problem(self):
    """
    Remove the acknowledge, reset the flag. The comment is deleted

    :return: None
    """
    if self.problem_has_been_acknowledged:
        logger.debug("[item::%s] deleting acknowledge of %s",
                     self.get_name(), self.get_full_name())
        self.problem_has_been_acknowledged = False
        # Hosts and services build the expire brok with different
        # identifiers.
        if self.my_type == 'host':
            self.broks.append(
                self.acknowledgement.get_expire_brok(self.get_name()))
        else:
            self.broks.append(
                self.acknowledgement.get_expire_brok(self.host_name,
                                                     self.get_name()))

        # delete the comment of the item related with the acknowledge
        if hasattr(self.acknowledgement, 'comment_id') and \
                self.acknowledgement.comment_id in self.comments:
            del self.comments[self.acknowledgement.comment_id]

        # Should not be deleted, a None is Good
        self.acknowledgement = None

        self.broks.append(self.get_update_status_brok())
        self.raise_unacknowledge_log_entry()
Remove the acknowledge, reset the flag. The comment is deleted :return: None
def pickle_matpower_cases(case_paths, case_format=2):
    """ Parses the MATPOWER case files at the given paths and pickles the
    resulting Case objects to the same directory.
    """
    import pylon.io

    # Accept a single path as well as a list of paths (Python 2
    # basestring covers str and unicode).
    if isinstance(case_paths, basestring):
        case_paths = [case_paths]

    for case_path in case_paths:
        # Read the MATPOWER case file.
        case = pylon.io.MATPOWERReader(case_format).read(case_path)

        # Same name, '.pkl' extension, same directory.
        root, _ = os.path.splitext(os.path.basename(case_path))
        pickled_case_path = os.path.join(os.path.dirname(case_path),
                                         root + '.pkl')

        # Pickle the resulting Pylon Case object.
        pylon.io.PickleWriter(case).write(pickled_case_path)
Parses the MATPOWER case files at the given paths and pickles the resulting Case objects to the same directory.
def draw_tree(node, child_iter=lambda n: n.children, text_str=lambda n: str(n)):
    """Render *node* and its descendants as an ASCII tree string.

    Args:
        node: the root of the tree to be drawn.
        child_iter: callable returning an iterable over a node's children
            (defaults to ``node.children``).
        text_str: callable turning a node into its display text
            (defaults to ``str``).

    Based on https://pypi.python.org/pypi/asciitree/
    """
    # Delegate to the recursive worker with an empty prefix.
    return _draw_tree(node, '', child_iter, text_str)
Args: node: the root of the tree to be drawn, child_iter: function that when called with a node, returns an iterable over all its children text_str: turns a node into the text to be displayed in the tree. The default implementations of these two arguments retrieve the children by accessing node.children and simply use str(node) to convert a node to a string. The resulting tree is drawn into a buffer and returned as a string. Based on https://pypi.python.org/pypi/asciitree/
def transfer_project(self, to_project_id, **kwargs):
    """Transfer a project to this group.

    Args:
        to_project_id (int): ID of the project to transfer
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabTransferProjectError: If the project could not be transferred
    """
    endpoint = '/groups/%s/projects/%s' % (self.id, to_project_id)
    self.manager.gitlab.http_post(endpoint, **kwargs)
Transfer a project to this group. Args: to_project_id (int): ID of the project to transfer **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabTransferProjectError: If the project could not be transferred
def project_ecef_vector_onto_sc(inst, x_label, y_label, z_label,
                                new_x_label, new_y_label, new_z_label,
                                meta=None):
    """Express input vector using s/c attitude directions

    x - ram pointing
    y - generally southward
    z - generally nadir

    Parameters
    ----------
    x_label : string
        Label used to get ECEF-X component of vector to be projected
    y_label : string
        Label used to get ECEF-Y component of vector to be projected
    z_label : string
        Label used to get ECEF-Z component of vector to be projected
    new_x_label : string
        Label used to set X component of projected vector
    new_y_label : string
        Label used to set Y component of projected vector
    new_z_label : string
        Label used to set Z component of projected vector
    meta : array_like of dicts (None)
        Dicts contain metadata to be assigned.
    """
    import pysatMagVect

    # Project the ECEF vector onto the spacecraft basis vectors that are
    # assumed to already be stored on the instrument as
    # sc_{x,y,z}hat_ecef_{x,y,z} — TODO confirm upstream routine sets them.
    x, y, z = pysatMagVect.project_ecef_vector_onto_basis(
        inst[x_label], inst[y_label], inst[z_label],
        inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'],
        inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'],
        inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z'])
    inst[new_x_label] = x
    inst[new_y_label] = y
    inst[new_z_label] = z

    if meta is not None:
        # Attach the provided metadata dicts to the three new labels.
        inst.meta[new_x_label] = meta[0]
        inst.meta[new_y_label] = meta[1]
        inst.meta[new_z_label] = meta[2]

    return
Express input vector using s/c attitude directions x - ram pointing y - generally southward z - generally nadir Parameters ---------- x_label : string Label used to get ECEF-X component of vector to be projected y_label : string Label used to get ECEF-Y component of vector to be projected z_label : string Label used to get ECEF-Z component of vector to be projected new_x_label : string Label used to set X component of projected vector new_y_label : string Label used to set Y component of projected vector new_z_label : string Label used to set Z component of projected vector meta : array_like of dicts (None) Dicts contain metadata to be assigned.
def namedb_get_num_blockstack_ops_at( db, block_id ):
    """
    Get the number of name/namespace/token operations that occurred at a
    particular block.
    """
    cur = db.cursor()

    # preorders at this block
    num_preorders = namedb_select_count_rows(
        cur,
        "SELECT COUNT(*) FROM preorders WHERE block_number = ?;",
        (block_id,))

    # committed operations at this block
    rows_result = namedb_query_execute(
        cur,
        "SELECT COUNT(*) FROM history WHERE block_id = ?;",
        (block_id,))
    count = 0
    for row in rows_result:
        count = row['COUNT(*)']
        break

    log.debug("{} preorders; {} history rows at {}".format(
        num_preorders, count, block_id))
    return count + num_preorders
Get the number of name/namespace/token operations that occurred at a particular block.
def remove_tweet(self, id):
    """
    Delete a tweet.

    :param id: ID of the tweet in question
    :return: True if success, False otherwise
    """
    try:
        self._client.destroy_status(id=id)
    except TweepError as error:
        # Already-gone tweets and tweets owned by other users are
        # reported as a plain failure rather than an exception.
        expected = (TWITTER_PAGE_DOES_NOT_EXISTS_ERROR,
                    TWITTER_DELETE_OTHER_USER_TWEET)
        if error.api_code in expected:
            return False
        raise
    return True
Delete a tweet. :param id: ID of the tweet in question :return: True if success, False otherwise
def read_trigger_parameters(filename):
    """Read the trigger parameters into trigger_parameter classes.

    :type filename: str
    :param filename: Parameter file

    :returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters`
    :rtype: list

    .. rubric:: Example

    >>> from eqcorrscan.utils.trigger import read_trigger_parameters
    >>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP
    """
    parameters = []
    print('Reading parameters with the following header:')
    # Use a context manager so the file is closed even if
    # ast.literal_eval raises on a malformed line (the original leaked
    # the handle in that case).
    with open(filename, 'r') as f:
        for line in f:
            if line[0] == '#':
                # Header/comment lines are echoed, not parsed.
                print(line.rstrip('\n').lstrip('\n'))
            else:
                # Each non-comment line is a Python dict literal.
                parameter_dict = ast.literal_eval(line)
                # convert the dictionary to the class
                parameters.append(TriggerParameters(parameter_dict))
    return parameters
Read the trigger parameters into trigger_parameter classes. :type filename: str :param filename: Parameter file :returns: List of :class:`eqcorrscan.utils.trigger.TriggerParameters` :rtype: list .. rubric:: Example >>> from eqcorrscan.utils.trigger import read_trigger_parameters >>> parameters = read_trigger_parameters('parameters') # doctest: +SKIP
def _wait_for_machine_booted(name, suffictinet_texts=None):
    """
    Internal method
    wait until machine is ready, in common case means there is running
    systemd-logind

    :param name: str with machine name
    :param suffictinet_texts: alternative text to check in output
    :return: True or exception
    """
    # TODO: rewrite it using probes module in utils
    # optionally use: "Unit: machine" as an alternative marker
    suffictinet_texts = suffictinet_texts if suffictinet_texts else ["systemd-logind"]
    for _attempt in range(constants.DEFAULT_RETRYTIMEOUT):
        time.sleep(constants.DEFAULT_SLEEP)
        out = run_cmd(
            ["machinectl", "--no-pager", "status", name],
            ignore_status=True, return_output=True)
        if any(marker in out for marker in suffictinet_texts):
            # Give the machine one more beat to settle before returning.
            time.sleep(constants.DEFAULT_SLEEP)
            return True
    raise ConuException(
        "Unable to start machine %s within %d (machinectl status command dos not contain %s)" %
        (name, constants.DEFAULT_RETRYTIMEOUT, suffictinet_texts))
Internal method wait until machine is ready, in common case means there is running systemd-logind :param name: str with machine name :param suffictinet_texts: alternative text to check in output :return: True or exception
def create(name, *effects, **kwargs):
    """
    Annotate a non-idempotent create action to the model being defined.
    Should really be::

      create(name, *effects, value=None, params=None, label=None, desc=None)

    but it is not supported by python < 3.

    @param name: item name unique for the model being defined.
    @type name: str or unicode
    @param effects: effect names triggered by the action.
    @type effects: str or unicode
    @param value: input value information or None if not required.
    @type value: IValueInfo or None
    @param params: action parameter or list of action parameters.
    @type params: IActionParam or list of IActionParam
    @param label: the action label or None.
    @type label: str or unicode or None
    @param desc: the action description or None if not documented.
    @type desc: str or unicode or None
    """
    value_info = kwargs.pop("value", None)
    params = kwargs.pop("params", None)
    label = kwargs.pop("label", None)
    desc = kwargs.pop("desc", None)
    if kwargs:
        # next(iter(...)) works on Python 2 and 3; kwargs.keys()[0] would
        # raise TypeError ("keys view not subscriptable") on Python 3.
        raise TypeError("create() got an unexpected keyword '%s'"
                        % next(iter(kwargs)))
    _annotate("create", name, value_info=value_info, params=params,
              effects=effects, label=label, desc=desc)
Annotate a non-idempotent create action to the model being defined. Should really be:: create(name, *effects, value=None, params=None, label=None, desc=None) but it is not supported by python < 3. @param name: item name unique for the model being defined. @type name: str or unicode @param effects: @type effects: str or unicode @param value: input value information or None if not required. @type value: IValuInfo or None @param params: action parameter or list of action parameters. @type params: IActionParam or list of IActionParam @param label: the action label or None. @type label: str or unicode or None @param desc: the action description or None if not documented. @type desc: str or unicode or None
def get_empty_dtype_and_na(join_units):
    """
    Return dtype and N/A values to use when concatenating specified units.

    Returned N/A value may be None which means there was no casting involved.

    Returns
    -------
    dtype
    na
    """
    if len(join_units) == 1:
        blk = join_units[0].block
        if blk is None:
            # A placeholder unit with no block concatenates as all-NaN
            # float.
            return np.float64, np.nan

    if is_uniform_reindex(join_units):
        # XXX: integrate property
        empty_dtype = join_units[0].block.dtype
        upcasted_na = join_units[0].block.fill_value
        return empty_dtype, upcasted_na

    has_none_blocks = False
    dtypes = [None] * len(join_units)
    for i, unit in enumerate(join_units):
        if unit.block is None:
            has_none_blocks = True
        else:
            dtypes[i] = unit.dtype

    # Bucket each unit's dtype into a coarse "upcast class"; NA-only
    # units go into a separate mapping so they only matter when there is
    # nothing else.
    upcast_classes = defaultdict(list)
    null_upcast_classes = defaultdict(list)
    for dtype, unit in zip(dtypes, join_units):
        if dtype is None:
            continue

        if is_categorical_dtype(dtype):
            upcast_cls = 'category'
        elif is_datetime64tz_dtype(dtype):
            upcast_cls = 'datetimetz'
        elif issubclass(dtype.type, np.bool_):
            upcast_cls = 'bool'
        elif issubclass(dtype.type, np.object_):
            upcast_cls = 'object'
        elif is_datetime64_dtype(dtype):
            upcast_cls = 'datetime'
        elif is_timedelta64_dtype(dtype):
            upcast_cls = 'timedelta'
        elif is_sparse(dtype):
            upcast_cls = dtype.subtype.name
        elif is_extension_array_dtype(dtype):
            upcast_cls = 'object'
        elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
            upcast_cls = dtype.name
        else:
            upcast_cls = 'float'

        # Null blocks should not influence upcast class selection, unless
        # there are only null blocks, when same upcasting rules must be
        # applied to null upcast classes.
        if unit.is_na:
            null_upcast_classes[upcast_cls].append(dtype)
        else:
            upcast_classes[upcast_cls].append(dtype)

    if not upcast_classes:
        upcast_classes = null_upcast_classes

    # create the result
    if 'object' in upcast_classes:
        return np.dtype(np.object_), np.nan
    elif 'bool' in upcast_classes:
        # bool mixed with missing values must become object.
        if has_none_blocks:
            return np.dtype(np.object_), np.nan
        else:
            return np.dtype(np.bool_), None
    elif 'category' in upcast_classes:
        return np.dtype(np.object_), np.nan
    elif 'datetimetz' in upcast_classes:
        # GH-25014. We use NaT instead of iNaT, since this eventually
        # ends up in DatetimeArray.take, which does not allow iNaT.
        dtype = upcast_classes['datetimetz']
        return dtype[0], tslibs.NaT
    elif 'datetime' in upcast_classes:
        return np.dtype('M8[ns]'), tslibs.iNaT
    elif 'timedelta' in upcast_classes:
        return np.dtype('m8[ns]'), tslibs.iNaT
    else:  # pragma
        try:
            g = np.find_common_type(upcast_classes, [])
        except TypeError:
            # At least one is an ExtensionArray
            return np.dtype(np.object_), np.nan
        else:
            if is_float_dtype(g):
                return g, g.type(np.nan)
            elif is_numeric_dtype(g):
                if has_none_blocks:
                    return np.float64, np.nan
                else:
                    return g, None

    msg = "invalid dtype determination in get_concat_dtype"
    raise AssertionError(msg)
Return dtype and N/A values to use when concatenating specified units. Returned N/A value may be None which means there was no casting involved. Returns ------- dtype na
def time_from_match(match_object):
    """Create a time object from a regular expression match.

    The regular expression match is expected to be from RE_TIME or
    RE_DATETIME (named groups ``hour``, ``minute``, ``second`` and an
    optional ``subsecond``).

    @param match_object: The regular expression match.
    @type value: B{re}.I{MatchObject}
    @return: A time object.
    @rtype: B{datetime}.I{time}
    """
    hour = int(match_object.group('hour'))
    minute = int(match_object.group('minute'))
    second = int(match_object.group('second'))
    fraction = match_object.group('subsecond')
    microsecond = 0
    if fraction is not None:
        # Scale the fractional digits to microseconds, rounding to the
        # nearest integer.
        scale = 1000000 / (10.0 ** len(fraction))
        microsecond = int(round(int(fraction) * scale))
    return datetime.time(hour, minute, second, microsecond)
Create a time object from a regular expression match. The regular expression match is expected to be from RE_TIME or RE_DATETIME. @param match_object: The regular expression match. @type value: B{re}.I{MatchObject} @return: A date object. @rtype: B{datetime}.I{time}
def _normalize_name(name):
    """Converts a name to Http-Header-Case.

    >>> HTTPHeaders._normalize_name("coNtent-TYPE")
    'Content-Type'
    """
    cache = HTTPHeaders._normalized_headers
    try:
        return cache[name]
    except KeyError:
        pass
    # Names already in canonical form pass through unchanged; otherwise
    # capitalize each dash-separated word.
    if HTTPHeaders._NORMALIZED_HEADER_RE.match(name):
        normalized = name
    else:
        normalized = "-".join(word.capitalize() for word in name.split("-"))
    cache[name] = normalized
    return normalized
Converts a name to Http-Header-Case. >>> HTTPHeaders._normalize_name("coNtent-TYPE") 'Content-Type'
def _general_multithread(func): """ return the general multithreading function using func """ def multithread(templates, stream, *args, **kwargs): with pool_boy(ThreadPool, len(stream), **kwargs) as pool: return _pool_normxcorr(templates, stream, pool=pool, func=func) return multithread
return the general multithreading function using func
def remove(self, username=None):
    """Remove User instance based on supplied user name."""
    # Rebuild the list, keeping only users whose name does not match.
    kept = []
    for user in self._user_list:
        if user.name != username:
            kept.append(user)
    self._user_list = kept
Remove User instance based on supplied user name.
def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int:
    """ Returns terrain height at a position.
    Caution: terrain height is not anywhere near a unit's z-coordinate. """
    assert isinstance(pos, (Point2, Point3, Unit))
    # Reduce to a rounded 2D grid coordinate before the lookup.
    grid_pos = pos.position.to2.rounded
    return self._game_info.terrain_height[grid_pos]
Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate.
def batch_commit(self, message):
    """
    Instead of committing a lot of small commits you can batch it together
    using this controller.

    Example:

    with git.batch_commit('BATCHED'):
        git.commit_file('my commit 1', 'path/to/file', 'content from file')
        git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array')

    Withing the `with` block you can use group the method calls of
    `commit_file` and `commit_json_file`, and every other method calling
    this two methods.

    :type message: str
    :return: with controller to be used with Python's `with git.batch_commit():`
    """
    class controlled_execution:
        def __init__(self, git, message):
            self.git = git
            self.message = message

        def __enter__(self):
            # Flag batch mode so commit helpers queue messages instead of
            # committing immediately.
            self.git.git_batch_commit = True

            if self.git.job_id:
                # make sure we're always on the tip tree
                self.git.read_tree(self.git.ref_head)

        def __exit__(self, type, value, traceback):
            self.git.git_batch_commit = False

            # if nothing committed, we return early
            if not self.git.git_batch_commit_messages:
                return

            # Combine the batch header with all queued messages into one
            # commit message, then commit the accumulated index.
            commit_message = self.message
            if self.git.git_batch_commit_messages:
                commit_message = commit_message + "\n\n" + "\n".join(
                    self.git.git_batch_commit_messages)
            self.git.git_batch_commit_messages = []

            self.git.commit_index(commit_message)

    return controlled_execution(self, message)
Instead of committing a lot of small commits you can batch it together using this controller. Example: with git.batch_commit('BATCHED'): git.commit_file('my commit 1', 'path/to/file', 'content from file') git.commit_json_file('[1, 2, 3]', 'path/to/file2', 'json array') Withing the `with` block you can use group the method calls of `commit_file` and `commit_json_file`, and every other method calling this two methods. :type message: str :return: with controller to be used with Python's `with git.batch_commit():`
def create_context(self, message_queue, task_id):
    """
    Create values to be used by create_small_file function.

    :param message_queue: Queue: queue background process can send
        messages to us on
    :param task_id: int: id of this command's task so message will be
        routed correctly
    """
    parent_data = ParentData(self.parent.kind, self.parent.remote_id)
    params = (parent_data,
              self.local_file.get_path_data(),
              self.local_file.remote_id)
    return UploadContext(self.settings, params, message_queue, task_id)
Create values to be used by create_small_file function. :param message_queue: Queue: queue background process can send messages to us on :param task_id: int: id of this command's task so message will be routed correctly
def get_defaults(self):
    """Use argparse to determine and return dict of defaults.

    Builds a throwaway parser from copies of the registered options
    (with 'required' removed, since it is irrelevant for defaults) and
    parses an empty argument list.
    """
    # dont need 'required' to determine the default
    options = []
    for opt in self._options:
        clone = copy.copy(opt)
        # copy.copy is shallow, so the clone shares the original's
        # kwargs dict; copy it before popping 'required' so the
        # original option objects are not mutated.
        clone.kwargs = dict(opt.kwargs)
        clone.kwargs.pop('required', None)
        options.append(clone)
    parser = self.build_parser(options, permissive=True, add_help=False)
    parsed, _ = parser.parse_known_args([])
    return vars(parsed)
Use argparse to determine and return dict of defaults.
def log_print_response(logger, response):
    """
    Log an HTTP response data
    :param logger: logger to use
    :param response: HTTP response ('Requests' lib)
    :return: None
    """
    headers = dict(response.headers)
    lines = ['<<<<<<<<<<<<<<<<<<<<<< Response <<<<<<<<<<<<<<<<<<\n']
    lines.append('\t< Response code: {}\n'.format(str(response.status_code)))
    lines.append('\t< Headers: {}\n'.format(str(headers)))
    try:
        body = _get_pretty_body(headers, response.content)
    except ValueError:
        # Fall back to the textual payload when the raw content cannot
        # be pretty-printed.
        body = _get_pretty_body(headers, response.content.text)
    lines.append('\t< Payload received:\n {}'.format(body))
    logger.debug(''.join(lines))
Log an HTTP response data :param logger: logger to use :param response: HTTP response ('Requests' lib) :return: None
def get_data_generator_by_id(hardware_source_id, sync=True):
    """
    Return a generator for data.

    :param bool sync: whether to wait for current frame to finish then
        collect next frame

    NOTE: a new ndarray is created for each call.
    """
    manager = HardwareSourceManager()
    hardware_source = manager.get_hardware_source_for_hardware_source_id(
        hardware_source_id)

    def get_last_data():
        # Copy so callers own their array.
        return hardware_source.get_next_xdatas_to_finish()[0].data.copy()

    yield get_last_data
Return a generator for data. :param bool sync: whether to wait for current frame to finish then collect next frame NOTE: a new ndarray is created for each call.
def index_documents(self, fresh_docs, model): """ Update fresh index with new documents (potentially replacing old ones with the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict, shelve etc) that maps document_id->document. """ docids = fresh_docs.keys() vectors = (model.docs2vecs(fresh_docs[docid] for docid in docids)) logger.info("adding %i documents to %s" % (len(docids), self)) self.qindex.add_documents(vectors) self.qindex.save() self.update_ids(docids)
Update fresh index with new documents (potentially replacing old ones with the same id). `fresh_docs` is a dictionary-like object (=dict, sqlitedict, shelve etc) that maps document_id->document.
def map_across_full_axis(self, axis, map_func): """Applies `map_func` to every partition. Note: This method should be used in the case that `map_func` relies on some global information about the axis. Args: axis: The axis to perform the map across (0 - index, 1 - columns). map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this. """ # Since we are already splitting the DataFrame back up after an # operation, we will just use this time to compute the number of # partitions as best we can right now. num_splits = self._compute_num_partitions() preprocessed_map_func = self.preprocess_func(map_func) partitions = self.column_partitions if not axis else self.row_partitions # For mapping across the entire axis, we don't maintain partitioning because we # may want to line to partitioning up with another BlockPartitions object. Since # we don't need to maintain the partitioning, this gives us the opportunity to # load-balance the data as well. result_blocks = np.array( [ part.apply(preprocessed_map_func, num_splits=num_splits) for part in partitions ] ) # If we are mapping over columns, they are returned to use the same as # rows, so we need to transpose the returned 2D numpy array to return # the structure to the correct order. return ( self.__constructor__(result_blocks.T) if not axis else self.__constructor__(result_blocks) )
Applies `map_func` to every partition. Note: This method should be used in the case that `map_func` relies on some global information about the axis. Args: axis: The axis to perform the map across (0 - index, 1 - columns). map_func: The function to apply. Returns: A new BaseFrameManager object, the type of object that called this.
def read_i2c_block_data(self, address, register, length): """ I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Read: i2c_smbus_read_i2c_block_data() ================================================ This command reads a block of bytes from a device, from a designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK """ return self.smbus.read_i2c_block_data(address, register, length)
I2C block transactions do not limit the number of bytes transferred but the SMBus layer places a limit of 32 bytes. I2C Block Read: i2c_smbus_read_i2c_block_data() ================================================ This command reads a block of bytes from a device, from a designated register that is specified through the Comm byte. S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
def get_logger(name): """Return logger with null handler added if needed.""" if not hasattr(logging.Logger, 'trace'): logging.addLevelName(TRACE_LEVEL, 'TRACE') def trace(self, message, *args, **kwargs): if self.isEnabledFor(TRACE_LEVEL): # Yes, logger takes its '*args' as 'args'. self._log(TRACE_LEVEL, message, args, **kwargs) logging.Logger.trace = trace log = logging.getLogger(name) if not log.handlers: log.addHandler(logging.NullHandler()) return log
Return logger with null handler added if needed.
def update_objective(self, objective_form): """Updates an existing objective. arg: objective_form (osid.learning.ObjectiveForm): the form containing the elements to be updated raise: IllegalState - ``objective_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``objective_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``objective_form`` did not originate from ``get_objective_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.update_resource_template collection = JSONClientValidated('learning', collection='Objective', runtime=self._runtime) if not isinstance(objective_form, ABCObjectiveForm): raise errors.InvalidArgument('argument type is not an ObjectiveForm') if not objective_form.is_for_update(): raise errors.InvalidArgument('the ObjectiveForm is for update only, not create') try: if self._forms[objective_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('objective_form already used in an update transaction') except KeyError: raise errors.Unsupported('objective_form did not originate from this session') if not objective_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') collection.save(objective_form._my_map) self._forms[objective_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: return objects.Objective( osid_object_map=objective_form._my_map, runtime=self._runtime, proxy=self._proxy)
Updates an existing objective. arg: objective_form (osid.learning.ObjectiveForm): the form containing the elements to be updated raise: IllegalState - ``objective_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``objective_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``objective_form`` did not originate from ``get_objective_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
def setFont(self, font): """ Assigns the font to this widget and all of its children. :param font | <QtGui.QFont> """ super(XTimeEdit, self).setFont(font) # update the fonts for the time combos self._hourCombo.setFont(font) self._minuteCombo.setFont(font) self._secondCombo.setFont(font) self._timeOfDayCombo.setFont(font)
Assigns the font to this widget and all of its children. :param font | <QtGui.QFont>
def predict_withGradients(self, X): """ Returns the mean, standard deviation, mean gradient and standard deviation gradient at X for all the MCMC samples. """ if X.ndim==1: X = X[None,:] ps = self.model.param_array.copy() means = [] stds = [] dmdxs = [] dsdxs = [] for s in self.hmc_samples: if self.model._fixes_ is None: self.model[:] = s else: self.model[self.model._fixes_] = s self.model._trigger_params_changed() m, v = self.model.predict(X) std = np.sqrt(np.clip(v, 1e-10, np.inf)) dmdx, dvdx = self.model.predictive_gradients(X) dmdx = dmdx[:,:,0] dsdx = dvdx / (2*std) means.append(m) stds.append(std) dmdxs.append(dmdx) dsdxs.append(dsdx) self.model.param_array[:] = ps self.model._trigger_params_changed() return means, stds, dmdxs, dsdxs
Returns the mean, standard deviation, mean gradient and standard deviation gradient at X for all the MCMC samples.
def isdir(self, relpath, rsc=None): """ Returns whether or not the resource is a directory. :return <bool> """ filepath = self.find(relpath, rsc) if filepath.startswith(':'): resource = QtCore.QResource(filepath) return not resource.isFile() else: return os.path.isdir(filepath)
Returns whether or not the resource is a directory. :return <bool>
def queryset(self, request, queryset): """Filter queryset using params from the form.""" if self.form.is_valid(): # get no null params filter_params = dict( filter(lambda x: bool(x[1]), self.form.cleaned_data.items()) ) return queryset.filter(**filter_params) return queryset
Filter queryset using params from the form.
def _on_drawing(self, object, name, old, new): """ Handles the containers of drawing components being set. """ attrs = [ "drawing", "arrowhead_drawing" ] others = [getattr(self, a) for a in attrs \ if (a != name) and (getattr(self, a) is not None)] x, y = self.component.position print "POS:", x, y, self.component.position abs_x = [d.x + x for d in others] abs_y = [d.y + y for d in others] print "ABS:", abs_x, abs_y # Assume that he new drawing is positioned relative to graph origin. x1 = min( abs_x + [new.x] ) y1 = min( abs_y + [new.y] ) print "DRAW:", new.position new.position = [ new.x - x1, new.y - y1 ] print "DRAW:", new.position # for i, b in enumerate( others ): # self.drawing.position = [100, 100] # self.drawing.request_redraw() # print "OTHER:", b.position, abs_x[i] - x1 # b.position = [ abs_x[i] - x1, abs_y[i] - y1 ] # b.x = 50 # b.y = 50 # print "OTHER:", b.position, abs_x[i], x1 # for attr in attrs: # if attr != name: # if getattr(self, attr) is not None: # drawing = getattr(self, attr) # drawing.position = [50, 50] if old is not None: self.component.remove( old ) if new is not None: self.component.add( new ) print "POS NEW:", self.component.position self.component.position = [ x1, y1 ] print "POS NEW:", self.component.position self.component.request_redraw() print "POS NEW:", self.component.position
Handles the containers of drawing components being set.
def to_array(self): """ Serializes this Animation to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Animation, self).to_array() array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str array['width'] = int(self.width) # type int array['height'] = int(self.height) # type int array['duration'] = int(self.duration) # type int if self.thumb is not None: array['thumb'] = self.thumb.to_array() # type PhotoSize if self.file_name is not None: array['file_name'] = u(self.file_name) # py2: type unicode, py3: type str if self.mime_type is not None: array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str if self.file_size is not None: array['file_size'] = int(self.file_size) # type int return array
Serializes this Animation to a dictionary. :return: dictionary representation of this object. :rtype: dict
def proxy(self): """Retrieve the upstream content and build an HttpResponse.""" headers = self.request.headers.filter(self.ignored_request_headers) qs = self.request.query_string if self.pass_query_string else '' # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005 if (self.request.META.get('CONTENT_LENGTH', None) == '' and get_django_version() == '1.10'): del self.request.META['CONTENT_LENGTH'] request_kwargs = self.middleware.process_request( self, self.request, method=self.request.method, url=self.proxy_url, headers=headers, data=self.request.body, params=qs, allow_redirects=False, verify=self.verify_ssl, cert=self.cert, timeout=self.timeout) result = request(**request_kwargs) response = HttpResponse(result.content, status=result.status_code) # Attach forwardable headers to response forwardable_headers = HeaderDict(result.headers).filter( self.ignored_upstream_headers) for header, value in iteritems(forwardable_headers): response[header] = value return self.middleware.process_response( self, self.request, result, response)
Retrieve the upstream content and build an HttpResponse.
def _process_mappings(self, limit=None): """ This function imports linkage mappings of various entities to genetic locations in cM or cR. Entities include sequence variants, BAC ends, cDNA, ESTs, genes, PAC ends, RAPDs, SNPs, SSLPs, and STSs. Status: NEEDS REVIEW :param limit: :return: """ LOG.info("Processing chromosome mappings") if self.test_mode: graph = self.testgraph else: graph = self.graph line_counter = 0 model = Model(graph) geno = Genotype(graph) raw = '/'.join((self.rawdir, self.files['mappings']['file'])) taxon_num = '7955' taxon_id = 'NCBITaxon:' + taxon_num taxon_label = 'Danio rerio' # genome_id = geno.makeGenomeID(taxon_id) geno.addGenome(taxon_id, taxon_label) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (zfin_num, symbol, so_id, panel_symbol, chromosome, location, metric # , empty ) = row if self.test_mode and zfin_num \ not in self.test_ids['gene'] + self.test_ids['allele']: continue zfin_id = 'ZFIN:' + zfin_num.strip() if re.match(r'ZDB-GENE.*', zfin_num): # assume type and label get added elsewhere model.addClassToGraph(zfin_id, None) geno.addTaxon(taxon_id, zfin_id) elif re.match(r'ZDB-ALT.*', zfin_num): # assume type and label get added elsewhere model.addIndividualToGraph(zfin_id, None) geno.addTaxon(taxon_id, zfin_id) else: continue # skip any of the others # ZFIN don't catalog non-fish things, thankfully model.makeLeader(zfin_id) # make the chromosome class chr_id = makeChromID(chromosome, taxon_id, 'CHR') # chr_label = makeChromLabel(chromosome, taxon_label) geno.addChromosomeClass(chromosome, taxon_id, taxon_label) pinfo = self._get_mapping_panel_info(panel_symbol) panel_label = ' '.join((panel_symbol, pinfo['type'], 'map')) if pinfo is not None: # add the panel as a genome build panel_id = 'ZFIN:' + pinfo['id'] geno.addReferenceGenome(panel_id, panel_label, taxon_id) model.addSynonym(panel_id, panel_symbol) 
model.addDescription(panel_id, pinfo['name']) # add the mapping-panel chromosome chr_inst_id = makeChromID(chromosome, panel_id, 'MONARCH') geno.addChromosomeInstance( chromosome, panel_id, panel_label, chr_id) # add the feature to the mapping-panel chromosome feat = Feature(graph, zfin_id, None, None) feat.addSubsequenceOfFeature(chr_inst_id) # TODO add the coordinates see: # https://github.com/JervenBolleman/FALDO/issues/24 else: LOG.error( "There's a panel (%s) we don't have info for", panel_symbol) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with chromosome mappings") return
This function imports linkage mappings of various entities to genetic locations in cM or cR. Entities include sequence variants, BAC ends, cDNA, ESTs, genes, PAC ends, RAPDs, SNPs, SSLPs, and STSs. Status: NEEDS REVIEW :param limit: :return:
def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mu = 0.0 for x, p in self.d.iteritems(): mu += p * x return mu
Computes the mean of a PMF. Returns: float mean
def accuracy(self, outputs): '''Build a Theano expression for computing the accuracy of graph output. Parameters ---------- outputs : dict of Theano expressions A dictionary mapping network output names to Theano expressions representing the outputs of a computation graph. Returns ------- acc : Theano expression A Theano expression representing the accuracy of the output compared to the target data. ''' output = outputs[self.output_name] predict = TT.argmax(output, axis=-1) correct = TT.eq(predict, self._target) acc = correct.mean() if self._weights is not None: acc = (self._weights * correct).sum() / self._weights.sum() return acc
Build a Theano expression for computing the accuracy of graph output. Parameters ---------- outputs : dict of Theano expressions A dictionary mapping network output names to Theano expressions representing the outputs of a computation graph. Returns ------- acc : Theano expression A Theano expression representing the accuracy of the output compared to the target data.
def acquire(self, timeout=None): """Acquires the lock if in the unlocked state otherwise switch back to the parent coroutine. """ green = getcurrent() parent = green.parent if parent is None: raise MustBeInChildGreenlet('GreenLock.acquire in main greenlet') if self._local.locked: future = create_future(self._loop) self._queue.append(future) parent.switch(future) self._local.locked = green return self.locked()
Acquires the lock if in the unlocked state otherwise switch back to the parent coroutine.
def similar(self): """ iterator over similar artists as :class:`Artist` objects """ if self._similar is None: self._similar = [ Artist(artist['ArtistID'], artist['Name'], self._connection) for artist in self._connection.request( 'artistGetSimilarArtists', {'artistID': self.id}, self._connection.header('artistGetSimilarArtists'))[1]['SimilarArtists']] return iter(self._similar)
iterator over similar artists as :class:`Artist` objects
def read(self, sensors): """Read a set of keys.""" payload = {'destDev': [], 'keys': list(set([s.key for s in sensors]))} if self.sma_sid is None: yield from self.new_session() if self.sma_sid is None: return False body = yield from self._fetch_json(URL_VALUES, payload=payload) # On the first 401 error we close the session which will re-login if body.get('err') == 401: _LOGGER.warning("401 error detected, closing session to force " "another login attempt") self.close_session() return False _LOGGER.debug(json.dumps(body)) for sen in sensors: if sen.extract_value(body): _LOGGER.debug("%s\t= %s %s", sen.name, sen.value, sen.unit) return True
Read a set of keys.
def display(self, image): """ Takes a :py:mod:`PIL.Image` and makes a copy of it for later use/inspection. :param image: Image to display. :type image: PIL.Image.Image """ assert(image.size == self.size) self.image = self.preprocess(image).copy()
Takes a :py:mod:`PIL.Image` and makes a copy of it for later use/inspection. :param image: Image to display. :type image: PIL.Image.Image
def _analyze(self): '''Run-once function to generate analysis over all series, considering both full and partial data. Initializes the self.analysis dict which maps: (non-reference) column/series -> 'full' and/or 'partial' -> stats dict returned by get_xy_dataset_statistics ''' if not self.analysis: for dseries in self.data_series: # Count number of non-NaN rows dseries_count = self.df[dseries].count() assert(len(self.df_pruned) <= dseries_count <= len(self.df) or dseries_count) self.analysis[dseries] = dict( partial = None, full = None, ) # Compute the statistics for the common records stats = get_xy_dataset_statistics_pandas(self.df_pruned, self.reference_series, dseries, fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0, bootstrap_data = False, x_fuzzy_range = 0.1, y_scalar = 1.0, ignore_null_values = True) if (len(self.df_pruned) == len(self.df)): # There are no pruned records so these are actually the full stats self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True)) else: # Store the results for the partial dataset self.analysis[dseries]['partial'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True)) if dseries_count > len(self.df_pruned): # This dataset has records which are not in the pruned dataset stats = get_xy_dataset_statistics_pandas(self.df, self.reference_series, dseries, fcorrect_x_cutoff = 1.0, fcorrect_y_cutoff = 1.0, bootstrap_data = False, x_fuzzy_range = 0.1, y_scalar = 1.0, ignore_null_values = True) self.analysis[dseries]['full'] = dict(data = stats, description = format_stats(stats, floating_point_format = '%0.3f', sci_notation_format = '%.2E', return_string = True)) return self.analysis
Run-once function to generate analysis over all series, considering both full and partial data. Initializes the self.analysis dict which maps: (non-reference) column/series -> 'full' and/or 'partial' -> stats dict returned by get_xy_dataset_statistics
def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - The xml.dom.minidom DOM implementation "pulldom" - The xml.dom.pulldom event stream "etree" - A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). "lxml" - Optimized walker for lxml.etree "genshi" - a Genshi stream implementation - (Currently applies to the "etree" tree type only). A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree.""" treeType = treeType.lower() if treeType not in treeWalkerCache: if treeType in ("dom", "pulldom"): name = "%s.%s" % (__name__, treeType) __import__(name) mod = sys.modules[name] treeWalkerCache[treeType] = mod.TreeWalker elif treeType == "genshi": from . import genshistream treeWalkerCache[treeType] = genshistream.TreeWalker elif treeType == "lxml": from . import lxmletree treeWalkerCache[treeType] = lxmletree.TreeWalker elif treeType == "etree": from . import etree if implementation is None: implementation = default_etree # XXX: NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeWalker return treeWalkerCache.get(treeType)
Get a TreeWalker class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - The xml.dom.minidom DOM implementation "pulldom" - The xml.dom.pulldom event stream "etree" - A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). "lxml" - Optimized walker for lxml.etree "genshi" - a Genshi stream implementation - (Currently applies to the "etree" tree type only). A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree.
def notifyAppend(self, queue, force): ''' Internal notify for sub-queues :returns: If the append is blocked by parent, an EventMatcher is returned, None else. ''' if not force and not self.canAppend(): self.isWaited = True return self._matcher if self.parent is not None: m = self.parent.notifyAppend(self, force) if m is not None: return m self.totalSize = self.totalSize + 1 return None
Internal notify for sub-queues :returns: If the append is blocked by parent, an EventMatcher is returned, None else.
def build_penalties(self): """ builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form """ P = [] for term in self._terms: P.append(term.build_penalties()) return sp.sparse.block_diag(P)
builds the GAM block-diagonal penalty matrix in quadratic form out of penalty matrices specified for each feature. each feature penalty matrix is multiplied by a lambda for that feature. so for m features: P = block_diag[lam0 * P0, lam1 * P1, lam2 * P2, ... , lamm * Pm] Parameters ---------- None Returns ------- P : sparse CSC matrix containing the model penalties in quadratic form
def _patch_distribution_metadata_write_pkg_info(): """ Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local encoding to save the pkg_info. Monkey-patch its write_pkg_info method to correct this undesirable behavior. """ environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2) if not environment_local: return # from Python 3.4 def write_pkg_info(self, base_dir): """Write the PKG-INFO file into the release tree. """ with open(os.path.join(base_dir, 'PKG-INFO'), 'w', encoding='UTF-8') as pkg_info: self.write_pkg_file(pkg_info) distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local encoding to save the pkg_info. Monkey-patch its write_pkg_info method to correct this undesirable behavior.
def render_compressed(self, package, package_name, package_type): """Render HTML for the package. If ``PIPELINE_ENABLED`` is ``True``, this will render the package's output file (using :py:meth:`render_compressed_output`). Otherwise, this will render the package's source files (using :py:meth:`render_compressed_sources`). Subclasses can override this method to provide custom behavior for determining what to render. """ if settings.PIPELINE_ENABLED: return self.render_compressed_output(package, package_name, package_type) else: return self.render_compressed_sources(package, package_name, package_type)
Render HTML for the package. If ``PIPELINE_ENABLED`` is ``True``, this will render the package's output file (using :py:meth:`render_compressed_output`). Otherwise, this will render the package's source files (using :py:meth:`render_compressed_sources`). Subclasses can override this method to provide custom behavior for determining what to render.
def parse_iso_utc(s): """ Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are not supported. :param str s: the ISO-formatted time :rtype: datetime.datetime :return: an timezone-naive datetime object >>> parse_iso_utc('2016-04-27T00:28:04.000Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04X') Traceback (most recent call last): ... ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X """ m = rfc3339_datetime_re().match(s) if not m: raise ValueError( 'Not a valid ISO datetime in UTC: ' + s ) else: fmt = '%Y-%m-%dT%H:%M:%S' + ('.%f' if m.group(7) else '') + 'Z' return datetime.datetime.strptime(s, fmt)
Parses an ISO time with a hard-coded Z for zulu-time (UTC) at the end. Other timezones are not supported. :param str s: the ISO-formatted time :rtype: datetime.datetime :return: an timezone-naive datetime object >>> parse_iso_utc('2016-04-27T00:28:04.000Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04Z') datetime.datetime(2016, 4, 27, 0, 28, 4) >>> parse_iso_utc('2016-04-27T00:28:04X') Traceback (most recent call last): ... ValueError: Not a valid ISO datetime in UTC: 2016-04-27T00:28:04X
def ensure_exists(self): """ Make sure the local repository exists. :raises: :exc:`~exceptions.ValueError` when the local repository doesn't exist yet. """ if not self.exists: msg = "The local %s repository %s doesn't exist!" raise ValueError(msg % (self.friendly_name, format_path(self.local)))
Make sure the local repository exists. :raises: :exc:`~exceptions.ValueError` when the local repository doesn't exist yet.
def get_file_relative_path_by_id(self, id): """ Given an id, get the corresponding file info relative path joined with file name. Parameters: #. id (string): The file unique id string. :Returns: #. relativePath (string): The file relative path joined with file name. If None, it means file was not found. """ for path, info in self.walk_files_info(): if info['id']==id: return path # none was found return None
Given an id, get the corresponding file info relative path joined with file name. Parameters: #. id (string): The file unique id string. :Returns: #. relativePath (string): The file relative path joined with file name. If None, it means file was not found.
def gen_toyn(f, nsample, ntoy, bound, accuracy=10000, quiet=True, **kwd): """ just alias of gentoy for nample and then reshape to ntoy,nsample) :param f: :param nsample: :param bound: :param accuracy: :param quiet: :param kwd: :return: """ return gen_toy(f, nsample * ntoy, bound, accuracy, quiet, **kwd).reshape((ntoy, nsample))
just alias of gentoy for nample and then reshape to ntoy,nsample) :param f: :param nsample: :param bound: :param accuracy: :param quiet: :param kwd: :return:
def sign_out(entry, time_out=None, forgot=False): """Sign out of an existing entry in the timesheet. If the user forgot to sign out, flag the entry. :param entry: `models.Entry` object. The entry to sign out. :param time_out: (optional) `datetime.time` object. Specify the sign out time. :param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten. :return: The signed out entry. """ # noqa if time_out is None: time_out = datetime.today().time() if forgot: entry.forgot_sign_out = True logger.info( '{} forgot to sign out on {}.'.format(entry.user_id, entry.date) ) else: entry.time_out = time_out logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type)) return entry
Sign out of an existing entry in the timesheet. If the user forgot to sign out, flag the entry. :param entry: `models.Entry` object. The entry to sign out. :param time_out: (optional) `datetime.time` object. Specify the sign out time. :param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten. :return: The signed out entry.
def get_rollup_caps(self, id=None, params=None): """ `<>`_ :arg id: The ID of the index to check rollup capabilities on, or left blank for all jobs """ return self.transport.perform_request( "GET", _make_path("_rollup", "data", id), params=params )
`<>`_ :arg id: The ID of the index to check rollup capabilities on, or left blank for all jobs
def A_array(l1,l2,PA,PB,CP,g): """ THO eq. 2.18 and 3.1 >>> A_array(0,0,0,0,0,1) [1.0] >>> A_array(0,1,1,1,1,1) [1.0, -1.0] >>> A_array(1,1,1,1,1,1) [1.5, -2.5, 1.0] """ Imax = l1+l2+1 A = [0]*Imax for i in range(Imax): for r in range(int(floor(i/2)+1)): for u in range(int(floor((i-2*r)/2)+1)): I = i-2*r-u A[I] = A[I] + A_term(i,r,u,l1,l2,PA,PB,CP,g) return A
THO eq. 2.18 and 3.1 >>> A_array(0,0,0,0,0,1) [1.0] >>> A_array(0,1,1,1,1,1) [1.0, -1.0] >>> A_array(1,1,1,1,1,1) [1.5, -2.5, 1.0]
def get_file_url(self, fid, public=None): """ Get url for the file :param string fid: File ID :param boolean public: public or internal url :rtype: string """ try: volume_id, rest = fid.strip().split(",") except ValueError: raise BadFidFormat( "fid must be in format: <volume_id>,<file_name_hash>") file_location = self.get_file_location(volume_id) if public is None: public = self.use_public_url volume_url = file_location.public_url if public else file_location.url url = "http://{volume_url}/{fid}".format( volume_url=volume_url, fid=fid) return url
Get url for the file :param string fid: File ID :param boolean public: public or internal url :rtype: string
def _calibrate_vis(radiance, k): """Convert VIS radiance to reflectance Note: Angle of incident radiation and annual variation of the earth-sun distance is not taken into account. A value of 100% corresponds to the radiance of a perfectly reflecting diffuse surface illuminated at normal incidence when the sun is at its annual-average distance from the Earth. TODO: Take angle of incident radiation (cos sza) and annual variation of the earth-sun distance into account. Reference: [VIS] Args: radiance: Radiance [mW m-2 cm-1 sr-1] k: pi / H, where H is the solar spectral irradiance at annual-average sun-earth distance, averaged over the spectral response function of the detector). Units of k: [m2 um sr W-1] Returns: Reflectance [%] """ logger.debug('Calibrating to reflectance') refl = 100 * k * radiance return refl.clip(min=0)
Convert VIS radiance to reflectance Note: Angle of incident radiation and annual variation of the earth-sun distance is not taken into account. A value of 100% corresponds to the radiance of a perfectly reflecting diffuse surface illuminated at normal incidence when the sun is at its annual-average distance from the Earth. TODO: Take angle of incident radiation (cos sza) and annual variation of the earth-sun distance into account. Reference: [VIS] Args: radiance: Radiance [mW m-2 cm-1 sr-1] k: pi / H, where H is the solar spectral irradiance at annual-average sun-earth distance, averaged over the spectral response function of the detector). Units of k: [m2 um sr W-1] Returns: Reflectance [%]