code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
    """Fail if *asset* is restricted at *algo_datetime*."""
    restricted = self.restrictions.is_restricted(asset, algo_datetime)
    if not restricted:
        return
    self.handle_violation(asset, amount, algo_datetime)
Fail if the asset is in the restricted_list.
def check_path(file_path):
    """Ensure the directory portion of *file_path* exists, creating it if needed.

    :param file_path: path whose parent directories should exist
    """
    directory = os.path.dirname(file_path)
    if directory:
        # exist_ok avoids the check-then-create race of the previous
        # exists()/makedirs() pair when another process creates the
        # directory in between.
        os.makedirs(directory, 0o775, exist_ok=True)
Checks if the directories for this path exist, and creates them in case.
def update(self):
    """Update the IRQ stats (top 5 by rate)."""
    stats = self.get_init_value()
    if not LINUX:
        # IRQ stats are Linux-only; return whatever self.stats holds.
        return self.stats
    if self.input_method == 'local':
        stats = self.irq.get()
    elif self.input_method == 'snmp':
        # SNMP input not implemented for this plugin.
        pass
    # Keep only the five busiest IRQs, sorted by descending rate.
    stats = sorted(stats, key=operator.itemgetter('irq_rate'), reverse=True)[:5]
    self.stats = stats
    return self.stats
Update the IRQ stats.
def show(self, wid=None, text=None, title=None, url=None, verbose=False):
    """Launch an HTML browser in the Results Panel.

    :param wid: Window ID
    :param text: HTML text
    :param title: Window Title
    :param url: URL
    :param verbose: print more
    """
    PARAMS = {}
    for name, value in zip(["id", "text", "title", "url"],
                           [wid, text, title, url]):
        if value:
            PARAMS[name] = value
    return api(url=self.__url + "/show?", PARAMS=PARAMS,
               method="GET", verbose=verbose)
Launch an HTML browser in the Results Panel. :param wid: Window ID :param text: HTML text :param title: Window Title :param url: URL :param verbose: print more
def expected_values(early_mean=early_mean, late_mean=late_mean, switchpoint=switchpoint):
    # Expected disaster counts under a change-point model: the first
    # `switchpoint` years at `early_mean`, the remainder at `late_mean`.
    # (Used as a discrepancy measure for GOF via the Freeman-Tukey statistic.)
    n = len(disasters_array)
    return concatenate(
        (ones(switchpoint) * early_mean, ones(n - switchpoint) * late_mean))
Discrepancy measure for GOF using the Freeman-Tukey statistic
def collapse_degenerate_markers(linkage_records):
    """Collapse consecutive markers sharing linkage group, genetic distance
    and scaffold into single ``[group, start, end, scaffold]`` regions.

    Singleton runs yield a point region (start == end). Markers on
    different scaffolds are never merged, even at the same distance.
    """
    def run_key(record):
        # (linkage group, genetic distance, scaffold) defines degeneracy.
        return record[1], record[2], record[3]

    collapsed = []
    for _, run in itertools.groupby(linkage_records, key=run_key):
        members = list(run)
        first, last = members[0], members[-1]
        # Sanity check: first and last record of the run share the key.
        assert run_key(first) == run_key(last)
        collapsed.append([first[1], first[-1], last[-1], first[3]])
    return collapsed
Group all markers with no genetic distance as distinct features to generate a BED file with. Simple example with sixteen degenerate markers: >>> marker_features = [ ... ['36915_sctg_207_31842', 1, 0, 207, 31842], ... ['36941_sctg_207_61615', 1, 0, 207, 61615], ... ['36956_sctg_207_77757', 1, 0, 207, 77757], ... ['36957_sctg_207_78332', 1, 0, 207, 78332], ... ['36972_sctg_207_94039', 1, 0, 207, 94039], ... ['36788_sctg_207_116303', 1, 0.652, 207, 116303], ... ['36812_sctg_207_158925', 1, 1.25, 207, 158925], ... ['36819_sctg_207_165424', 1, 1.25, 207, 165424], ... ['36828_sctg_207_190813', 1, 1.25, 207, 190813], ... ['36830_sctg_207_191645', 1, 1.25, 207, 191645], ... ['36834_sctg_207_195961', 1, 1.25, 207, 195961], ... ['36855_sctg_207_233632', 1, 1.25, 207, 233632], ... ['36881_sctg_207_258658', 1, 1.25, 207, 258658], ... ['82072_sctg_486_41893', 1, 3.756, 486, 41893], ... ['85634_sctg_516_36614', 1, 3.756, 516, 36614], ... ['85638_sctg_516_50582', 1, 3.756, 516, 50582]] >>> len(marker_features) 16 >>> collapsed_features = collapse_degenerate_markers(marker_features) >>> len(collapsed_features) 5 The degenerate features (identical linkage group, genetic distance and original scaffold) are collapsed into a region: >>> collapsed_features[0] [1, 31842, 94039, 207] The format is [linkage group, start, end, original scaffold]. If a singleton (non-degenerate) feature is found, the region is simply a single point in the genome: >>> collapsed_features[1] [1, 116303, 116303, 207] so 'start' and 'end' are identical. Two markers are not considered degenerate if they belong to different original scaffolds, even if they are in terms of genetic linkage: >>> collapsed_features[2] [1, 158925, 258658, 207] >>> collapsed_features[3:] [[1, 41893, 41893, 486], [1, 36614, 50582, 516]]
def clear_objects(self, obj):
    """Move every object except *obj* out of the task space.

    Useful for task modes with a single active object type
    (see ``self.single_object_mode``) without changing the model definition.
    """
    for obj_name, obj_mjcf in self.mujoco_objects.items():
        if obj_name == obj:
            continue
        else:
            # Teleport the object away by setting the first qpos entry of
            # its joint to 10 -- presumably a coordinate outside the
            # workspace; TODO confirm against the arena bounds.
            sim_state = self.sim.get_state()
            sim_state.qpos[self.sim.model.get_joint_qpos_addr(obj_name)[0]] = 10
            self.sim.set_state(sim_state)
            self.sim.forward()
Clears objects with name @obj out of the task space. This is useful for supporting task modes with single types of objects, as in @self.single_object_mode without changing the model definition.
def register_algorithm(self, alg_id, alg_obj):
    """Register a new Algorithm for use when creating and verifying tokens.

    :param alg_id: identifier to register the algorithm under
    :param alg_obj: the ``Algorithm`` instance
    :raises ValueError: if *alg_id* already has a handler
    :raises TypeError: if *alg_obj* is not an ``Algorithm``
    """
    if alg_id in self._algorithms:
        raise ValueError('Algorithm already has a handler.')
    if not isinstance(alg_obj, Algorithm):
        raise TypeError('Object is not of type `Algorithm`')
    self._algorithms[alg_id] = alg_obj
    self._valid_algs.add(alg_id)
Registers a new Algorithm for use when creating and verifying tokens.
def write_classifier(self, clf):
    """Serialize *clf* to ``classifier.pkl`` inside the repository directory.

    :param clf: classifier object to pickle
    """
    # Pickle is a binary protocol: the file must be opened in binary mode
    # ('wb'); text mode ('w') raises TypeError under Python 3.
    with open(os.path.join(self.repopath, 'classifier.pkl'), 'wb') as fp:
        pickle.dump(clf, fp)
Writes classifier object to pickle file
def setmode(mode):
    """Set the pin-numbering mode; must be called before any other call.

    :param mode: one of ``GPIO.BOARD``, ``GPIO.BCM``, ``GPIO.SUNXI``, or a
        `dict`/`object` representing a custom pin mapping.
    """
    # Anything subscriptable is treated as a custom pin mapping.
    if hasattr(mode, '__getitem__'):
        set_custom_pin_mappings(mode)
        mode = CUSTOM
    # NOTE(review): assert is stripped under ``python -O``; an explicit
    # ValueError would be a more robust validation.
    assert mode in [BCM, BOARD, SUNXI, CUSTOM]
    global _mode
    _mode = mode
You must call this method prior to using all other calls. :param mode: the mode, one of :py:attr:`GPIO.BOARD`, :py:attr:`GPIO.BCM`, :py:attr:`GPIO.SUNXI`, or a `dict` or `object` representing a custom pin mapping.
def _generate_sequences_for_ngram(self, t1, t2, ngram, covered_spans): self._logger.debug('Generating sequences for n-gram "{}"'.format( ngram)) pattern = re.compile(re.escape(ngram)) context_length = len(ngram) t1_spans = [match.span() for match in pattern.finditer(t1)] t2_spans = [match.span() for match in pattern.finditer(t2)] sequences = [] self._logger.debug(t1) for t1_span in t1_spans: for t2_span in t2_spans: if self._is_inside(t1_span, t2_span, covered_spans): self._logger.debug( 'Skipping match due to existing coverage') continue sequence = self._generate_sequence( t1, t1_span, t2, t2_span, context_length, covered_spans) if sequence: sequences.append(sequence) return sequences
Generates aligned sequences for the texts `t1` and `t2`, based around `ngram`. Does not generate sequences that occur within `covered_spans`. :param t1: text content of first witness :type t1: `str` :param t2: text content of second witness :type t2: `str` :param ngram: n-gram to base sequences on :type ngram: `str` :param covered_spans: lists of start and end indices for parts of the texts already covered by a sequence :type covered_spans: `list` of two `list`s of 2-`tuple` of `int`
def _add_delta(self, other):
    """Add a timedelta-like, Tick, or TimedeltaIndex-like object to self,
    yielding a new PeriodArray.

    Parameters
    ----------
    other : {timedelta, np.timedelta64, Tick, TimedeltaIndex,
             ndarray[timedelta64]}

    Returns
    -------
    result : PeriodArray
    """
    # Adding a timedelta is only well-defined for tick-based (fixed)
    # frequencies; otherwise raise IncompatibleFrequency.
    if not isinstance(self.freq, Tick):
        _raise_on_incompatible(self, other)
    new_ordinals = super()._add_delta(other)
    return type(self)(new_ordinals, freq=self.freq)
Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new PeriodArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : PeriodArray
def get_session():
    """Create a requests session to access the REST API.

    :return: requests session with retries and auth headers configured
    :rtype: Session
    """
    config = PyquilConfig()
    session = requests.Session()
    # Retry transient gateway errors on POSTs up to 3 times with backoff;
    # raise_on_status=False returns the final response instead of raising.
    # NOTE(review): `method_whitelist` was renamed `allowed_methods` in
    # urllib3 1.26 and removed in 2.0 -- confirm the pinned urllib3 version.
    retry_adapter = HTTPAdapter(max_retries=Retry(total=3,
                                                  method_whitelist=['POST'],
                                                  status_forcelist=[502, 503, 504, 521, 523],
                                                  backoff_factor=0.2,
                                                  raise_on_status=False))
    session.mount("http://", retry_adapter)
    session.mount("https://", retry_adapter)
    # Credentials come from the user's pyQuil config.
    session.headers.update({"Accept": "application/octet-stream",
                            "X-User-Id": config.user_id,
                            "X-Api-Key": config.api_key})
    session.headers.update({
        'Content-Type': 'application/json; charset=utf-8'
    })
    return session
Create a requests session to access the REST API :return: requests session :rtype: Session
def destroy(self, prefix_names=None):
    """Destroy the given prefixes and remove any leftover files once no
    prefixes remain.

    Args:
        prefix_names (list of str): prefix names to destroy; if None
            (default) all prefixes are destroyed.
    """
    if prefix_names is None:
        # Recurse once with the full set of known prefixes.
        self.destroy(prefix_names=self.prefixes.keys())
        return
    for prefix_name in prefix_names:
        if prefix_name == 'current' and self.current in prefix_names:
            # The current prefix also appears under its real name;
            # skip the alias so it is not destroyed twice.
            continue
        elif prefix_name == 'current':
            # Resolve the 'current' alias to the actual prefix name.
            prefix_name = self.current
        self.get_prefix(prefix_name).destroy()
        self.prefixes.pop(prefix_name)
    if self.prefixes:
        self._update_current()
    if not self.prefixes:
        # Nothing left: remove the whole working directory.
        shutil.rmtree(self.path)
Destroy all the given prefixes and remove any left files if no more prefixes are left Args: prefix_names(list of str): list of prefix names to destroy, if None passed (default) will destroy all of them
def iteritems(self):
    """Return an iterator over the items of this ConfigMap."""
    typed_maps = (self._pb.StringMap, self._pb.IntMap,
                  self._pb.FloatMap, self._pb.BoolMap)
    return chain.from_iterable(m.items() for m in typed_maps)
Returns an iterator over the items of ConfigMap.
def from_data(cls, data):
    """Create a list of Messages from deserialized epubcheck json output.

    :param dict data: decoded epubcheck json data
    :return list[Message]: list of messages
    """
    filename = data['checker']['filename']
    result = []
    for message in data['messages']:
        for loc in message['locations']:
            where = loc['path']
            if not where.startswith(filename):
                where = filename + '/' + where
            # -1 means "not applicable" for line/column.
            if loc['line'] != -1:
                where += ':{}'.format(loc['line'])
            if loc['column'] != -1:
                where += ':{}'.format(loc['column'])
            result.append(cls(message['ID'], message['severity'], where,
                              message['message'], message['suggestion']))
    return result
Create a list of Messages from deserialized epubcheck json output. :param dict data: Decoded epubcheck json data :return list[Message]: List of messages
def offset(requestContext, seriesList, factor):
    """Add a constant to every non-null datapoint of each series.

    Example::

        &target=offset(Server.instance01.threads.busy,10)
    """
    for series in seriesList:
        series.name = "offset(%s,%g)" % (series.name, float(factor))
        series.pathExpression = series.name
        for index, datapoint in enumerate(series):
            if datapoint is None:
                continue
            series[index] = datapoint + factor
    return seriesList
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to each datapoint. Example:: &target=offset(Server.instance01.threads.busy,10)
def sort_and_distribute(array, splits=2):
    """Distribute the sorted elements of *array* round-robin into *splits*
    groups (alphabetically continuous distribution).

    :raises TypeError: if *array* is not a list/tuple or *splits* not an int
    """
    if not isinstance(array, (list, tuple)):
        raise TypeError("array must be a list")
    if not isinstance(splits, int):
        raise TypeError("splits must be an integer")
    remaining = sorted(array)
    # xrange on Python 2, range on Python 3.
    if sys.version_info < (3, 0):
        myrange = xrange(splits)
    else:
        myrange = range(splits)
    groups = [[] for _ in myrange]
    while remaining:
        for index in myrange:
            if remaining:
                groups[index].append(remaining.pop(0))
    return groups
Sort an array of strings alphabetically, then distribute the elements round-robin into the requested number of groups so each group receives an alphabetically interleaved share.
def save(self, parameterstep=None, simulationstep=None):
    """Save all defined auxiliary control files via the |ControlManager|
    stored in module |pub| (see the accompanying doctest for the output
    format)."""
    par = parametertools.Parameter
    for (modelname, var2aux) in self:
        for filename in var2aux.filenames:
            # NOTE(review): `par` is rebound to individual parameter objects
            # by the inner loop below, so on later iterations
            # parameterstep/simulationstep are looked up on an instance
            # rather than the Parameter class -- confirm this is intended.
            with par.parameterstep(parameterstep), \
                    par.simulationstep(simulationstep):
                lines = [parametertools.get_controlfileheader(
                    modelname, parameterstep, simulationstep)]
                for par in getattr(var2aux, filename):
                    lines.append(repr(par) + '\n')
                hydpy.pub.controlmanager.save_file(filename, ''.join(lines))
Save all defined auxiliary control files. The target path is taken from the |ControlManager| object stored in module |pub|. Hence we initialize one and override its |property| `currentpath` with a simple |str| object defining the test target path: >>> from hydpy import pub >>> pub.projectname = 'test' >>> from hydpy.core.filetools import ControlManager >>> class Test(ControlManager): ... currentpath = 'test_directory' >>> pub.controlmanager = Test() Normally, the control files would be written to disk, of course. But to show (and test) the results in the following doctest, file writing is temporarily redirected via |Open|: >>> from hydpy import dummies >>> from hydpy import Open >>> with Open(): ... dummies.aux.save( ... parameterstep='1d', ... simulationstep='12h') ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file1.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v1 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ test_directory/file2.py ----------------------------------- # -*- coding: utf-8 -*- <BLANKLINE> from hydpy.models.lland_v2 import * <BLANKLINE> simulationstep('12h') parameterstep('1d') <BLANKLINE> eqd1(200.0) eqd2(100.0) <BLANKLINE> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _get_uri(self):
    """Return the URI of an existing service instance.

    Logs a warning (but still attempts the lookup) when the service
    does not exist yet.
    """
    if not self.service.exists():
        logging.warning("Service does not yet exist.")
    return self.service.settings.data['uri']
Will return the uri for an existing instance.
def clear_all():
    """Clear any and all stored state from Orca."""
    for registry in (_TABLES, _COLUMNS, _STEPS, _BROADCASTS, _INJECTABLES,
                     _TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
        registry.clear()
    for memoized in _MEMOIZED.values():
        memoized.value.clear_cached()
    _MEMOIZED.clear()
    logger.debug('pipeline state cleared')
Clear any and all stored state from Orca.
def _handle_tag_salt_auth_creds(self, tag, data):
    """Handle a salt_auth_creds event by updating the shared creds map."""
    key = tuple(data['key'])
    log.debug(
        'Updating auth data for %s: %s -> %s',
        key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
    )
    # Reuse the key built above instead of re-tupling data['key'].
    salt.crypt.AsyncAuth.creds_map[key] = data['creds']
Handle a salt_auth_creds event
def text_has_changed(self, text):
    """Slot called when the line edit's text changes; caches the line
    number, or None for an empty entry."""
    text = to_text_string(text)
    self.lineno = int(text) if text else None
Line edit's text has changed
def in_lamp_reach(p):
    """Check whether XYPoint *p* can be recreated by a Hue lamp, i.e. lies
    inside the gamut triangle spanned by Red, Lime and Blue."""
    v1 = XYPoint(Lime.x - Red.x, Lime.y - Red.y)
    v2 = XYPoint(Blue.x - Red.x, Blue.y - Red.y)
    q = XYPoint(p.x - Red.x, p.y - Red.y)
    # Barycentric coordinates of q in the (v1, v2) basis; compute the
    # shared denominator once instead of twice.
    denom = cross_product(v1, v2)
    s = cross_product(q, v2) / denom
    t = cross_product(v1, q) / denom
    return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)
Check if the provided XYPoint can be recreated by a Hue lamp.
def make_gradients(dims=DEFAULT_DIMS):
    """Make a pair of 0..1 gradients for generating textures from numpy
    primitives.

    Args:
        dims (pair): the dimensions of the surface to create
    Returns:
        pair: a pair of coordinate grids from ``np.meshgrid``.
    """
    x_axis = np.linspace(0.0, 1.0, dims[0])
    y_axis = np.linspace(0.0, 1.0, dims[1])
    return np.meshgrid(x_axis, y_axis)
Makes a pair of gradients to generate textures from numpy primitives. Args: dims (pair): the dimensions of the surface to create Returns: pair: A pair of surfaces.
def safe_unicode_stdin(string):
    """Safely convert *string* to a Unicode string, decoding with
    ``sys.stdin.encoding`` when available.

    If running from a frozen binary, or stdin has no encoding (e.g. a
    pipe), ``utf-8`` is assumed.

    :param variant string: the byte string or Unicode string to convert
    :rtype: string
    """
    if string is None:
        return None
    if not is_bytes(string):
        return string
    encoding = None if FROZEN else sys.stdin.encoding
    if not encoding:
        # Frozen binary or no detectable stdin encoding: assume utf-8.
        # (The old code reached this via a bare `except:` on the TypeError
        # raised by decode(None), which also swallowed unrelated errors.)
        return string.decode("utf-8")
    try:
        return string.decode(encoding)
    except UnicodeDecodeError:
        return string.decode(encoding, "replace")
    except LookupError:
        # Platform reported an unknown codec name; fall back to utf-8.
        return string.decode("utf-8")
Safely convert the given string to a Unicode string, decoding using ``sys.stdin.encoding`` if needed. If running from a frozen binary, ``utf-8`` encoding is assumed. :param variant string: the byte string or Unicode string to convert :rtype: string
def get_context(self):
    """Populate ``self.context_data`` from ``self.context``.

    ``context`` is not required, but when defined it must be a tuple of
    models.
    """
    if not self.context:
        return
    else:
        # NOTE(review): assert disappears under ``python -O``; a TypeError
        # would be a more robust validation.
        assert isinstance(self.context, tuple), 'Expected a Tuple not {0}'.format(type(self.context))
        for model in self.context:
            model_cls = utils.get_model_class(model)
            # Instances are keyed by the snake_case class name.
            key = utils.camel_to_snake(model_cls.__name__)
            self.context_data[key] = self.get_instance_of(model_cls)
Create a dict with the context data context is not required, but if it is defined it should be a tuple
def with_code(self, code):
    """Set a unique error code, defaulting to 'UNKNOWN' when *code* is None.

    Returns this exception to support Builder-style call chaining.

    :param code: a unique error code
    :return: this exception object
    """
    # `is not None` (identity) rather than `!= None` (equality), which can
    # be fooled by objects overriding __eq__.
    self.code = code if code is not None else 'UNKNOWN'
    # NOTE(review): `name` deliberately keeps the raw value (may be None)
    # while `code` gets the default -- confirm this asymmetry is intended.
    self.name = code
    return self
Sets a unique error code. This method returns reference to this exception to implement Builder pattern to chain additional calls. :param code: a unique error code :return: this exception object
def delete_all_volumes(self):
    """Remove every volume; only swarm manager nodes may do this.

    :raises RuntimeError: when not running on a manager node
    """
    if not self._manager:
        raise RuntimeError('Volumes can only be deleted '
                           'on swarm manager nodes')
    for volume in self.get_volume_list():
        self._api_client.remove_volume(volume, force=True)
Remove all the volumes. Only the manager nodes can delete a volume
def _maybe_unstack(self, obj):
    """Unstack *obj* when applying over a multidimensional group.

    Also drops the coordinates that were inserted for the stacking.
    """
    if self._stacked_dim is not None and self._stacked_dim in obj.dims:
        obj = obj.unstack(self._stacked_dim)
        for dim in self._inserted_dims:
            if dim in obj.coords:
                del obj.coords[dim]
    return obj
This gets called if we are applying on an array with a multidimensional group.
def add_row_range_from_keys(
    self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False
):
    """Append a ``RowRange`` built from the given row keys to ``row_ranges``.

    :type start_key: bytes
    :param start_key: (Optional) start key; empty means the empty string.
    :type end_key: bytes
    :param end_key: (Optional) end key; empty means unbounded on the high end.
    :type start_inclusive: bool
    :param start_inclusive: (Optional) whether ``start_key`` is inclusive
        (default True).
    :type end_inclusive: bool
    :param end_inclusive: (Optional) whether ``end_key`` is inclusive
        (default False).
    """
    self.row_ranges.append(
        RowRange(start_key, end_key, start_inclusive, end_inclusive)
    )
Add row range to row_ranges list from the row keys For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_range_from_keys] :end-before: [END bigtable_row_range_from_keys] :type start_key: bytes :param start_key: (Optional) Start key of the row range. If left empty, will be interpreted as the empty string. :type end_key: bytes :param end_key: (Optional) End key of the row range. If left empty, will be interpreted as the empty string and range will be unbounded on the high end. :type start_inclusive: bool :param start_inclusive: (Optional) Whether the ``start_key`` should be considered inclusive. The default is True (inclusive). :type end_inclusive: bool :param end_inclusive: (Optional) Whether the ``end_key`` should be considered inclusive. The default is False (exclusive).
def percentage_of_reoccurring_datapoints_to_all_datapoints(x):
    """Return the fraction of unique values that occur more than once:

        len(values occurring more than once) / len(distinct values)

    i.e. normalized by the number of unique values, in contrast to
    percentage_of_reoccurring_values_to_all_values.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :return: the value of this feature
    :return type: float
    """
    if len(x) == 0:
        return np.nan
    _, counts = np.unique(x, return_counts=True)
    n_unique = counts.shape[0]
    if n_unique == 0:
        return 0
    return np.sum(counts > 1) / float(n_unique)
Returns the percentage of unique values, that are present in the time series more than once. len(different values occurring more than once) / len(different values) This means the percentage is normalized to the number of unique values, in contrast to the percentage_of_reoccurring_values_to_all_values. :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
def node(self, content):
    """Create an XML node for *content*, namespace-qualified as defined by
    the corresponding schema element."""
    ns = content.type.namespace()
    if content.type.form_qualified:
        node = Element(content.tag, ns=ns)
        # Only declare a prefix when the namespace actually has one.
        if ns[0]:
            node.addPrefix(ns[0], ns[1])
    else:
        node = Element(content.tag)
    self.encode(node, content)
    log.debug("created - node:\n%s", node)
    return node
Create an XML node. The XML node is namespace qualified as defined by the corresponding schema element.
def fetch(self):
    """Fetch the recent refs from the remotes.

    Unless ``git-up.fetch.all`` is set to true, only remotes with locally
    existent branches are fetched.
    """
    fetch_kwargs = {'multiple': True}
    fetch_args = []
    if self.is_prune():
        fetch_kwargs['prune'] = True
    if self.settings['fetch.all']:
        fetch_kwargs['all'] = True
    else:
        # '.' denotes the local repository; no need to fetch it.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Nothing left to fetch.
            return
        fetch_args.append(self.remotes)
    try:
        self.git.fetch(*fetch_args, **fetch_kwargs)
    except GitError as error:
        # Re-raise with a friendlier message.
        error.message = "`git fetch` failed"
        raise error
Fetch the recent refs from the remotes. Unless git-up.fetch.all is set to true, all remotes with locally existent branches will be fetched.
def origin_central_asia(origin):
    """Return whether `origin` is located in Central Asia.

    Holds true for Afghanistan, Kazakhstan, Kyrgyzstan, Tajikistan,
    Turkmenistan and Uzbekistan.

    `origin`
        The origin to check.
    """
    return origin_afghanistan(origin) or origin_kazakhstan(origin) \
        or origin_kyrgyzstan(origin) or origin_tajikistan(origin) \
        or origin_turkmenistan(origin) or origin_uzbekistan(origin)
Return whether the origin is located in Central Asia. Holds true for the following countries: * Afghanistan * Kazakhstan * Kyrgyzstan * Tajikistan * Turkmenistan * Uzbekistan `origin` The origin to check.
def get_library_progress(self):
    """Return the reading progress for all books in the Kindle library.

    Returns:
        A mapping of ASINs to ``ReadingProgress`` instances corresponding
        to the books in the current user's library.
    """
    kbp_dict = self._get_api_call('get_library_progress')
    # NOTE: dict.iteritems() is Python 2 only.
    return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp)
            for asin, kbp in kbp_dict.iteritems()}
Returns the reading progress for all books in the kindle library. Returns: A mapping of ASINs to `ReadingProgress` instances corresponding to the books in the current user's library.
def attach(self, container, stdout=True, stderr=True, stream=False,
           logs=False, demux=False):
    """Attach to a container.

    The ``.logs()`` function is a wrapper around this method, which you
    can use instead if you want to fetch/stream container output without
    first retrieving the entire backlog.

    Args:
        container (str): The container to attach to.
        stdout (bool): Include stdout.
        stderr (bool): Include stderr.
        stream (bool): Return container output progressively as an
            iterator of strings, rather than a single string.
        logs (bool): Include the container's previous output.
        demux (bool): Keep stdout and stderr separate.

    Returns:
        By default, the container's output as a single string (two if
        ``demux=True``). If ``stream=True``, an iterator of output
        strings (two iterators if ``demux=True``).

    Raises:
        :py:class:`docker.errors.APIError` if the server returns an error.
    """
    # The API expects 1/0 flags rather than booleans.
    params = {
        'logs': logs and 1 or 0,
        'stdout': stdout and 1 or 0,
        'stderr': stderr and 1 or 0,
        'stream': stream and 1 or 0
    }
    # Request a connection upgrade so output can be read from the raw socket.
    headers = {
        'Connection': 'Upgrade',
        'Upgrade': 'tcp'
    }
    u = self._url("/containers/{0}/attach", container)
    response = self._post(u, headers=headers, params=params, stream=True)
    output = self._read_from_socket(
        response, stream, self._check_is_tty(container), demux=demux)
    if stream:
        return CancellableStream(output, response)
    else:
        return output
Attach to a container. The ``.logs()`` function is a wrapper around this method, which you can use instead if you want to fetch/stream container output without first retrieving the entire backlog. Args: container (str): The container to attach to. stdout (bool): Include stdout. stderr (bool): Include stderr. stream (bool): Return container output progressively as an iterator of strings, rather than a single string. logs (bool): Include the container's previous output. demux (bool): Keep stdout and stderr separate. Returns: By default, the container's output as a single string (two if ``demux=True``: one for stdout and one for stderr). If ``stream=True``, an iterator of output strings. If ``demux=True``, two iterators are returned: one for stdout and one for stderr. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def generate_trunc_gr_magnitudes(bval, mmin, mmax, nsamples):
    """Generate random magnitudes distributed according to a truncated
    Gutenberg-Richter model.

    :param float bval: b-value
    :param float mmin: minimum magnitude
    :param float mmax: maximum magnitude
    :param int nsamples: number of samples
    :returns: vector of generated magnitudes
    """
    u = np.random.uniform(0., 1., nsamples)
    beta = bval * np.log(10.)
    # Inverse-CDF sampling of the truncated exponential magnitude law.
    return mmin + (-1. / beta) * (
        np.log(1. - u * (1 - np.exp(-beta * (mmax - mmin)))))
Generate a random list of magnitudes distributed according to a truncated Gutenberg-Richter model :param float bval: b-value :param float mmin: Minimum Magnitude :param float mmax: Maximum Magnitude :param int nsamples: Number of samples :returns: Vector of generated magnitudes
def execute_command(command=None):
    """Execute *command* and return its output and exit status.

    :param command: the command (list or string) passed to subprocess.Popen
    :return: ((stdout, stderr), returncode)
    """
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # communicate() waits for the process, so no separate wait() is needed;
    # it returns (stdout, stderr) -- the original mislabelled the second
    # element as "stdin".
    stdout, stderr = process.communicate()
    return (stdout, stderr), process.returncode
Execute a command and return a tuple of ((stdout, stderr), returncode).
def _get_assessment_part(self, part_id=None): if part_id is None: return self._assessment_part if part_id not in self._assessment_parts: lookup_session = self._get_assessment_part_lookup_session() self._assessment_parts[part_id] = lookup_session.get_assessment_part(part_id) return self._assessment_parts[part_id]
Gets an AssessmentPart given a part_id. Returns this Section's own part if part_id is None. Make this a private part, so that it doesn't collide with the AssessmentPart.get_assessment_part method, which does not expect any arguments...
def reset(self, collection, jsonFieldInit=None):
    """Replace the current values in the document by those in *jsonFieldInit*."""
    if not jsonFieldInit:
        jsonFieldInit = {}
    self.collection = collection
    self.connection = self.collection.connection
    self.documentsURL = self.collection.documentsURL
    # URL is unknown until the document has been saved.
    self.URL = None
    self.setPrivates(jsonFieldInit)
    self._store = DocumentStore(self.collection, validators=self.collection._fields, initDct=jsonFieldInit)
    # Optionally validate on load, per the collection's settings.
    if self.collection._validation['on_load']:
        self.validate()
    self.modified = True
replaces the current values in the document by those in jsonFieldInit
def inflate_dtype(arr, names):
    """Create a structured dtype for a 2D ndarray with unstructured dtype,
    using *names* as field names; an already-structured dtype is returned
    unchanged."""
    arr = np.asanyarray(arr)
    if has_structured_dt(arr):
        return arr.dtype
    s_dt = arr.dtype
    # One field per name, all sharing the scalar dtype of the input.
    dt = [(n, s_dt) for n in names]
    dt = np.dtype(dt)
    return dt
Create structured dtype from a 2d ndarray with unstructured dtype.
def get_complete_version(version=None):
    """Return a tuple of the graphene version.

    If *version* is non-empty, validate its shape instead of importing it.
    """
    if version is None:
        from graphene import VERSION as version
    else:
        # A version tuple is (major, minor, micro, releaselevel, serial).
        assert len(version) == 5
        assert version[3] in ("alpha", "beta", "rc", "final")
    return version
Returns a tuple of the graphene version. If version argument is non-empty, then checks for correctness of the tuple provided.
def add_sample(self, **data):
    """Add a sample to this series; every key must be a known dimension.
    Missing dimensions are recorded as None."""
    unknown = set(data) - set(self.dimensions)
    if unknown:
        raise KeyError('Dimensions not defined in this series: %s' %
                       ', '.join(unknown))
    for dim in self.dimensions:
        getattr(self, dim).append(data.get(dim))
Add a sample to this series.
def process_announcement( sender_namerec, op, working_dir ):
    """If the announcement is valid (its hash appears in the sender's name
    history), record it to the working directory."""
    node_config = get_blockstack_opts()
    announce_hash = op['message_hash']
    announcer_id = op['announcer_id']
    name_history = sender_namerec['history']
    # Collect every value hash the sender has ever set; the announcement
    # hash must be one of them to be considered authentic.
    allowed_value_hashes = []
    for block_height in name_history.keys():
        for historic_namerec in name_history[block_height]:
            if historic_namerec.get('value_hash'):
                allowed_value_hashes.append(historic_namerec['value_hash'])
    if announce_hash not in allowed_value_hashes:
        log.warning("Announce hash {} not found in name history for {}".format(announce_hash, announcer_id))
        return
    # The announcement text itself is stored as a zone file.
    zonefiles_dir = node_config.get('zonefiles', None)
    if not zonefiles_dir:
        log.warning("This node does not store zone files, so no announcement can be found")
        return
    announce_text = get_atlas_zonefile_data(announce_hash, zonefiles_dir)
    if announce_text is None:
        log.warning("No zone file {} found".format(announce_hash))
        return
    log.critical("ANNOUNCEMENT (from %s): %s\n------BEGIN MESSAGE------\n%s\n------END MESSAGE------\n" % (announcer_id, announce_hash, announce_text))
    store_announcement( working_dir, announce_hash, announce_text )
If the announcement is valid, then immediately record it.
def _get_lowstate(self):
    """Format the incoming request body into a lowstate (list of dicts).

    Returns None for an empty body. A scalar 'arg' value is normalised
    to a single-element list.
    """
    if not self.request.body:
        return
    data = self.deserialize(self.request.body)
    # Keep an untouched copy of what the client sent.
    self.request_payload = copy(data)
    if data and 'arg' in data and not isinstance(data['arg'], list):
        data['arg'] = [data['arg']]
    if not isinstance(data, list):
        lowstate = [data]
    else:
        lowstate = data
    return lowstate
Format the incoming data into a lowstate object
def url_generator(network=None, path=''):
    """Return a generator of http URLs with *path*, one per host on *network*.

    `network` -- IP address and subnet mask compatible with
    ``ipaddress.ip_network``.
    `path` -- path portion of a URL as defined by url(un)split.

    :raises NotImplementedError: for networks larger than 256 addresses
    """
    net = ipaddress.ip_network(network)
    if net.num_addresses > 256:
        logger.error('Scan limited to 256 addresses, requested %d.',
                     net.num_addresses)
        raise NotImplementedError
    if net.num_addresses > 1:
        hosts = net.hosts()
    else:
        # A single-address network has no "hosts"; use its own address.
        hosts = [net.network_address]
    return (urlunsplit(('http', str(ip), path, '', '')) for ip in hosts)
Return a tuple of URLs with path, one for each host on network `network` - IP address and subnet mask compatible with [ipaddress library](https://docs.python.org/3/library/ipaddress.html#ipaddress.ip_network) `path` - Path portion of a URL as defined by [url(un)split](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit)
def send_reset_password_instructions(user):
    """Send the reset password instructions email for the specified user.

    :param user: The user to send the instructions to
    """
    token = generate_reset_password_token(user)
    url = url_for("login.reset_password", token=token)
    # url_root ends with '/' and url starts with '/': drop one slash.
    reset_link = request.url_root[:-1] + url
    subject = _("Password reset instruction for {site_name}").format(
        site_name=current_app.config.get("SITE_NAME")
    )
    mail_template = "password_reset_instructions"
    send_mail(subject, user.email, mail_template,
              user=user, reset_link=reset_link)
Send the reset password instructions email for the specified user. :param user: The user to send the instructions to
def handle_unsubscribe(self, request):
    """Handle an Unsubscribe request from outside. Called with lock taken."""
    subscription = self._subscription_keys.pop(request.generate_key())
    # Drop the root element of the path when delegating to the tree.
    return self._tree.handle_unsubscribe(subscription, subscription.path[1:])
Handle a Unsubscribe request from outside. Called with lock taken
def _has_converged(pi_star, pi): node_count = pi.shape[0] EPS = 10e-6 for i in range(node_count): if pi[i] - pi_star[i] > EPS: return False return True
Checks if the random walk has converged. :param pi_star: the new vector :param pi: the old vector :returns: bool-- True iff pi has converged.
def getCursor(self):
    """Get a dictionary cursor for executing queries, connecting lazily."""
    if self.connection is None:
        self.Connect()
    return self.connection.cursor(MySQLdb.cursors.DictCursor)
Get a Dictionary Cursor for executing queries
def log_commit( self, block_id, vtxindex, op, opcode, op_data ):
    """Log a committed operation at (block_id, vtxindex)."""
    debug_op = self.sanitize_op( op_data )
    # History can be huge; drop it from the log line.
    debug_op.pop('history', None)
    formatted = ", ".join("%s='%s'" % (k, debug_op[k])
                          for k in sorted(debug_op.keys()))
    log.debug("COMMIT %s (%s) at (%s, %s) data: %s",
              opcode, op, block_id, vtxindex, formatted)
    return
Log a committed operation
def handle_button(self, event, event_type):
    """Convert the button information from Quartz into evdev format."""
    mouse_button_number = self._get_mouse_button_number(event)
    # Types 25/26 look like "other" button down/up events; the button
    # number is folded into the fractional part to index self.codes
    # -- presumably matching how the codes table is keyed; confirm.
    if event_type in (25, 26):
        event_type = event_type + (mouse_button_number * 0.1)
    event_type_string, event_code, value, scan = self.codes[event_type]
    if event_type_string == "Key":
        scan_event, key_event = self.emulate_press(
            event_code, scan, value, self.timeval)
        self.events.append(scan_event)
        self.events.append(key_event)
    # Also emit a repeat event carrying the click state (e.g. double click).
    click_state = self._get_click_state(event)
    repeat = self.emulate_repeat(click_state, self.timeval)
    self.events.append(repeat)
Convert the button information from quartz into evdev format.
def deserialize(self, data):
    """Deserialize *data* based on the request's Content-Type header.

    Sends a 406 for unsupported content types and a 400 for bodies that
    fail to parse.
    """
    ct_in_map = {
        'application/x-www-form-urlencoded': self._form_loader,
        'application/json': salt.utils.json.loads,
        'application/x-yaml': salt.utils.yaml.safe_load,
        'text/yaml': salt.utils.yaml.safe_load,
        'text/plain': salt.utils.json.loads
    }
    try:
        # NOTE(review): the cgi module is deprecated since Python 3.11 and
        # removed in 3.13 -- confirm the supported Python range.
        value, parameters = cgi.parse_header(self.request.headers['Content-Type'])
        return ct_in_map[value](tornado.escape.native_str(data))
    except KeyError:
        # Unknown Content-Type (or missing header).
        self.send_error(406)
    except ValueError:
        # Body failed to parse.
        self.send_error(400)
Deserialize the data based on request content type headers
def write(self, fd, bytes):
    """Write a block of bytes to an open file descriptor (opened with one
    of the writing modes).

    :param fd: file descriptor
    :param bytes: bytes block to write
    :note: don't overload the node with large byte chunks; for large file
        uploads check the upload method.
    """
    args = {
        'fd': fd,
        # The transport is JSON, so the payload is base64-encoded.
        'block': base64.encodebytes(bytes).decode(),
    }
    return self._client.json('filesystem.write', args)
Write a block of bytes to an open file descriptor (that is open with one of the writing modes :param fd: file descriptor :param bytes: bytes block to write :return: :note: don't overkill the node with large byte chunks, also for large file upload check the upload method.
def dump(cls):
    """Dump data into a nested OrderedDict keyed by this class's name.

    .. versionadded:: 0.0.2
    """
    data = OrderedDict(cls.Items())
    data["__classname__"] = cls.__name__
    for attr, subclass in cls.Subclasses():
        data[attr] = subclass.dump()
    return OrderedDict([(cls.__name__, data)])
Dump data into a dict. .. versionadded:: 0.0.2
def recompute_tabs_titles(self):
    """Update labels on all tabs.

    Required when `self.abbreviate` changes.
    """
    use_vte_titles = self.settings.general.get_boolean("use-vte-titles")
    if not use_vte_titles:
        # User prefers static titles; nothing to recompute.
        return
    for terminal in self.get_notebook().iter_terminals():
        page_num = self.get_notebook().page_num(terminal.get_parent())
        self.get_notebook().rename_page(page_num, self.compute_tab_title(terminal), False)
Updates labels on all tabs. This is required when `self.abbreviate` changes
def children(self):
    """Former, misleading name for :attr:`descendants` (deprecated)."""
    from warnings import warn
    warn("Deprecated. Use Scraper.descendants.", DeprecationWarning)
    yield from self.descendants
Former, misleading name for descendants.
def find(self, name):
    """Find a descendant node whose name matches *name*, depth-first.

    :param name: name of the child node to look up
    :type name: str
    :return: DocumentNode or None
    """
    for child in self.children:
        if child.name == name:
            return child
        match = child.find(name)
        if match:
            return match
Finds a possible child whose name match the name parameter. :param name: name of the child node to look up :type name: str :return: DocumentNode or None
def _check_for_boolean_pair_reduction(self, kwargs): if 'reduction_forcing_pairs' in self._meta_data: for key1, key2 in self._meta_data['reduction_forcing_pairs']: kwargs = self._reduce_boolean_pair(kwargs, key1, key2) return kwargs
Check if boolean pairs should be reduced in this resource.
def evalall(self, loc=None, defaults=None):
    """Evaluates all option values in environment `loc`.

    :See: `eval()`
    """
    self.check()
    if defaults is None:
        defaults = cma_default_options
    # 'N' (the dimension) must be known to resolve popsize-dependent options.
    if 'N' in loc:
        popsize = self('popsize', defaults['popsize'], loc)
        for k in list(self.keys()):
            k = self.corrected_key(k)
            self.eval(k, defaults[k], {'N':loc['N'], 'popsize':popsize})
    self._lock_setting = True
    return self
Evaluates all option values in environment `loc`. :See: `eval()`
def generate_schema_mapping(resolver, schema_uri, depth=1):
    """Recursively iterate a JSON schema and generate an ES mapping that
    encapsulates it."""
    root_visitor = SchemaVisitor({'$ref': schema_uri}, resolver)
    return _generate_schema_mapping(root_visitor, set(), depth)
Try and recursively iterate a JSON schema and to generate an ES mapping that encasulates it.
def format_stackdriver_json(record, message):
    """Format a LogRecord in Stackdriver fluentd format.

    :rtype: str
    :returns: JSON str to be written to the log file.
    """
    fraction, whole = math.modf(record.created)
    payload = {
        "message": message,
        "timestamp": {"seconds": int(whole), "nanos": int(fraction * 1e9)},
        "thread": record.thread,
        "severity": record.levelname,
    }
    return json.dumps(payload)
Helper to format a LogRecord in in Stackdriver fluentd format. :rtype: str :returns: JSON str to be written to the log file.
def text_entry(self, prompt, message=None, allow_blank=False, strip=True, rofi_args=None, **kwargs):
    """Prompt the user to enter a piece of text.

    Parameters
    ----------
    prompt: string
        Prompt to display to the user.
    message: string, optional
        Message to display under the entry line.
    allow_blank: Boolean
        Whether to allow blank entries.
    strip: Boolean
        Whether to strip leading and trailing whitespace from the entered
        value.

    Returns
    -------
    string, or None if the dialog was cancelled.
    """
    def validate(raw):
        cleaned = raw.strip() if strip else raw
        if not cleaned and not allow_blank:
            return None, "A value is required."
        return cleaned, None
    return self.generic_entry(prompt, validate, message, rofi_args, **kwargs)
Prompt the user to enter a piece of text. Parameters ---------- prompt: string Prompt to display to the user. message: string, optional Message to display under the entry line. allow_blank: Boolean Whether to allow blank entries. strip: Boolean Whether to strip leading and trailing whitespace from the entered value. Returns ------- string, or None if the dialog was cancelled.
def dispatch(self, *args, **kwargs):
    """Dispatch the request; only staff members can access this view
    (presumably enforced by a decorator on this method -- confirm in the
    class definition)."""
    return super(GetAppListJsonView, self).dispatch(*args, **kwargs)
Only staff members can access this view
def diffse(self, x1, x2):
    """Standard error of the difference between the function values at
    x1 and x2.
    """
    f1, f1se = self(x1)
    f2, f2se = self(x2)
    if self.paired:
        # Paired design: use the cached per-replica evaluations and take
        # the sample std of their differences over sqrt(N).
        fx1 = np.array(self.cache[tuple(x1)])
        fx2 = np.array(self.cache[tuple(x2)])
        diffse = np.std(fx1-fx2, ddof=1)/self.N**.5
        return diffse
    else:
        # Independent evaluations: combine the two standard errors in
        # quadrature.
        return (f1se**2 + f2se**2)**.5
Standard error of the difference between the function values at x1 and x2
def server_list(endpoint_id):
    """Executor for `globus endpoint server list`.

    Prints either the endpoint's S3 URL (for S3 endpoints, signalled by
    the sentinel string "S3") or a table of its servers.
    """
    endpoint, server_list = get_endpoint_w_server_list(endpoint_id)
    if server_list == "S3":
        # S3 endpoints have no real server list; show the single S3 URL
        # as a record instead of a table.
        server_list = {"s3_url": endpoint["s3_url"]}
        fields = [("S3 URL", "s3_url")]
        text_format = FORMAT_TEXT_RECORD
    else:
        fields = (
            ("ID", "id"),
            # A server without a URI is a Globus Connect Personal one.
            ("URI", lambda s: (s["uri"] or "none (Globus Connect Personal)")),
        )
        text_format = FORMAT_TEXT_TABLE
    formatted_print(server_list, text_format=text_format, fields=fields)
Executor for `globus endpoint server list`
def card(self):
    """Get the entry's OpenGraph card.

    Returns a lazily-evaluated proxy that renders the card from the
    entry body (or its "more" continuation) when the content is
    Markdown; otherwise an inert proxy around None.
    """
    body, more, is_markdown = self._entry_content
    return TrueCallableProxy(
        self._get_card, body or more) if is_markdown else CallableProxy(None)
Get the entry's OpenGraph card
def get_virtualenv_env_data(mgr):
    """Find kernel specs from virtualenv environments.

    :param mgr: manager supplying the search configuration
        (``find_virtualenv_envs``, ``virtualenv_env_dirs``, name
        templates) and a logger.
    :return: dict ``{name -> (resourcedir, kernel spec)}``; empty when
        virtualenv discovery is disabled.
    """
    if not mgr.find_virtualenv_envs:
        return {}
    mgr.log.debug("Looking for virtualenv environments in %s...",
                  mgr.virtualenv_env_dirs)
    env_paths = find_env_paths_in_basedirs(mgr.virtualenv_env_dirs)
    mgr.log.debug("Scanning virtualenv environments for python kernels...")
    env_data = convert_to_env_data(mgr=mgr,
                                   env_paths=env_paths,
                                   validator_func=validate_IPykernel,
                                   activate_func=_get_env_vars_for_virtualenv_env,
                                   name_template=mgr.virtualenv_prefix_template,
                                   display_name_template=mgr.display_name_template,
                                   name_prefix="")
    return env_data
Finds kernel specs from virtualenv environments env_data is a structure {name -> (resourcedir, kernel spec)}
def theme(self, text):
    """Wrap *text* in the configured theme color and bright styling."""
    prefix = self.theme_color + self.BRIGHT
    return prefix + text + self.RESET
Theme style.
def product(sequence, initial=1):
    """Multiply all items of *sequence* together, starting from *initial*.

    Like the built-in ``sum``, but for multiplication.

    :param sequence: any iterable of values supporting multiplication.
    :param initial: starting value; returned unchanged for an empty
        iterable.
    :raises TypeError: if *sequence* is not iterable.
    """
    # Bug fix: the original used isinstance(sequence, collections.Iterable);
    # that alias was deprecated since 3.3 and removed in Python 3.10
    # (use collections.abc). EAFP via iter() is equivalent and
    # version-proof, preserving the original TypeError message.
    try:
        iterator = iter(sequence)
    except TypeError:
        raise TypeError("'{}' object is not iterable".format(type(sequence).__name__))
    return reduce(operator.mul, iterator, initial)
like the built-in sum, but for multiplication.
def execute_code_block(elem, doc):
    """Execute a code block by passing it to the selected executor.

    Args:
        elem: the AST code-block element.
        doc: the document (used to select the executor).

    Returns:
        Combined stdout/stderr of the command as a string.
    """
    command = select_executor(elem, doc).split(' ')
    code = elem.text
    # Blocks tagged 'plt' are rewritten so their matplotlib figure gets
    # saved instead of shown interactively.
    if 'plt' in elem.attributes or 'plt' in elem.classes:
        code = save_plot(code, elem)
    command.append(code)
    if 'args' in elem.attributes:
        # Extra whitespace-separated command-line arguments.
        for arg in elem.attributes['args'].split():
            command.append(arg)
    # Optional working directory for the subprocess.
    cwd = elem.attributes['wd'] if 'wd' in elem.attributes else None
    return subprocess.run(command, encoding='utf8', stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, cwd=cwd).stdout
Executes a code block by passing it to the executor. Args: elem The AST element. doc The document. Returns: The output of the command.
def associate_blocks(blocks, layout_pairs, start_peb_num):
    """Group block indexes with appropriate layout pairs.

    Arguments:
    List:blocks        -- List of block objects.
    List:layout_pairs  -- List of grouped layout blocks.
    Int:start_peb_num  -- Number of the PEB to start from.
                          NOTE(review): currently unused in the body —
                          confirm whether it should influence grouping.

    Returns:
    List -- the layout pairs, each extended in place with its
    image-sequence-sorted associated block list.
    """
    seq_blocks = []
    for layout_pair in layout_pairs:
        # Collect the blocks sharing the image sequence number of the
        # pair's first layout block and attach them to the pair.
        seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)
        layout_pair.append(seq_blocks)
    return layout_pairs
Group block indexes with appropriate layout pairs Arguments: List:blocks -- List of block objects List:layout_pairs -- List of grouped layout blocks Int:start_peb_num -- Number of the PEB to start from. Returns: List -- Layout block pairs grouped with associated block ranges.
def write_config(ip, mac, single, double, long, touch):
    """Write the current configuration of a myStrom button."""
    click.echo("Write configuration to device %s" % ip)
    payload = {
        'single': single,
        'double': double,
        'long': long,
        'touch': touch,
    }
    response = requests.post(
        'http://{}/{}/{}/'.format(ip, URI, mac), data=payload,
        timeout=TIMEOUT)
    # Only report success; failures are silently ignored, matching the
    # original behavior.
    if response.status_code == 200:
        click.echo("Configuration of %s set" % mac)
Write the current configuration of a myStrom button.
def add_user_to_group(self, GroupID, UserID):
    """Add a user to a group."""
    log.info('Add User %s to Group %s' % (UserID, GroupID))
    endpoint = 'groups/%s/add_user/%s.json' % (GroupID, UserID)
    self.put(endpoint)
Add a user to a group.
def schema_columns(self):
    """Return column information only from this schema.

    :return: list of property dicts, one per ``Table.Column`` child of
        the schema term, each augmented with ``pos``, ``name`` and
        ``header``.
    """
    t = self.schema_term
    columns = []
    if t:
        for i, c in enumerate(t.children):
            if c.term_is("Table.Column"):
                p = c.all_props
                p['pos'] = i  # ordinal position among the schema children
                p['name'] = c.value
                p['header'] = self._name_for_col_term(c, i)
                columns.append(p)
    return columns
Return column information only from this schema
def rvs(self, *args, **kwargs):
    """Draw random variates from the underlying KDE.

    Parameters
    ----------
    size: int, optional (default=1)
        Number of samples to draw.
    random_state: optional (default=None)
        Seed or random state forwarded to the KDE sampler.
    """
    size = kwargs.pop('size', 1)
    # Bug fix: this previously popped 'size' a second time, so a caller's
    # random_state keyword was silently ignored and sampling was never
    # reproducible.
    random_state = kwargs.pop('random_state', None)
    return self._kde.sample(n_samples=size, random_state=random_state)
Draw random variates. Parameters ---------- size: int, optional (default=1) random_state: optional (default=None)
def last_metric_eval(multiplexer, session_name, metric_name):
    """Return the last evaluation of the given metric at the given session.

    Args:
      multiplexer: EventMultiplexer giving access to exported summary data.
      session_name: String session name.
      metric_name: metric name proto used to resolve the (run, tag) pair.

    Returns:
      A (wall_time, step, value) 3-tuple for the most recent evaluation.

    Raises:
      KeyError: if the session does not have the metric.
    """
    try:
        run, tag = run_tag_from_session_and_metric(session_name, metric_name)
        tensor_events = multiplexer.Tensors(run=run, tag=tag)
    except KeyError as e:
        raise KeyError(
            'Can\'t find metric %s for session: %s. Underlying error message: %s'
            % (metric_name, session_name, e))
    # Events come back in order; the last one is the latest evaluation.
    last_event = tensor_events[-1]
    return (last_event.wall_time,
            last_event.step,
            tf.make_ndarray(last_event.tensor_proto).item())
Returns the last evaluations of the given metric at the given session. Args: multiplexer: The EventMultiplexer instance allowing access to the exported summary data. session_name: String. The session name for which to get the metric evaluations. metric_name: api_pb2.MetricName proto. The name of the metric to use. Returns: A 3-tuples, of the form [wall-time, step, value], denoting the last evaluation of the metric, where wall-time denotes the wall time in seconds since UNIX epoch of the time of the evaluation, step denotes the training step at which the model is evaluated, and value denotes the (scalar real) value of the metric. Raises: KeyError if the given session does not have the metric.
def as_csv(self):
    """Return a CSV representation of ``self.rows`` as a string."""
    from io import StringIO
    buffer = StringIO()
    writer = csv.writer(buffer)
    writer.writerows(self.rows)
    return buffer.getvalue()
Return a CSV representation as a string
def create_organization_team(self, auth, org_name, name, description=None, permission="read"):
    """Creates a new team of the organization.

    :param auth.Authentication auth: authentication object, must be admin-level
    :param str org_name: Organization user name
    :param str name: Full name of the team
    :param str description: Description of the team
    :param str permission: Team permission, can be read, write or admin,
        default is read
    :return: a representation of the created team
    :rtype: GogsTeam
    :raises NetworkFailure: if there is an error communicating with the server
    :raises ApiFailure: if the request cannot be serviced
    """
    data = {
        "name": name,
        "description": description,
        "permission": permission
    }
    url = "/admin/orgs/{o}/teams".format(o=org_name)
    response = self.post(url, auth=auth, data=data)
    return GogsTeam.from_json(response.json())
Creates a new team of the organization. :param auth.Authentication auth: authentication object, must be admin-level :param str org_name: Organization user name :param str name: Full name of the team :param str description: Description of the team :param str permission: Team permission, can be read, write or admin, default is read :return: a representation of the created team :rtype: GogsTeam :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
def log(db, job_id, timestamp, level, process, message):
    """Write a log record in the database.

    :param db: a Db instance (callable executing SQL with parameters)
    :param job_id: a job ID
    :param timestamp: timestamp to store in the log record
    :param level: logging level to store in the log record
    :param process: process ID to store in the log record
    :param message: message to store in the log record
    """
    # '?X' is a dbapi placeholder — presumably expanded to one '?' per
    # element of the parameter tuple; confirm against the Db implementation.
    db('INSERT INTO log (job_id, timestamp, level, process, message) '
       'VALUES (?X)', (job_id, timestamp, level, process, message))
Write a log record in the database. :param db: a :class:`openquake.server.dbapi.Db` instance :param job_id: a job ID :param timestamp: timestamp to store in the log record :param level: logging level to store in the log record :param process: process ID to store in the log record :param message: message to store in the log record
def get_default_domain(request, get_name=True):
    """Get the default domain object to use when creating Identity objects.

    Returns the domain context if set, otherwise the domain of the
    logged-on user.

    :param get_name: whether to look up the domain name from Keystone
        when the context isn't set. False avoids an unnecessary
        Keystone call when only the domain ID is needed.
    """
    domain_id = request.session.get("domain_context", None)
    domain_name = request.session.get("domain_context_name", None)
    # No explicit domain context: fall back to the user's own domain
    # (only meaningful on Keystone v3+).
    if VERSIONS.active >= 3 and domain_id is None:
        domain_id = request.user.user_domain_id
        domain_name = request.user.user_domain_name
    if get_name and not request.user.is_federated:
        try:
            domain = domain_get(request, domain_id)
            domain_name = domain.name
        except exceptions.NotAuthorized:
            # Non-admin users cannot fetch domain details; keep whatever
            # name we already have.
            LOG.debug("Cannot retrieve domain information for "
                      "user (%(user)s) that does not have an admin role "
                      "on project (%(project)s)",
                      {'user': request.user.username,
                       'project': request.user.project_name})
        except Exception:
            LOG.warning("Unable to retrieve Domain: %s", domain_id)
    # Wrap in an API dict so callers get a uniform object either way.
    domain = base.APIDictWrapper({"id": domain_id,
                                  "name": domain_name})
    return domain
Gets the default domain object to use when creating Identity object. Returns the domain context if is set, otherwise return the domain of the logon user. :param get_name: Whether to get the domain name from Keystone if the context isn't set. Setting this to False prevents an unnecessary call to Keystone if only the domain ID is needed.
def parse_expression(source: str) -> ExpressionSource:
    """Return a tuple with expression type and expression body.

    :param source: expression text, e.g. ``once:{...}``, ``{...}`` or
        ``{{...}}``.
    :raises ExpressionError: if *source* does not match
        ``EXPRESSION_REGEX``.
    """
    if not is_expression(source):
        msg = 'Expression is not valid. Expression should be matched with regular expression: {0}'\
            .format(EXPRESSION_REGEX)
        raise ExpressionError(msg, source)
    if not source.startswith('{'):
        # Explicit "type:{...}" prefix: split type off the body.
        [type_, source] = source.split(':', 1)
    elif source.endswith('}}'):
        # "{{...}}" denotes a two-way binding.
        type_ = 'twoways'
    else:
        # Bare "{...}" is a one-way binding.
        type_ = 'oneway'
    # Strip the outer braces from the body.
    return (type_, source[1:-1])
Returns tuple with expression type and expression body
def log(self, workunit, level, *msg_elements):
    """Log a message to every registered reporter.

    Each element of *msg_elements* is either a message string or a
    (message, detail) pair.
    """
    # The lock serializes reporter dispatch across threads.
    with self._lock:
        for reporter in self._reporters.values():
            reporter.handle_log(workunit, level, *msg_elements)
Log a message. Each element of msg_elements is either a message string or a (message, detail) pair.
def process_events(self, data):
    """Decode a batch of serialized events from the native side and
    dispatch each entry tagged 'event' to ``handle_event``.
    """
    events = bridge.loads(data)
    if self.debug:
        print("======== Py <-- Native ======")
        for event in events:
            print(event)
        print("===========================")
    for event in events:
        # Entries whose first element is not 'event' are ignored here.
        if event[0] == 'event':
            self.handle_event(event)
The native implementation must use this call to deliver batched events to Python; each entry tagged 'event' is dispatched to handle_event.
def subscribed(self, build_root, handlers):
    """Bulk subscribe generator for StreamableWatchmanClient.

    :param str build_root: the build_root for all subscriptions.
    :param iterable handlers: a sequence of Watchman.EventHandler
        namedtuple objects.
    :yields: (subscription_name, subscription_event) tuples; (None, None)
        for keep-alives and subscription confirmations.
    :raises WatchmanCrash: on any underlying WatchmanError.
    """
    command_list = [['subscribe', build_root, handler.name, handler.metadata]
                    for handler in handlers]
    self._logger.debug('watchman command_list is: {}'.format(command_list))
    try:
        for event in self.client.stream_query(command_list):
            if event is None:
                # Keep-alive from the client: nothing to report.
                yield None, None
            elif 'subscribe' in event:
                # Watchman confirming one of our subscriptions.
                self._logger.info('confirmed watchman subscription: {}'.format(event))
                yield None, None
            elif 'subscription' in event:
                # An actual file event for a named subscription.
                yield event.get('subscription'), event
            else:
                self._logger.warning('encountered non-subscription event: {}'.format(event))
    except self.client.WatchmanError as e:
        # Surface client failures as a crash so callers can restart.
        raise self.WatchmanCrash(e)
Bulk subscribe generator for StreamableWatchmanClient. :param str build_root: the build_root for all subscriptions. :param iterable handlers: a sequence of Watchman.EventHandler namedtuple objects. :yields: a stream of tuples in the form (subscription_name: str, subscription_event: dict).
def playlist(self, playlist_id, *, include_songs=False):
    """Get information about a playlist.

    Parameters:
        playlist_id (str): A playlist ID.
        include_songs (bool, Optional): Include songs from the playlist
            in the returned dict. Default: ``False``

    Returns:
        dict: Playlist information, or None if no playlist matches.
    """
    for candidate in self.playlists(include_songs=include_songs):
        if candidate['id'] == playlist_id:
            return candidate
    return None
Get information about a playlist. Parameters: playlist_id (str): A playlist ID. include_songs (bool, Optional): Include songs from the playlist in the returned dict. Default: ``False`` Returns: dict: Playlist information.
def _complete_statement(self, line: str) -> Statement:
    """Keep accepting lines of input until the command is complete.

    Handles multiline commands by re-prompting with the continuation
    prompt until a terminator arrives. pseudo_raw_input() returns a
    literal 'eof' when the input pipe runs out; that quirk is handled
    inline below for backwards compatibility with the stdlib cmd.
    """
    while True:
        try:
            statement = self.statement_parser.parse(line)
            if statement.multiline_command and statement.terminator:
                # Multiline command is terminated: we're done.
                break
            if not statement.multiline_command:
                # Single-line command: complete as-is.
                break
        except ValueError:
            # Unclosed quotation marks: only keep accepting input if
            # this is a multiline command.
            statement = self.statement_parser.parse_command_only(line)
            if not statement.multiline_command:
                raise
        try:
            self.at_continuation_prompt = True
            newline = self.pseudo_raw_input(self.continuation_prompt)
            if newline == 'eof':
                # Input pipe ran out; synthesize a newline and echo it.
                newline = '\n'
                self.poutput(newline)
            line = '{}\n{}'.format(statement.raw, newline)
        except KeyboardInterrupt as ex:
            if self.quit_on_sigint:
                raise ex
            else:
                # Abort the multiline command, keep the REPL alive.
                self.poutput('^C')
                statement = self.statement_parser.parse('')
                break
        finally:
            self.at_continuation_prompt = False
    if not statement.command:
        raise EmptyStatement()
    return statement
Keep accepting lines of input until the command is complete. There is some pretty hacky code here to handle some quirks of self.pseudo_raw_input(). It returns a literal 'eof' if the input pipe runs out. We can't refactor it because we need to retain backwards compatibility with the standard library version of cmd.
def update(self, *sources, follow_symlinks: bool=False, maximum_depth: int=20):
    """Add one or more ClassFile sources to the class loader.

    A directory is traversed (up to *maximum_depth*) and every file under
    it is added to the lookup table; a .jar/.zip has its index added; a
    ClassFile instance is added directly to both the lookup table and the
    class cache.

    :param sources: one or more ClassFile sources to be added.
    :param follow_symlinks: follow symlinks while traversing directories.
    :param maximum_depth: maximum sub-directory traversal depth.
    """
    for source in sources:
        if isinstance(source, self.klass):
            # Already-parsed class: index and cache it by class name.
            self.path_map[source.this.name.value] = source
            self.class_cache[source.this.name.value] = source
            continue
        source = str(source)
        if source.lower().endswith(('.zip', '.jar')):
            # Archive: map every member name to the open ZipFile.
            zf = ZipFile(source, 'r')
            self.path_map.update(zip(zf.namelist(), repeat(zf)))
        elif os.path.isdir(source):
            walker = _walk(
                source,
                follow_links=follow_symlinks,
                maximum_depth=maximum_depth
            )
            for root, dirs, files in walker:
                for file_ in files:
                    path_full = os.path.join(root, file_)
                    # Key by path relative to the source root so lookups
                    # match archive member names.
                    path_suffix = os.path.relpath(path_full, source)
                    self.path_map[path_suffix] = path_full
Add one or more ClassFile sources to the class loader. If a given source is a directory path, it is traversed up to the maximum set depth and all files under it are added to the class loader lookup table. If a given source is a .jar or .zip file it will be opened and the file index added to the class loader lookup table. If a given source is a ClassFile or a subclass, it's immediately added to the class loader lookup table and the class cache. :param sources: One or more ClassFile sources to be added. :param follow_symlinks: True if symlinks should be followed when traversing filesystem directories. [default: False] :param maximum_depth: The maximum sub-directory depth when traversing filesystem directories. If set to `None` no limit will be enforced. [default: 20]
def match_taking_agent_id(self, agent_id, match):
    """Set the taking-agent ``Id`` for this query.

    arg: agent_id (osid.id.Id): an agent ``Id``
    arg: match (boolean): ``true`` for a positive match, ``false``
        for a negative match
    """
    value = str(agent_id)
    positive = bool(match)
    self._add_match('takingAgentId', value, positive)
Sets the agent ``Id`` for this query. arg: agent_id (osid.id.Id): an agent ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``agent_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def get_custom_value(self, key):
    """Return the first and only custom parameter matching *key*.

    Marks the key as handled. Returns None when no value is stored;
    raises RuntimeError when more than one value exists.
    """
    self._handled.add(key)
    matches = self._lookup[key]
    if len(matches) > 1:
        raise RuntimeError(
            "More than one value for this customParameter: {}".format(key)
        )
    return matches[0] if matches else None
Return the first and only custom parameter matching the given name.
def points_possible(self, include_hidden=False):
    """Return the total points possible for this project.

    Hidden testables are excluded unless *include_hidden* is True.
    """
    total = 0
    for testable in self.testables:
        if testable.is_hidden and not include_hidden:
            continue
        total += sum(case.points for case in testable.test_cases)
    return total
Return the total points possible for this project.
def verify_scores(scores):
    """Coerce *scores* to a 2-D numpy array and check all values are finite.

    Raises ValueError when any value is NaN or infinite; 1-D input is
    promoted to a column vector.
    """
    arr = np.array(scores, copy=False)
    if not np.all(np.isfinite(arr)):
        raise ValueError("scores contains invalid values. " +
                         "Please check that all values are finite.")
    if arr.ndim == 1:
        arr = arr[:, np.newaxis]
    return arr
Ensures that scores is stored as a numpy array and checks that all values are finite.
def start_output (self):
    """Write start of checking info (HTML header plus optional intro)."""
    super(HtmlLogger, self).start_output()
    # Substitution values for the HTML_HEADER template: page metadata
    # plus the configured color scheme.
    header = {
        "encoding": self.get_charset_encoding(),
        "title": configuration.App,
        "body": self.colorbackground,
        "link": self.colorlink,
        "vlink": self.colorlink,
        "alink": self.colorlink,
        "url": self.colorurl,
        "error": self.colorerror,
        "valid": self.colorok,
        "warning": self.colorwarning,
    }
    self.write(HTML_HEADER % header)
    self.comment("Generated by %s" % configuration.App)
    if self.has_part('intro'):
        self.write(u"<h2>"+configuration.App+
                   "</h2><br/><blockquote>"+
                   configuration.Freeware+"<br/><br/>"+
                   (_("Start checking at %s") % strformat.strtime(self.starttime))+
                   os.linesep+"<br/>")
        self.check_date()
    self.flush()
Write start of checking info.
def get_ebuio_headers(request):
    """Return a dict of the EBUio headers (X-Plugit-*) from *request*,
    with the 'X-Plugit-' prefix stripped from each key.
    """
    return {
        name[9:]: value
        for name, value in request.headers
        if name.startswith('X-Plugit-')
    }
Return a dict with ebuio headers
def getpaths(self,libname):
    """Yield candidate paths where the library *libname* might be found.

    Absolute paths are yielded as-is; otherwise platform-specific
    locations are tried first, then ``ctypes.util.find_library``.
    """
    if os.path.isabs(libname):
        yield libname
    else:
        for path in self.getplatformpaths(libname):
            yield path
        # Fall back to the system's library search.
        path = ctypes.util.find_library(libname)
        if path:
            yield path
Return a list of paths where the library might be found.
def splitN(line, n):
    """Split a line n times; returns the n sub-lines as an (n, 4) array.

    *line* is an (x0, y0, x1, y1) segment; the dtype of the result
    follows the type of its first coordinate.
    """
    x0, y0, x1, y1 = line
    segments = empty((n, 4), dtype=type(line[0]))
    step_x = (x1 - x0) / n
    step_y = (y1 - y0) / n
    # Accumulate the running endpoint so consecutive sub-lines share
    # their boundary point exactly.
    cx, cy = x0, y0
    for row in segments:
        row[0] = cx
        row[1] = cy
        cx += step_x
        cy += step_y
        row[2] = cx
        row[3] = cy
    return segments
split a line n times returns n sublines
def _validate(self):
    """Perform checks on the configuration to assert its validity.

    Raises:
        ConfigurationException: if an unknown key is present or any of
            the structured sections fail their own validation.
    """
    # Every configured key must be known (i.e. present in DEFAULTS).
    for key in self:
        if key not in DEFAULTS:
            raise exceptions.ConfigurationException(
                'Unknown configuration key "{}"! Valid configuration keys are'
                " {}".format(key, list(DEFAULTS.keys()))
            )
    # Structured sections get their own dedicated validators.
    validate_queues(self["queues"])
    validate_bindings(self["bindings"])
    validate_client_properties(self["client_properties"])
Perform checks on the configuration to assert its validity Raises: ConfigurationException: If the configuration is invalid.
def add_compound(self, compound):
    """Append a compound dict to this variant's ``compounds`` list.

    Args:
        compound (dict): a compound dictionary.
    """
    message = "Adding compound {0} to variant {1}".format(
        compound, self['variant_id'])
    logger.debug(message)
    self['compounds'].append(compound)
Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary
def list_subscriptions(self, client_id, client_secret):
    """List current webhook event subscriptions for the application.

    http://strava.github.io/api/partner/v3/events/#list-push-subscriptions

    :param client_id: application's ID, obtained during registration
    :type client_id: int
    :param client_secret: application's secret, obtained during registration
    :type client_secret: str
    :return: An iterator of Subscription objects.
    :rtype: BatchedResultsIterator
    """
    # The fetcher is re-invoked lazily by the iterator for each page.
    result_fetcher = functools.partial(self.protocol.get,
                                       '/push_subscriptions',
                                       client_id=client_id,
                                       client_secret=client_secret,
                                       use_webhook_server=True)
    return BatchedResultsIterator(entity=model.Subscription,
                                  bind_client=self,
                                  result_fetcher=result_fetcher)
List current webhook event subscriptions in place for the current application. http://strava.github.io/api/partner/v3/events/#list-push-subscriptions :param client_id: application's ID, obtained during registration :type client_id: int :param client_secret: application's secret, obtained during registration :type client_secret: str :return: An iterator of :class:`stravalib.model.Subscription` objects. :rtype: :class:`BatchedResultsIterator`