code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _merge_lib_dict(d1, d2):
    """Merge lib_dict `d2` into lib_dict `d1` in place.

    Existing entries in `d1` have their requiring-set updated with the
    corresponding set from `d2`; new keys are inserted as-is.
    """
    for required, requirings in d2.items():
        try:
            d1[required].update(requirings)
        except KeyError:
            d1[required] = requirings
    return None
Merges lib_dict `d2` into lib_dict `d1`
def add(self, dist):
    """Add `dist` to the distribution map, recording its location as a new
    search path when it is not already known (the current working directory
    is always accepted even if it is a site dir)."""
    location = dist.location
    is_new = location not in self.paths and (
        location not in self.sitedirs or location == os.getcwd()
    )
    if is_new:
        self.paths.append(location)
        self.dirty = True
    Environment.add(self, dist)
Add `dist` to the distribution map
def delete_downloads():
    """Delete all downloaded examples to free space or update the files.

    Removes the examples directory wholesale and recreates it empty.
    """
    examples_dir = vtki.EXAMPLES_PATH
    shutil.rmtree(examples_dir)
    os.makedirs(examples_dir)
    return True
Delete all downloaded examples to free space or update the files
def data_size(metadata):
    """Calculate the size of a torrent from parsed metadata.

    Single-file torrents store a top-level 'length'; multi-file torrents
    store per-file lengths that are summed here.
    """
    info = metadata['info']
    try:
        return info['length']
    except KeyError:
        return sum(f['length'] for f in info['files'])
Calculate the size of a torrent based on parsed metadata.
def snils(self) -> str:
    """Generate a SNILS number using the official checksum algorithm.

    Nine random digits are generated; the control number is the sum of
    each digit weighted by its position from the right (weights 9..1),
    reduced per the SNILS rules, and appended as exactly two digits.

    :return: An 11-character SNILS string, e.g. '41917492600'.

    Bug fix: the original appended ``str(control)`` for control numbers
    below 100, so values below 10 produced a 10-character SNILS; the
    control number is now always zero-padded to two digits.
    """
    digits = [self.random.randint(0, 9) for _ in range(9)]
    control = sum(d * w for d, w in zip(digits, range(9, 0, -1)))
    if control in (100, 101):
        control = 0
    elif control > 101:
        control %= 101
        if control == 100:
            control = 0
    code = ''.join(str(d) for d in digits)
    return code + '{:02}'.format(control)
Generate snils with special algorithm. :return: SNILS. :Example: 41917492600.
def guess_encoding(request):
    """Guess the encoding of a request without the slow chardet process.

    Prefers an explicit charset from the Content-Type header, falls back
    to latin-1 for HTML and UTF-8 otherwise.
    """
    ctype = request.headers.get('content-type')
    if not ctype:
        LOGGER.warning("%s: no content-type; headers are %s",
                       request.url, request.headers)
        return 'utf-8'
    match = re.search(r'charset=([^ ;]*)(;| |$)', ctype)
    if match:
        return match[1]
    return 'iso-8859-1' if ctype.startswith('text/html') else 'utf-8'
Try to guess the encoding of a request without going through the slow chardet process
def walk(self, root="~/"):
    """Emulate os.walk against the Device Cloud filedata store.

    Yields ``(dirpath, directories, files)`` tuples recursively in
    pre-order (depth first, top down), starting from `root`.
    """
    root = validate_type(root, *six.string_types)
    query_fd_path = root if root.endswith("/") else root + "/"
    directories = []
    files = []
    for fd_object in self.get_filedata(fd_path == query_fd_path):
        bucket = directories if fd_object.get_type() == "directory" else files
        bucket.append(fd_object)
    yield (root, directories, files)
    # Recurse into each subdirectory after yielding this level.
    for directory in directories:
        for entry in self.walk(directory.get_full_path()):
            yield entry
Emulation of os.walk behavior against Device Cloud filedata store This method will yield tuples in the form ``(dirpath, FileDataDirectory's, FileData's)`` recursively in pre-order (depth first from top down). :param str root: The root path from which the search should commence. By default, this is the root directory for this device cloud account (~). :return: Generator yielding 3-tuples of dirpath, directories, and files :rtype: 3-tuple in form (dirpath, list of :class:`FileDataDirectory`, list of :class:`FileDataFile`)
def step(self, action):
    """Run one frame of the NES and return (state, reward, done, info).

    :param action: bitmap determining which buttons to press
    :raises ValueError: if the episode is already done
    """
    if self.done:
        raise ValueError('cannot step in a done environment! call `reset`')
    self.controllers[0][:] = action
    _LIB.Step(self._env)
    reward = self._get_reward()
    self.done = self._get_done()
    info = self._get_info()
    self._did_step(self.done)
    # Clamp the reward into the declared reward range.
    low, high = self.reward_range
    if reward < low:
        reward = low
    elif reward > high:
        reward = high
    return self.screen, reward, self.done, info
Run one frame of the NES and return the relevant observation data. Args: action (byte): the bitmap determining which buttons to press Returns: a tuple of: - state (np.ndarray): next frame as a result of the given action - reward (float) : amount of reward returned after given action - done (boolean): whether the episode has ended - info (dict): contains auxiliary diagnostic information
def validate(self, require_all=True, scale='colors'):
    """Validate the visualization contents.

    :param require_all: when True (default), all of ``data``, ``scales``,
        ``axes`` and ``marks`` must be defined.
    :raises ValidationError: on missing fields or duplicate names.

    NOTE(review): ``super(self.__class__, self)`` recurses infinitely if
    this class is subclassed — confirm the intended base class.
    """
    super(self.__class__, self).validate()
    for elem in ('data', 'scales', 'axes', 'marks'):
        attr = getattr(self, elem)
        if not attr:
            if require_all:
                raise ValidationError(
                    elem + ' must be defined for valid visualization')
            continue
        for entry in attr:
            entry.validate()
        names = [a.name for a in attr]
        if len(names) != len(set(names)):
            raise ValidationError(elem + ' has duplicate names')
Validate the visualization contents. Parameters ---------- require_all : boolean, default True If True (default), then all fields ``data``, ``scales``, ``axes``, and ``marks`` must be defined. The user is allowed to disable this if the intent is to define the elements client-side. If the contents of the visualization are not valid Vega, then a :class:`ValidationError` is raised.
def get_qapp():
    """Return the QApplication instance, creating one if necessary.

    :returns: the shared QApplication
    :rtype: QApplication
    """
    global app
    existing = QtGui.QApplication.instance()
    if existing is None:
        existing = QtGui.QApplication([], QtGui.QApplication.GuiClient)
    app = existing
    return app
Return an instance of QApplication. Creates one if necessary. :returns: a QApplication instance :rtype: QApplication :raises: None
def get_handle():
    """Get the unique FT_Library handle, initializing FreeType on first use.

    :raises RuntimeError: if FT_Init_FreeType returns a non-zero error code.
    """
    global __handle__
    if not __handle__:
        # Lazy, one-time initialization of the FreeType library.
        __handle__ = FT_Library()
        error = FT_Init_FreeType(byref(__handle__))
        if error:
            raise RuntimeError(hex(error))
    return __handle__
Get unique FT_Library handle
def runner_argspec(module=''):
    """Return the argument specification of functions in Salt runner modules.

    :param module: optional glob restricting which runner modules to report
    """
    runner_client = salt.runner.Runner(__opts__)
    return salt.utils.args.argspec_report(runner_client.functions, module)
Return the argument specification of functions in Salt runner modules. .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.runner_argspec state salt '*' sys.runner_argspec http salt '*' sys.runner_argspec Runner names can be specified as globs. .. code-block:: bash salt '*' sys.runner_argspec 'winrepo.*'
def train_step(self, Xi, yi, **fit_params):
    """Perform one optimization step.

    Wraps the single-step training in a loss-returning closure as required
    by optimizers that accept a closure (e.g. LBFGS), and returns the
    accumulated step information.
    """
    accumulator = self.get_train_step_accumulator()

    def closure():
        result = self.train_step_single(Xi, yi, **fit_params)
        accumulator.store_step(result)
        return result['loss']

    self.optimizer_.step(closure)
    return accumulator.get_step()
Prepares a loss function callable and pass it to the optimizer, hence performing one optimization step. Loss function callable as required by some optimizers (and accepted by all of them): https://pytorch.org/docs/master/optim.html#optimizer-step-closure The module is set to be in train mode (e.g. dropout is applied). Parameters ---------- Xi : input data A batch of the input data. yi : target data A batch of the target data. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the train_split call.
def partition_pair(bif_point):
    """Return the node counts (n, m) of the two subtrees at a bifurcation.

    Each child tree is traversed in pre-order and its nodes counted.
    """
    first, second = bif_point.children[0], bif_point.children[1]
    n = float(sum(1 for _ in first.ipreorder()))
    m = float(sum(1 for _ in second.ipreorder()))
    return (n, m)
Calculate the partition pairs at a bifurcation point The number of nodes in each child tree is counted. The partition pairs is the number of bifurcations in the two daughter subtrees at each branch point.
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
    """Gather the top beams, given by `beam_indices`, from `nested`.

    Each tensor in `nested` has shape [batch_size, beam_size, ...]; the
    result contains tensors of shape [batch_size, new_beam_size, ...].
    """
    # Coordinate grid where batch_pos[i, j] == i, paired with the beam
    # index to form (batch, beam) gather coordinates.
    batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
    batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
    coordinates = tf.stack([batch_pos, beam_indices], axis=2)

    def gather_one(state):
        return tf.gather_nd(state, coordinates)

    return nest.map_structure(gather_one, nested)
Gather beams from nested structure of tensors. Each tensor in nested represents a batch of beams, where beam refers to a single search state (beam search involves searching through multiple states in parallel). This function is used to gather the top beams, specified by beam_indices, from the nested tensors. Args: nested: Nested structure (tensor, list, tuple or dict) containing tensors with shape [batch_size, beam_size, ...]. beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each value in beam_indices must be between [0, beam_size), and are not necessarily unique. batch_size: int size of batch new_beam_size: int number of beams to be pulled from the nested tensors. Returns: Nested structure containing tensors with shape [batch_size, new_beam_size, ...]
def set_waypoint_quota(self, waypoint_quota):
    """Set how many waypoint questions must be answered correctly.

    :raises NoAccess: if the metadata marks the field read-only
    :raises InvalidArgument: if the value is not a valid cardinal
    """
    if self.get_waypoint_quota_metadata().is_read_only():
        raise NoAccess()
    is_valid = self.my_osid_object_form._is_valid_cardinal(
        waypoint_quota, self.get_waypoint_quota_metadata())
    if not is_valid:
        raise InvalidArgument()
    self.my_osid_object_form._my_map['waypointQuota'] = waypoint_quota
how many waypoint questions need to be answered correctly
def _decode_value(self, value): if isinstance(value, (int, float, str, bool, datetime)): return value elif isinstance(value, list): return [self._decode_value(item) for item in value] elif isinstance(value, dict): result = {} for key, item in value.items(): result[key] = self._decode_value(item) return result elif isinstance(value, ObjectId): if self._gridfs.exists({"_id": value}): return pickle.loads(self._gridfs.get(value).read()) else: raise DataStoreGridfsIdInvalid() else: raise DataStoreDecodeUnknownType()
Decodes the value by turning any binary data back into Python objects. The method searches for ObjectId values, loads the associated binary data from GridFS and returns the decoded Python object. Args: value (object): The value that should be decoded. Raises: DataStoreDecodingError: An ObjectId was found but the id is not a valid GridFS id. DataStoreDecodeUnknownType: The type of the specified value is unknown. Returns: object: The decoded value as a valid Python object.
def _find_pivot_addr(self, index):
    """Choose a pivot address for slice-based insertion.

    Inserting by slicing can select no addresses; the pivot tells us where
    characters should be added.

    Bug fix: when ``index.start == len(self.addresses)`` the original fell
    through to ``self.addresses[index.start]`` and raised IndexError; the
    comparison is now ``>=`` so insertion at or past the end anchors to
    the last address.
    """
    if not self.addresses or index.start == 0:
        # Nothing usable selected: anchor at the very start of the text.
        return CharAddress('', self.tree, 'text', -1)
    if index.start >= len(self.addresses):
        return self.addresses[-1]
    return self.addresses[index.start]
Inserting by slicing can lead to a situation where no addresses are selected. In that case a pivot address has to be chosen so we know where to add characters.
def calc_qbgz_v1(self):
    """Aggregate base flow released by all "soil type" HRUs plus the net
    precipitation above |SEE| water areas into the state |QBGZ|.

    Basic equation:
        QBGZ = sum(FHRU * QBB) + sum(FHRU * (NKor_SEE - EvI_SEE))
    """
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    sta.qbgz = 0.
    for k in range(con.nhru):
        if con.lnk[k] == SEE:
            # |SEE| areas feed groundwater directly with their net input
            # (which can be negative when evaporation exceeds rainfall).
            sta.qbgz += con.fhru[k]*(flu.nkor[k]-flu.evi[k])
        elif con.lnk[k] not in (WASSER, FLUSS, VERS):
            # Soil-containing HRUs contribute their percolation output;
            # other water areas and sealed surfaces contribute nothing.
            sta.qbgz += con.fhru[k]*flu.qbb[k]
Aggregate the amount of base flow released by all "soil type" HRUs and the "net precipitation" above water areas of type |SEE|. Water areas of type |SEE| are assumed to be directly connected with groundwater, but not with the stream network. This is modelled by adding their (positive or negative) "net input" (|NKor|-|EvI|) to the "percolation output" of the soil containing HRUs. Required control parameters: |Lnk| |NHRU| |FHRU| Required flux sequences: |QBB| |NKor| |EvI| Calculated state sequence: |QBGZ| Basic equation: :math:`QBGZ = \\Sigma(FHRU \\cdot QBB) + \\Sigma(FHRU \\cdot (NKor_{SEE}-EvI_{SEE}))` Examples: The first example shows that |QBGZ| is the area weighted sum of |QBB| from "soil type" HRUs like arable land (|ACKER|) and of |NKor|-|EvI| from water areas of type |SEE|. All other water areas (|WASSER| and |FLUSS|) and also sealed surfaces (|VERS|) have no impact on |QBGZ|: >>> from hydpy.models.lland import * >>> parameterstep() >>> nhru(6) >>> lnk(ACKER, ACKER, VERS, WASSER, FLUSS, SEE) >>> fhru(0.1, 0.2, 0.1, 0.1, 0.1, 0.4) >>> fluxes.qbb = 2., 4.0, 300.0, 300.0, 300.0, 300.0 >>> fluxes.nkor = 200.0, 200.0, 200.0, 200.0, 200.0, 20.0 >>> fluxes.evi = 100.0, 100.0, 100.0, 100.0, 100.0, 10.0 >>> model.calc_qbgz_v1() >>> states.qbgz qbgz(5.0) The second example shows that large evaporation values above a HRU of type |SEE| can result in negative values of |QBGZ|: >>> fluxes.evi[5] = 30 >>> model.calc_qbgz_v1() >>> states.qbgz qbgz(-3.0)
def apply(self, axes="gca"):
    """Apply the style cycle to every line in the specified axes.

    :param axes: a matplotlib axes, or "gca" for the current axes
    """
    if axes == "gca":
        axes = _pylab.gca()
    self.reset()
    for line in axes.get_lines():
        line.set_color(self.get_line_color(1))
        line.set_mfc(self.get_face_color(1))
        line.set_marker(self.get_marker(1))
        line.set_mec(self.get_edge_color(1))
        line.set_linestyle(self.get_linestyle(1))
    _pylab.draw()
Applies the style cycle to the lines in the axes specified
def victims(self, filters=None, params=None):
    """Yield all victims associated with this tag."""
    victim = self._tcex.ti.victim(None)
    results = self.tc_requests.victims_from_tag(
        victim, self.name, filters=filters, params=params
    )
    for item in results:
        yield item
Gets all victims from a tag.
def stage(self, name):
    """Search for a specific stage by its name.

    :param name: name of the stage to search for.
    :return: the first matching stage, or None.
    """
    matches = (candidate for candidate in self.stages()
               if candidate.data.name == name)
    return next(matches, None)
Method for searching for a specific stage by its name. :param name: name of the stage to search for. :return: found stage or None. :rtype: yagocd.resources.stage.StageInstance
def get_value(self, attribute, section, default=""):
    """Return the value of `attribute` in `section`, or `default`.

    Looks up the attribute both under its plain name and under its
    section-namespaced name.
    """
    if not self.attribute_exists(attribute, section):
        return default
    stored = self.__sections[section]
    if attribute in stored:
        value = stored[attribute]
    else:
        namespaced = foundations.namespace.set_namespace(section, attribute)
        if namespaced in stored:
            value = stored[namespaced]
    LOGGER.debug("> Attribute: '{0}', value: '{1}'.".format(attribute, value))
    return value
Returns requested attribute value. Usage:: >>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \ "[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"] >>> sections_file_parser = SectionsFileParser() >>> sections_file_parser.content = content >>> sections_file_parser.parse() <foundations.parsers.SectionsFileParser object at 0x679302423> >>> sections_file_parser.get_value("Attribute 1", "Section A") u'Value A' :param attribute: Attribute name. :type attribute: unicode :param section: Section containing the searched attribute. :type section: unicode :param default: Default return value. :type default: object :return: Attribute value. :rtype: unicode
def export_survey_participant_list(self, instrument, event=None, format='json'):
    """Export the Survey Participant List.

    :param instrument: instrument name (must be a survey instrument)
    :param event: unique event name, only used in longitudinal projects
    :param format: one of json (default), xml, csv
    """
    payload = self.__basepl(content='participantList', format=format)
    payload['instrument'] = instrument
    if event:
        payload['event'] = event
    return self._call_api(payload, 'exp_survey_participant_list')
Export the Survey Participant List Notes ----- The passed instrument must be set up as a survey instrument. Parameters ---------- instrument: str Name of instrument as seen in second column of Data Dictionary. event: str Unique event name, only used in longitudinal projects format: (json, xml, csv), json by default Format of returned data
def create_schema(self, schema):
    """Create the given schema if it does not already exist.

    NOTE(review): `schema` is concatenated directly into the SQL
    statement; if it can come from untrusted input it must be validated
    or quoted.
    """
    if schema in self.schemas:
        return
    self.execute("CREATE SCHEMA " + schema)
Create specified schema if it does not already exist
def _add_id_or_name(flat_path, element_pb, empty_allowed):
    """Append the ID or name of `element_pb` to `flat_path`.

    Exactly one of ID/name must be set, unless `empty_allowed` is True in
    which case neither being set is tolerated.

    :raises ValueError: if both are set, or neither is set while
        ``empty_allowed`` is False.
    """
    id_ = element_pb.id
    name = element_pb.name
    has_id = id_ != 0
    has_name = name != u""
    if has_id and has_name:
        raise ValueError(_BAD_ELEMENT_TEMPLATE.format(id_, name))
    if has_id:
        flat_path.append(id_)
    elif has_name:
        flat_path.append(name)
    elif not empty_allowed:
        raise ValueError(_EMPTY_ELEMENT)
Add the ID or name from an element to a list. :type flat_path: list :param flat_path: List of accumulated path parts. :type element_pb: :class:`._app_engine_key_pb2.Path.Element` :param element_pb: The element containing ID or name. :type empty_allowed: bool :param empty_allowed: Indicates if neither ID or name need be set. If :data:`False`, then **exactly** one of them must be. :raises: :exc:`ValueError` if 0 or 2 of ID/name are set (unless ``empty_allowed=True`` and 0 are set).
def shutdown(self):
    """Forcefully shut down the pool, closing all non-executing connections.

    :raises ConnectionBusyError: if any connection is currently executing
    """
    with self._lock:
        for cid in list(self.connections.keys()):
            conn = self.connections[cid]
            if conn.executing:
                raise ConnectionBusyError(cid)
            if conn.locked:
                conn.free()
            conn.close()
            del self.connections[cid]
Forcefully shutdown the entire pool, closing all non-executing connections. :raises: ConnectionBusyError
def normalize(self, body):
    """Normalize an already-vetted JSON API payload into a flat dict.

    Adds the type as ``rtype``, flattens attributes and relationships
    into the top level, and adds the id as ``rid`` only when present.
    """
    resource = body['data']
    data = {'rtype': resource['type']}
    if 'attributes' in resource:
        data.update(self._normalize_attributes(resource['attributes']))
    if 'relationships' in resource:
        data.update(self._normalize_relationships(resource['relationships']))
    if resource.get('id'):
        data['rid'] = resource['id']
    return data
Invoke the JSON API normalizer Perform the following: * add the type as a rtype property * flatten the payload * add the id as a rid property ONLY if present We don't need to vet the inputs much because the Parser has already done all the work. :param body: the already vetted & parsed payload :return: normalized dict
def find_deck_spawns(provider: Provider, prod: bool=True) -> Iterable[str]:
    """Find deck spawn transactions via `provider`.

    Requires that the deck spawn P2TH address was imported into the local
    node, or that the remote API knows about it.

    NOTE(review): if `provider` is neither an RpcNode nor a
    Cryptoid/Explorer, `decks` is never bound and NameError is raised —
    confirm whether an explicit error is wanted.
    """
    pa_params = param_query(provider.network)
    if isinstance(provider, RpcNode):
        account = "PAPROD" if prod else "PATEST"
        decks = (tx["txid"] for tx in provider.listtransactions(account))
    if isinstance(provider, (Cryptoid, Explorer)):
        address = pa_params.P2TH_addr if prod else pa_params.test_P2TH_addr
        decks = (tx for tx in provider.listtransactions(address))
    return decks
Find deck spawn transactions via the Provider. Requires that the deck spawn P2TH address was imported into the local node, or that the remote API knows about the P2TH address.
def labels(self, hs_dims=None, prune=False):
    """Get labels for the cube slice, optionally pruned per slice."""
    all_labels = self._cube.labels(include_transforms_for_dims=hs_dims)
    labels = all_labels[1:] if self.ca_as_0th else all_labels[-2:]
    if not prune:
        return labels
    # Drop labels whose corresponding prune flag is set, per dimension.
    return [
        [label for label, pruned in zip(dim_labels, dim_prune_inds)
         if not pruned]
        for dim_labels, dim_prune_inds
        in zip(labels, self._prune_indices(hs_dims))
    ]
Get labels for the cube slice, and perform pruning by slice.
def rollforward(self, date):
    """Roll `date` forward to the nearest end of quarter."""
    if self.onOffset(date):
        return date
    return date + QuarterEnd(month=self.month)
Roll date forward to nearest end of quarter
def _expression_to_sql(expression, node, context):
    """Recursively transform a Filter predicate to a SQLAlchemy expression.

    :param expression: compiler expression to transform
    :param node: SqlNode the expression applies to
    :param context: CompilationContext with global compilation state
    :raises NotImplementedError: for unsupported expression types
    """
    transformers = {
        expressions.LocalField: _transform_local_field_to_expression,
        expressions.Variable: _transform_variable_to_expression,
        expressions.Literal: _transform_literal_to_expression,
        expressions.BinaryComposition: _transform_binary_composition_to_expression,
    }
    transformer = transformers.get(type(expression))
    if transformer is None:
        raise NotImplementedError(
            u'Unsupported compiler expression "{}" of type "{}" cannot be converted to SQL '
            u'expression.'.format(expression, type(expression)))
    return transformer(expression, node, context)
Recursively transform a Filter block predicate to its SQLAlchemy expression representation. Args: expression: expression, the compiler expression to transform. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy Expression equivalent to the passed compiler expression.
def convert_to_xml(cls, degrees):
    """Convert a signed angle float to the XML 1/60000th-degree int string.

    The angle is first normalized into [0, 360).
    """
    if degrees < 0.0:
        degrees = degrees % -360 + 360
    elif degrees > 0.0:
        degrees = degrees % 360
    return str(int(round(degrees * cls.DEGREE_INCREMENTS)))
Convert signed angle float like -427.42 to int 60000 per degree. Value is normalized to a positive value less than 360 degrees.
def count_delayed_jobs(cls, names):
    """Return the total number of delayed jobs across the named queues."""
    return sum(queue.delayed.zcard() for queue in cls.get_all(names))
Return the number of all delayed jobs in queues with the given names
def log_likelihood(self):
    """Log-likelihood of the last measurement, computed lazily and cached."""
    if self._log_likelihood is None:
        self._log_likelihood = logpdf(x=self.y, cov=self.S)
    return self._log_likelihood
log-likelihood of the last measurement.
def list():
    """Get a list of update KB ids installed on the machine.

    Returns:
        list: A list of installed update ids.
    """
    ret = _pshell_json('Get-HotFix | Select HotFixID')
    return [item['HotFixID'] for item in ret]
Get a list of updates installed on the machine Returns: list: A list of installed updates CLI Example: .. code-block:: bash salt '*' wusa.list
def _get_match(self, prefix):
    """Return the key that maps to this prefix, or None if unknown."""
    if _cpr_response_re.match(prefix):
        return Keys.CPRResponse
    if _mouse_event_re.match(prefix):
        return Keys.Vt100MouseEvent
    return ANSI_SEQUENCES.get(prefix)
Return the key that maps to this prefix.
def subclass_exception(name, parents, module, attached_to=None):
    """Create an exception subclass.

    If `attached_to` is supplied, the exception is built so it can be
    pickled, assuming the returned class will be added as an attribute to
    the `attached_to` class.
    """
    class_dict = {'__module__': module}
    if attached_to is not None:
        def __reduce__(self):
            return (unpickle_inner_exception, (attached_to, name), self.args)

        def __setstate__(self, args):
            self.args = args

        class_dict.update(__reduce__=__reduce__, __setstate__=__setstate__)
    return type(name, parents, class_dict)
Create exception subclass. If 'attached_to' is supplied, the exception will be created in a way that allows it to be pickled, assuming the returned exception class will be added as an attribute to the 'attached_to' class.
def parse_model_group(path, group):
    """Parse a structured model group as obtained from a YAML file.

    Yields the reaction ids listed directly in the group, followed by the
    ids from any nested groups. Path can be given as a string or a context.
    """
    context = FilePathContext(path)
    for reaction_id in group.get('reactions', []):
        yield reaction_id
    # Recurse into nested groups via the list parser.
    for reaction_id in parse_model_group_list(
            context, group.get('groups', [])):
        yield reaction_id
Parse a structured model group as obtained from a YAML file Path can be given as a string or a context.
def on_selection_changed(self, sel):
    """Handle a change of the user's selection in the tree view."""
    model, self.editing_iter = sel.get_selected()
    if not self.editing_iter:
        # Nothing selected anymore: tear down the currency view.
        self.view.remove_currency_view()
        return
    self.editing_model = model[self.editing_iter][0]
    self.show_curr_model_view(self.editing_model, False)
The user changed selection
def delay(self, params, now=None):
    """Determine the delay until the next request (leaky-bucket limiter).

    Returns the number of seconds to wait, or None if the request may
    proceed immediately.
    """
    if now is None:
        now = time.time()
    # Never let the clock run backwards relative to our last observation.
    if not self.last:
        self.last = now
    elif now < self.last:
        now = self.last
    # Drain the bucket by the elapsed time.
    leaked = now - self.last
    self.last = now
    self.level = max(self.level - leaked, 0)
    overflow = self.level + self.limit.cost - self.limit.unit_value
    if overflow >= self.eps:
        # Bucket would overflow: caller must wait.
        self.next = now + overflow
        return overflow
    self.level += self.limit.cost
    self.next = now
    return None
Determine delay until next request.
def NormalizePath(path, sep="/"):
    """A sane implementation of os.path.normpath.

    Unlike the standard implementation, leading "/" and "//" are treated
    identically. Relative input is allowed, but any "/../" is anchored at
    the top level (e.g. foo/../../../../bar => bar).

    Args:
      path: The path to normalize.
      sep: Separator used.

    Returns:
      A normalized, absolute-looking path (always starts with `sep`).
    """
    if not path:
        return sep
    path = SmartUnicode(path)
    path_list = path.split(sep)
    # Drop a leading ".", ".." or the empty component from a leading sep.
    if path_list[0] in [".", "..", ""]:
        path_list.pop(0)
    i = 0
    while True:
        list_len = len(path_list)
        for i in range(i, len(path_list)):
            if path_list[i] == "." or not path_list[i]:
                # Remove "." and empty components in place.
                path_list.pop(i)
                break
            elif path_list[i] == "..":
                # Remove the ".." and, when one exists, the component
                # preceding it (".." at the top level is simply dropped).
                path_list.pop(i)
                if (i == 1 and path_list[0]) or i > 1:
                    i -= 1
                    path_list.pop(i)
                break
        # A full pass with no removals means the path is fully normalized.
        if len(path_list) == list_len:
            return sep + sep.join(path_list)
A sane implementation of os.path.normpath. The standard implementation treats leading / and // as different leading to incorrect normal forms. NOTE: Its ok to use a relative path here (without leading /) but any /../ will still be removed anchoring the path at the top level (e.g. foo/../../../../bar => bar). Args: path: The path to normalize. sep: Separator used. Returns: A normalized path. In this context normalized means that all input paths that would result in the system opening the same physical file will produce the same normalized path.
def get_child_ids(self):
    """Get the Ids of this part's child parts.

    :raises IllegalState: if the part has no magic children

    NOTE(review): ``proxy`` is passed ``_runtime`` — confirm this is not
    meant to be a proxy object.
    """
    if not self.has_magic_children():
        raise IllegalState()
    if self._child_parts is None:
        self.generate_children()
    child_ids = [part.get_id() for part in self._child_parts]
    return IdList(child_ids,
                  runtime=self.my_osid_object._runtime,
                  proxy=self.my_osid_object._runtime)
gets the ids for the child parts
def tox_configure(config):
    """Check for the presence of the added Travis options and apply them.

    Only active when running under Travis CI: detects the envlist from the
    Travis configuration, auto-generates configs for undeclared envs
    (deprecated), optionally disables ``ignore_outcome``, and warns about
    the deprecated after-all feature.
    """
    # Do nothing outside of a Travis build.
    if 'TRAVIS' not in os.environ:
        return
    ini = config._cfg
    # Only guess the envlist when neither TOXENV nor `-e` was given.
    if 'TOXENV' not in os.environ and not config.option.env:
        envlist = detect_envlist(ini)
        undeclared = set(envlist) - set(config.envconfigs)
        if undeclared:
            print('Matching undeclared envs is deprecated. Be sure all the '
                  'envs that Tox should run are declared in the tox config.',
                  file=sys.stderr)
            autogen_envconfigs(config, undeclared)
        config.envlist = envlist
    if override_ignore_outcome(ini):
        for envconfig in config.envconfigs.values():
            envconfig.ignore_outcome = False
    if config.option.travis_after:
        print('The after all feature has been deprecated. Check out Travis\' '
              'build stages, which are a better solution. '
              'See https://tox-travis.readthedocs.io/en/stable/after.html '
              'for more details.', file=sys.stderr)
Check for the presence of the added options.
def remove(self, row):
    """Remove `row` from the live list and remember it as deleted."""
    self._rows.remove(row)
    self._deleted_rows.add(row)
Removes the row from the list.
def process(self, job):
    """Process a job inside a sandbox borrowed from the pool."""
    sandbox = self.sandboxes.pop(0)
    try:
        with Worker.sandbox(sandbox):
            job.sandbox = sandbox
            job.process()
    finally:
        # Always drop the greenlet bookkeeping and return the sandbox to
        # the pool, even if the job raised.
        self.greenlets.pop(job.jid, None)
        self.sandboxes.append(sandbox)
Process a job
def cat_data(data_kw):
    """Concatenate per-ticker frames with ticker as the outer column level.

    Args:
        data_kw: dict mapping ticker -> pd.DataFrame

    Returns:
        pd.DataFrame with (ticker, field) MultiIndex columns.
    """
    if not data_kw:
        return pd.DataFrame()
    frames = [
        data.assign(ticker=ticker)
            .set_index('ticker', append=True)
            .unstack('ticker')
            .swaplevel(0, 1, axis=1)
        for ticker, data in data_kw.items()
    ]
    return pd.DataFrame(pd.concat(frames, axis=1))
Concatenate data with ticker as sub column index Args: data_kw: key = ticker, value = pd.DataFrame Returns: pd.DataFrame Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2}) >>> sample.columns MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['ticker', None]) >>> r = sample.transpose().iloc[:, :2] >>> r.index.names = (None, None) >>> r 2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00 BHP AU price 31.08 31.10 volume 10,166.00 69,981.00 RIO AU price 70.81 70.78 volume 4,749.00 6,762.00
def lemmatize(self):
    """Return the lemma of each word in this WordList.

    Uses NLTKPunktTokenizer for tokenization, which may differ slightly
    from the TextBlob.words property.
    """
    lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
    raw_text = " ".join(self) + "."
    lemmas = lemmatizer.lemmatize(raw_text)
    return self.__class__([Word(lemma, tag) for lemma, tag in lemmas])
Return the lemma of each word in this WordList. Currently using NLTKPunktTokenizer() for all lemmatization tasks. This might cause slightly different tokenization results compared to the TextBlob.words property.
def get_create_batch_env_fun(batch_env_fn, time_limit):
    """Factory for a dopamine environment initialization function.

    Args:
        batch_env_fn: function(in_graph: bool) -> batch environment.
        time_limit: time-step limit for each episode.
    """
    def create_env_fun(game_name=None, sticky_actions=None):
        # The dopamine interface passes these arguments; they are unused.
        del game_name, sticky_actions
        env = batch_env_fn(in_graph=False)
        env = ResizeBatchObservation(env)
        return DopamineBatchEnv(env, max_episode_steps=time_limit)

    return create_env_fun
Factory for dopamine environment initialization function. Args: batch_env_fn: function(in_graph: bool) -> batch environment. time_limit: time steps limit for environment. Returns: function (with optional, unused parameters) initializing environment.
def name(self):
    """Get a name that uniquely identifies this environment."""
    python = self._python
    if python.startswith('pypy'):
        # Strip the leading 'py' so e.g. 'pypy3.6' becomes 'py3.6'.
        python = python[2:]
    return environment.get_env_name(self.tool_name, python,
                                    self._requirements)
Get a name to uniquely identify this environment.
def context(self, id):
    """Return the circuit breaker for `id`, creating it on first use."""
    circuit = self.circuits.get(id)
    if circuit is None:
        circuit = self.factory(self.clock, self.log.getChild(id),
                               self.error_types, self.maxfail,
                               self.reset_timeout, self.time_unit,
                               backoff_cap=self.backoff_cap,
                               with_jitter=self.with_jitter)
        self.circuits[id] = circuit
    return circuit
Return a circuit breaker for the given ID.
def id(opts):
    """Return a unique, stable ID for this proxy minion.

    This ID MUST NOT CHANGE while the proxy is running, or the
    salt-master may stop talking to this minion.
    """
    response = salt.utils.http.query(
        opts['proxy']['url'] + 'id', decode_type='json', decode=True)
    return response['dict']['id'].encode('ascii', 'ignore')
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion
def down(self, migration_id):
    """Roll back applied migrations down to `migration_id`."""
    if not self.check_directory():
        return
    for migration in self.get_migrations_to_down(migration_id):
        logger.info('Rollback migration %s' % migration.filename)
        module = self.load_migration_file(migration.filename)
        down_fn = getattr(module, 'down', None)
        if down_fn is not None:
            down_fn(self.db)
        else:
            logger.info('No down method on %s' % migration.filename)
        # Forget that this migration was ever applied.
        self.collection.remove({'filename': migration.filename})
Rollback to migration.
def nt_yielder(self, graph, size):
    """Yield (triple_count, ntriples) chunks of `size` from `graph`.

    Used to send chunks of data to the VIVO SPARQL API.
    """
    for batch in self.make_batch(size, graph):
        chunk = Graph()
        chunk += batch
        yield (len(chunk), chunk.serialize(format='nt'))
Yield n sized ntriples for a given graph. Used in sending chunks of data to the VIVO SPARQL API.
def interpolate(self):
    """Interpolate the tiepoints; return (latitudes, longitudes)."""
    self.latitude = self._interp(self.lat_tiepoint)
    self.longitude = self._interp(self.lon_tiepoint)
    return (self.latitude, self.longitude)
Do the interpolation and return resulting longitudes and latitudes.
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None,
                   seq_align="start", pad_value="N", encode_type="one_hot"):
    """Convert a list of genetic sequences into an encoded array.

    # Arguments
        seq_vec: iterable of strings (genetic sequences).
        vocab: list of equal-length "words" (e.g. ["A", "C", "G", "T"]).
        neutral_vocab: values used to pad or represent unknowns ("N").
        maxlen: optional trim length (applied wrt `seq_align`).
        seq_align: 'start' or 'end' — which end to align to.
        encode_type: "one_hot" or "token" (tokens are 1-based; 0 is
            reserved for neutral_vocab).

    # Returns
        np.ndarray of shape (n, maxlen, len(vocab)) for "one_hot" or
        (n, maxlen) for "token".
    """
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]
    if isinstance(seq_vec, str):
        raise ValueError("seq_vec should be an iterable returning " +
                         "strings not a string itself")
    # Vocabulary entries and the pad value must share the same length,
    # and padding must map to a neutral token.
    assert len(vocab[0]) == len(pad_value)
    assert pad_value in neutral_vocab
    assert encode_type in ["one_hot", "token"]
    seq_vec = pad_sequences(seq_vec, maxlen=maxlen,
                            align=seq_align, value=pad_value)
    if encode_type == "one_hot":
        arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab),
                                  len(vocab))
                    for i, seq in enumerate(seq_vec)]
    elif encode_type == "token":
        # Shift tokens by +1 so that 0 represents neutral_vocab.
        arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab))
                    for seq in seq_vec]
    return np.stack(arr_list)
Convert a list of genetic sequences into one-hot-encoded array. # Arguments seq_vec: list of strings (genetic sequences) vocab: list of chars: List of "words" to use as the vocabulary. Can be strings of length>0, but all need to have the same length. For DNA, this is: ["A", "C", "G", "T"]. neutral_vocab: list of chars: Values used to pad the sequence or represent unknown-values. For DNA, this is: ["N"]. maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? encode_type: "one_hot" or "token". "token" represents each vocab element as a positive integer from 1 to len(vocab) + 1. neutral_vocab is represented with 0. # Returns Array with shape for encode_type: - "one_hot": `(len(seq_vec), maxlen, len(vocab))` - "token": `(len(seq_vec), maxlen)` If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
def __on_message(self, msg):
    """Dispatch a received XMPP message to the callback.

    Our own groupchat messages echoed back by the server are ignored, as
    are all types other than groupchat/normal/chat.
    """
    msgtype = msg['type']
    sender = msg['from']
    if msgtype == 'groupchat':
        if self._nick == sender.resource:
            return
    elif msgtype not in ('normal', 'chat'):
        return
    self.__callback(msg)
XMPP message received
def make_token(cls, ephemeral_token: 'RedisEphemeralTokens') -> str:
    """Return a limited-use token granting access to a resource."""
    value = ephemeral_token.key
    if ephemeral_token.scope:
        value += ''.join(ephemeral_token.scope)
    # Keep every other character of the HMAC to shorten the token.
    return get_hmac(cls.KEY_SALT + ephemeral_token.salt, value)[::2]
Returns a token to be used x number of times to allow a user account to access certain resource.
def _get_marker_output(self, asset_quantities, metadata):
    """Create a zero-value marker output carrying the asset payload.

    :param asset_quantities: list[int] of asset quantities
    :param metadata: bytes embedded in the output
    :return: a CTxOut representing the marker output
    """
    marker = openassets.protocol.MarkerOutput(asset_quantities, metadata)
    payload = marker.serialize_payload()
    script = openassets.protocol.MarkerOutput.build_script(payload)
    return bitcoin.core.CTxOut(0, script)
Creates a marker output. :param list[int] asset_quantities: The asset quantity list. :param bytes metadata: The metadata contained in the output. :return: An object representing the marker output. :rtype: TransactionOutput
def return_single_real_id_base(dbpath, set_object, object_id):
    """Return the real_id string of the object with `object_id`.

    Parameters
    ----------
    dbpath : str, path to the SQLite database file
    set_object : mapped class (TestSet or TrainSet) stored in the database
    object_id : int, id of the object in the database

    Fixes: the session is now always closed (even on error), and
    ``real_id`` is read while the instance is still attached to the
    session instead of after ``close()`` detaches it.
    """
    engine = create_engine('sqlite:////' + dbpath)
    session = sessionmaker(bind=engine)()
    try:
        tmp_object = session.query(set_object).get(object_id)
        return tmp_object.real_id
    finally:
        session.close()
Generic function which returns a real_id string of an object specified by the object_id Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database object_id : int, id of object in database Returns ------- real_id : string
def log_url (self, url_data, priority=None):
    """Log URL data in sitemap XML format.

    NOTE(review): ``priority`` defaults to None but is formatted with
    "%.2f", which would raise TypeError — confirm callers always supply
    a numeric priority.
    """
    self.xml_starttag(u'url')
    self.xml_tag(u'loc', url_data.url)
    if url_data.modified:
        self.xml_tag(u'lastmod', self.format_modified(url_data.modified, sep="T"))
    self.xml_tag(u'changefreq', self.frequency)
    self.xml_tag(u'priority', "%.2f" % priority)
    self.xml_endtag(u'url')
    self.flush()
Log URL data in sitemap format.
def _get_bufsize_linux(iface):
    """
    Return network interface buffer information using ethtool
    """
    ret = {'result': False}
    cmd = '/sbin/ethtool -g {0}'.format(iface)
    out = __salt__['cmd.run'](cmd)
    pat = re.compile(r'^(.+):\s+(\d+)$')
    # ethtool prints the "Pre-set maximums:" section first, so start with
    # the max suffix. The previous initial value 'max-' was *appended*,
    # producing keys like 'rx-jumbomax-' instead of 'rx-jumbo-max'.
    suffix = '-max'
    for line in out.splitlines():
        res = pat.match(line)
        if res:
            ret[res.group(1).lower().replace(' ', '-') + suffix] = int(res.group(2))
            ret['result'] = True
        elif line.endswith('maximums:'):
            suffix = '-max'
        elif line.endswith('settings:'):
            suffix = ''
    if not ret['result']:
        parts = out.split()
        # Strip a leading shell error prefix such as '/bin/sh:' before
        # reporting the message; guard against completely empty output.
        if parts and parts[0].endswith('sh:'):
            out = ' '.join(parts[1:])
        ret['comment'] = out
    return ret
Return network interface buffer information using ethtool
def monthly_build_list_regex(self):
    """Return the regex for the folder containing builds of a month."""
    # Month is zero-padded to two digits, e.g. 'nightly/2021/04/'.
    return r'nightly/{year}/{month:02d}/'.format(
        year=self.date.year, month=self.date.month)
Return the regex for the folder containing builds of a month.
def precondition(precond):
    """Run *precond* on the data before applying the transformation.

    *precond* must raise UnmetPrecondition when the data does not have
    the expected structure; in that case the decorated pipe is bypassed
    and the data is returned untouched.
    """
    def decorator(f):
        def decorated(*args):
            if len(args) > 2:
                raise TypeError('%s takes only 1 argument (or 2 for instance methods)' % f.__name__)
            if len(args) == 2:
                instance, data = args
                if not isinstance(instance, Pipe):
                    raise TypeError('%s is not a valid pipe instance' % instance)
            else:
                data = args[0]
            try:
                precond(data)
            except UnmetPrecondition:
                # Precondition not met: bypass the transformation.
                return data
            return f(*args)
        return decorated
    return decorator
Runs the callable responsible for making some assertions about the data structure expected for the transformation. If the precondition is not achieved, a UnmetPrecondition exception must be raised, and then the transformation pipe is bypassed.
def get_directory_as_zip(self, remote_path, local_file):
    """Downloads a remote directory as zip

    :param remote_path: path to the remote directory to download
    :param local_file: path and name of the target local file; when
        None, the basename of *remote_path* is used
    :returns: True if the operation succeeded, False otherwise
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    remote_path = self._normalize_path(remote_path)
    url = self.url + 'index.php/apps/files/ajax/download.php?dir=' \
        + parse.quote(remote_path)
    res = self._session.get(url, stream=True)
    if res.status_code == 200:
        if local_file is None:
            local_file = os.path.basename(remote_path)
        # Context manager closes the file even when a chunk write
        # raises mid-download (the old code leaked the handle).
        with open(local_file, 'wb', 8192) as file_handle:
            for chunk in res.iter_content(8192):
                file_handle.write(chunk)
        return True
    elif res.status_code >= 400:
        raise HTTPResponseError(res)
    return False
Downloads a remote directory as zip :param remote_path: path to the remote directory to download :param local_file: path and name of the target local file :returns: True if the operation succeeded, False otherwise :raises: HTTPResponseError in case an HTTP error status was returned
def get_max_id(self, object_type, role):
    """Get the next ID above the highest one currently in use.

    :param object_type: 'user' or 'group'
    :param role: role used to derive the allowed ID boundary
    :return: the minimum ID of the range when no entries exist,
        otherwise the highest used ID plus one
    :raises ldap_tools.exceptions.InvalidResult: unknown object type
    """
    if object_type == 'user':
        objectclass = 'posixAccount'
        ldap_attr = 'uidNumber'
    elif object_type == 'group':
        objectclass = 'posixGroup'
        ldap_attr = 'gidNumber'
    else:
        raise ldap_tools.exceptions.InvalidResult('Unknown object type')

    min_id, max_id = Client.__set_id_boundary(role)
    # Renamed from `filter` to avoid shadowing the builtin.
    search_filter = [
        "(objectclass={})".format(objectclass),
        "({}>={})".format(ldap_attr, min_id)
    ]
    if max_id is not None:
        search_filter.append("({}<={})".format(ldap_attr, max_id))
    results = self.search(search_filter, [ldap_attr])
    if not results:
        return min_id
    # object_type was validated above, so ldap_attr is always set here;
    # the previous duplicated type check at this point was unreachable.
    return max(getattr(entry, ldap_attr).value for entry in results) + 1
Get the highest used ID.
def _pick_unused_port_without_server():
    """Pick an available network port without the help of a port server.

    This code ensures that the port is available on both TCP and UDP.

    Returns:
      A port number that is unused on both TCP and UDP.

    Raises:
      NoFreePortFoundError: No free port could be found.
    """
    rng = random.Random()
    # Phase 1: probe random ports in the private range.
    for _ in range(10):
        candidate = int(rng.randrange(15000, 25000))
        if is_port_free(candidate):
            _random_ports.add(candidate)
            return candidate
    # Phase 2: let the OS pick a port, then confirm it on the other protocol.
    for _ in range(10):
        candidate = bind(0, _PROTOS[0][0], _PROTOS[0][1])
        if candidate and bind(candidate, _PROTOS[1][0], _PROTOS[1][1]):
            _random_ports.add(candidate)
            return candidate
    raise NoFreePortFoundError()
Pick an available network port without the help of a port server. This code ensures that the port is available on both TCP and UDP. This function is an implementation detail of PickUnusedPort(), and should not be called by code outside of this module. Returns: A port number that is unused on both TCP and UDP. Raises: NoFreePortFoundError: No free port could be found.
def readTableFromCSV(f, dialect="excel"):
    """Reads a table object from the given CSV file.

    The first row supplies the column names (first cell ignored); every
    following row contributes a row name and a row of float values.
    """
    reader = csv.reader(f, dialect)
    header = next(reader, None)
    columnNames = header[1:] if header is not None else []
    rowNames = []
    matrix = []
    for row in reader:
        rowNames.append(row[0])
        matrix.append([float(cell) for cell in row[1:]])
    return Table(rowNames, columnNames, matrix)
Reads a table object from given CSV file.
def get_encoding(input_string, guesses=None, is_html=False):
    """Return the encoding of a byte string, detected via bs4 UnicodeDammit.

    :param string input_string: Encoded byte string.
    :param list[string] guesses: (Optional) Encoding guesses to prioritize.
    :param bool is_html: Whether the input is HTML.
    """
    overrides = [guesses] if guesses else []
    dammit = UnicodeDammit(input_string, override_encodings=overrides,
                           is_html=is_html)
    return dammit.original_encoding
Return the encoding of a byte string. Uses bs4 UnicodeDammit. :param string input_string: Encoded byte string. :param list[string] guesses: (Optional) List of encoding guesses to prioritize. :param bool is_html: Whether the input is HTML.
def delete_account_metadata(self, prefix=None):
    """Remove all account metadata matching the specified prefix.

    By default the standard account metadata prefix
    ('X-Account-Meta-') is used; pass a non-None prefix (possibly the
    empty string) for non-standard headers.
    """
    if prefix is None:
        prefix = ACCOUNT_META_PREFIX
    current = self.get_account_metadata(prefix=prefix)
    # Posting a blank value for a metadata key deletes it server-side.
    blanked = {key: "" for key in current}
    headers = _massage_metakeys(blanked, prefix)
    resp, resp_body = self.api.method_post("/", headers=headers)
    return 200 <= resp.status_code <= 299
Removes all metadata matching the specified prefix from the account. By default, the standard account metadata prefix ('X-Account-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string.
def __read_chunk(self, start, size):
    """Read a chunk from the device buffer, retrying up to 3 times.

    :param start: offset of the chunk within the buffer
    :param size: number of bytes to read
    :return: the chunk data
    :raises ZKErrorResponse: when no attempt returns data
    """
    for _retries in range(3):
        command = 1504
        command_string = pack('<ii', start, size)
        if self.tcp:
            response_size = size + 32
        else:
            response_size = 1024 + 8
        cmd_response = self.__send_command(command, command_string, response_size)
        data = self.__recieve_chunk()
        if data is not None:
            return data
    # The original raised inside the loop on the first failure, which
    # made the 3-attempt retry loop pointless; now we only give up
    # after all attempts have failed.
    raise ZKErrorResponse("can't read chunk %i:[%i]" % (start, size))
read a chunk from buffer
def stop(self):
    """Stop a running SocketIO web server.

    This method must be called from a HTTP or SocketIO handler function.
    """
    mode = self.server.eio.async_mode
    if mode == 'threading':
        shutdown = flask.request.environ.get('werkzeug.server.shutdown')
        if shutdown:
            shutdown()
        else:
            raise RuntimeError('Cannot stop unknown web server')
    elif mode == 'eventlet':
        raise SystemExit
    elif mode == 'gevent':
        self.wsgi_server.stop()
Stop a running SocketIO web server. This method must be called from a HTTP or SocketIO handler function.
def get_protocol(handle: Handle, want_v2: bool) -> Protocol:
    """Make a Protocol instance for the given handle.

    The `TREZOR_PROTOCOL_V1` environment variable (default 1) forces
    protocol V1 regardless of the transport's preference; set it to 0
    to allow V2 for transports that ask for it.
    """
    force_v1 = int(os.environ.get("TREZOR_PROTOCOL_V1", 1))
    use_v2 = want_v2 and not force_v1
    return ProtocolV2(handle) if use_v2 else ProtocolV1(handle)
Make a Protocol instance for the given handle. Each transport can have a preference for using a particular protocol version. This preference is overridable through `TREZOR_PROTOCOL_V1` environment variable, which forces the library to use V1 anyways. As of 11/2018, no devices support V2, so we enforce V1 here. It is still possible to set `TREZOR_PROTOCOL_V1=0` and thus enable V2 protocol for transports that ask for it (i.e., USB transports for Trezor T).
def _post_build(self, module, encoding):
    """Handle encoding and delayed nodes after a module has been built.

    Records the source encoding, caches the module, registers
    ``__future__`` imports, resolves delayed attribute assignments and
    finally applies registered transforms (unless disabled).
    """
    module.file_encoding = encoding
    self._manager.cache_module(module)
    # Record __future__ feature names and register imported names in
    # the module's locals.
    for from_node in module._import_from_nodes:
        if from_node.modname == "__future__":
            for symbol, _ in from_node.names:
                module.future_imports.add(symbol)
        self.add_from_names_to_locals(from_node)
    # Resolve attribute assignments that could not be handled during
    # the initial build pass.
    for delayed in module._delayed_assattr:
        self.delayed_assattr(delayed)
    if self._apply_transforms:
        # Transforms may replace the module node entirely.
        module = self._manager.visit_transforms(module)
    return module
Handles encoding and delayed nodes after a module has been built
def get_template(self, path):
    """Load and compile a template, using the cache when enabled.

    :param path: template path used as the cache key
    :return: the compiled template
    """
    if self.options['debug'] and self.options['cache_size']:
        # The old code passed cache_template(path) as the .get()
        # default, which compiled the template on every call even on a
        # cache hit; only compile when the path is actually missing.
        _missing = object()
        template = self.cache.get(path, _missing)
        if template is _missing:
            template = self.cache_template(path)
        return template
    return self.load_template(path)
Load and compile template.
def get_feature_report(self, report_id, length):
    """Get a feature report from the device.

    :param report_id: The Report ID of the report to be read
    :type report_id: int
    :param length: Number of payload bytes to read (excluding report ID)
    :type length: int
    :return: The report data, without the leading report ID byte
    :rtype: str/bytes
    :raises IOError: if the underlying HID read fails
    """
    self._check_device_status()
    # Allocate one extra byte: the exchange is prefixed by the report ID.
    bufp = ffi.new("unsigned char[]", length+1)
    buf = ffi.buffer(bufp, length+1)
    buf[0] = report_id
    rv = hidapi.hid_get_feature_report(self._device, bufp, length+1)
    if rv == -1:
        raise IOError("Failed to get feature report from HID device: {0}"
                      .format(self._get_last_error_string()))
    # Strip the report ID byte before returning the payload.
    return buf[1:]
Get a feature report from the device. :param report_id: The Report ID of the report to be read :type report_id: int :return: The report data :rtype: str/bytes
def GetGRRVersionString(self):
    """Returns the client installation-name and GRR version as a string."""
    info = self.startup_info.client_info
    name = info.client_description or info.client_name
    if info.client_version > 0:
        version = str(info.client_version)
    else:
        # Version 0 (or negative) means the version was never reported.
        version = _UNKNOWN_GRR_VERSION
    return "%s %s" % (name, version)
Returns the client installation-name and GRR version as a string.
def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None,
               file_mapping_conf: FileMappingConfiguration = None,
               options: Dict[str, Dict[str, Any]] = None) -> T:
    """Main method to parse a single item of type *item_type*.

    :param location: location of the item to parse
    :param item_type: the expected type of the parsed item
    :param item_name_for_log: optional name used in log messages
    :param file_mapping_conf: optional file mapping configuration
    :param options: parser options
    :return: the parsed item
    """
    item_name_for_log = item_name_for_log or ''
    check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
    if item_name_for_log:
        item_name_for_log = item_name_for_log + ' '
    self.logger.debug('**** Starting to parse single object ' + item_name_for_log
                      + 'of type <' + get_pretty_type_str(item_type) + '> at location '
                      + location + ' ****')
    return self._parse__item(item_type, location, file_mapping_conf, options=options)
Main method to parse an item of type item_type :param location: :param item_type: :param item_name_for_log: :param file_mapping_conf: :param options: :return:
def create_token(self, request, refresh_token=False, **kwargs):
    """Create a BearerToken, by default without refresh token.

    :param request: OAuthlib request.
    :type request: oauthlib.common.Request
    :param refresh_token: whether to include a refresh token
    :return: an OAuth2Token built from the generated credentials
    """
    if "save_token" in kwargs:
        warnings.warn("`save_token` has been deprecated, it was not called internally."
                      "If you do, call `request_validator.save_token()` instead.",
                      DeprecationWarning)
    # expires_in may be a fixed value or a callable computed per request.
    if callable(self.expires_in):
        expires_in = self.expires_in(request)
    else:
        expires_in = self.expires_in
    request.expires_in = expires_in
    token = {
        'access_token': self.token_generator(request),
        'expires_in': expires_in,
        'token_type': 'Bearer',
    }
    # scope is only included when scopes were explicitly set (may be empty).
    if request.scopes is not None:
        token['scope'] = ' '.join(request.scopes)
    if refresh_token:
        # Reuse the existing refresh token unless the validator asks
        # for rotation (the default when no token exists).
        if (request.refresh_token and
                not self.request_validator.rotate_refresh_token(request)):
            token['refresh_token'] = request.refresh_token
        else:
            token['refresh_token'] = self.refresh_token_generator(request)
    token.update(request.extra_credentials or {})
    return OAuth2Token(token)
Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token: Whether to also issue a refresh token alongside the access token.
def stop(self):
    """Stop the Docker container and forget its id."""
    container = self.container_id
    if container is None:
        raise Exception('No Docker Selenium container was running')
    check_call(['docker', 'stop', container])
    self.container_id = None
Stop the Docker container
def log_time(func):
    """Decorator that logs start, finish and elapsed time of *func*.

    :param func: function to wrap
    :return: wrapper returning the original function's result
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        name = get_method_name(func)
        timer = Timer()
        log_message(name, "has started")
        with timer:
            result = func(*args, **kwargs)
        elapsed = "{:.3f}".format(timer.elapsed_time())
        log_message(name, "has finished. Execution time:", elapsed, "s")
        return result
    return wrapper
Executes function and logs time :param func: function to call :return: function result
def srun_nodes(self):
    """Get list of nodes where to execute the command."""
    count = self.execution.get('srun_nodes', 0)
    if isinstance(count, six.string_types):
        # A string value names a tag whose nodes should be used.
        tag, count = count, 0
    elif isinstance(count, SEQUENCES):
        # An explicit sequence of nodes is returned unchanged.
        return count
    else:
        assert isinstance(count, int)
        tag = self.tag
    nodes = self._srun_nodes(tag, count)
    if 'srun_nodes' in self.execution:
        # Persist the resolved node list back into the execution spec.
        self.execution['srun_nodes'] = nodes
        self.execution['srun_nodes_count'] = len(nodes)
    return nodes
Get list of nodes where to execute the command
def processor(ctx, processor_cls, process_time_limit, enable_stdout_capture=True,
              get_object=False):
    """Run Processor."""
    g = ctx.obj
    ProcessorCls = load_cls(None, None, processor_cls)
    instance = ProcessorCls(projectdb=g.projectdb,
                            inqueue=g.fetcher2processor,
                            status_queue=g.status_queue,
                            newtask_queue=g.newtask_queue,
                            result_queue=g.processor2result,
                            enable_stdout_capture=enable_stdout_capture,
                            process_time_limit=process_time_limit)
    g.instances.append(instance)
    # In testing mode (or when explicitly requested) hand the instance
    # back instead of entering the blocking run loop.
    if g.get('testing_mode') or get_object:
        return instance
    instance.run()
Run Processor.
def unregister(self, label: str) -> None:
    """Remove the encoder and decoder registry entries for *label*."""
    for remove in (self.unregister_encoder, self.unregister_decoder):
        remove(label)
Unregisters the entries in the encoder and decoder registries which have the label ``label``.
def eval(thunk, env):
    """Evaluate a thunk in an environment.

    Defers the actual evaluation to the thunk itself but adds caching
    (via ``eval_cache``) and recursion detection through the global
    activation stack, which makes evaluation not thread safe.

    :raises exceptions.RecursionError: when the same (thunk, env)
        activation is already active, i.e. a reference cycle.
    """
    key = Activation.key(thunk, env)
    # Re-entering an already-active key means we looped back into the
    # same evaluation -> reference cycle.
    if Activation.activated(key):
        raise exceptions.RecursionError('Reference cycle')
    with Activation(key):
        return eval_cache.get(key, thunk.eval, env)
Evaluate a thunk in an environment. Will defer the actual evaluation to the thunk itself, but adds two things: caching and recursion detection. Since we have to use a global evaluation stack (because there is a variety of functions that may be invoked, not just eval() but also __getitem__, and not all of them can pass along a context object), GCL evaluation is not thread safe. With regard to schemas: - A schema can be passed in from outside. The returned object will be validated to see that it conforms to the schema. The schema will be attached to the value if possible. - Some objects may contain their own schema, such as tuples. This would be out of scope of the eval() function, were it not for: - Schema validation can be disabled in an evaluation call stack. This is useful if we're evaluating a tuple only for its schema information. At that point, we're not interested if the object is value-complete.
def _process_event(self, event, tagged_data):
    """Processes a single tf.Event and records it in tagged_data.

    Summary values are migrated to the tensor-based format and
    appended to the per-tag list; all other event types are currently
    ignored on purpose.
    """
    event_type = event.WhichOneof('what')
    if event_type == 'summary':
        for value in event.summary.value:
            # Convert legacy summary values to the modern tensor form.
            value = data_compat.migrate_value(value)
            # NOTE(review): the unpacked `metadata` of an existing entry
            # is never used — only the first-seen metadata is stored.
            tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
            values.append((event.step, event.wall_time, value.tensor))
            if tag is None:
                # First occurrence of this tag: record tag metadata too.
                tagged_data[value.tag] = sqlite_writer.TagData(
                    value.tag, value.metadata, values)
    elif event_type == 'file_version':
        # Deliberately ignored.
        pass
    elif event_type == 'session_log':
        if event.session_log.status == event_pb2.SessionLog.START:
            # Session starts are recognized but not recorded.
            pass
    elif event_type in ('graph_def', 'meta_graph_def'):
        # Graph events are deliberately ignored.
        pass
    elif event_type == 'tagged_run_metadata':
        # Run metadata is deliberately ignored.
        pass
Processes a single tf.Event and records it in tagged_data.
def _get_reader(self, network_reader):
    """Get a reader or None if another reader is already reading.

    The first caller under the lock becomes the owning reader process;
    only the owner is handed a new read future.

    :param network_reader: the reader requesting access
    :return: a future reading READ_SIZE bytes, or None when a different
        reader currently owns the stream (or no stream reader exists)
    """
    with (yield from self._lock):
        # First caller claims ownership of the stream.
        if self._reader_process is None:
            self._reader_process = network_reader
        if self._reader:
            if self._reader_process == network_reader:
                # Owner gets a fresh read scheduled on the stream.
                self._current_read = asyncio.async(self._reader.read(READ_SIZE))
                return self._current_read
        return None
Get a reader or None if another reader is already reading.
def cleanup(self, cluster):
    """Deletes the inventory file used last recently used.

    :param cluster: cluster to clear up inventory file for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    if not (self._storage_path and os.path.exists(self._storage_path)):
        return
    fname = '%s.%s' % (AnsibleSetupProvider.inventory_file_ending, cluster.name)
    inventory_path = os.path.join(self._storage_path, fname)
    if not os.path.exists(inventory_path):
        return
    try:
        os.unlink(inventory_path)
        # Remove the temporary storage directory once it is empty.
        if self._storage_path_tmp:
            if len(os.listdir(self._storage_path)) == 0:
                shutil.rmtree(self._storage_path)
    except OSError as ex:
        log.warning(
            "AnsibileProvider: Ignoring error while deleting "
            "inventory file %s: %s", inventory_path, ex)
Deletes the inventory file used last recently used. :param cluster: cluster to clear up inventory file for :type cluster: :py:class:`elasticluster.cluster.Cluster`
def pil2tensor(image:Union[NPImage,NPArray],dtype:np.dtype)->TensorImage:
    "Convert PIL style `image` array to torch style image tensor."
    a = np.asarray(image)
    # Grayscale images get a trailing channel axis so the layout is HWC.
    if a.ndim == 2: a = np.expand_dims(a, 2)
    # HWC -> CHW in one step; the previous pair of transposes
    # ((1,0,2) followed by (2,1,0)) composed to exactly this permutation.
    a = np.transpose(a, (2, 0, 1))
    return torch.from_numpy(a.astype(dtype, copy=False))
Convert PIL style `image` array to torch style image tensor.
def get_explicit_resnorms(self, indices=None):
    """Explicitly computes the Ritz residual norms.

    :param indices: optional selection of Ritz vectors
    :return: array with one residual norm per selected vector
    """
    res = self.get_explicit_residual(indices)
    linear_system = self._deflated_solver.linear_system
    Mres = linear_system.M * res
    n_vectors = res.shape[1]
    resnorms = numpy.zeros(n_vectors)
    # Norms are computed column by column in the inner product ip_B.
    for col in range(n_vectors):
        resnorms[col] = utils.norm(res[:, [col]], Mres[:, [col]],
                                   ip_B=linear_system.ip_B)
    return resnorms
Explicitly computes the Ritz residual norms.
def metadata_get(self, path, cached=True):
    """Get metadata from inspect, specified by path.

    :param path: list of str keys to walk in the inspect response
    :param cached: bool, use cached version of inspect if available
    :raises NotAvailableAnymore: if the object no longer exists
    """
    try:
        response = self.inspect(cached=cached).response
        return graceful_chain_get(response, *path)
    except docker.errors.NotFound:
        logger.warning("object %s is not available anymore", self)
        raise NotAvailableAnymore()
get metadata from inspect, specified by path :param path: list of str :param cached: bool, use cached version of inspect if available
def _GetHeader(self):
    """Returns header values, skipping any value that raises SkipValue."""
    result = []
    for item in self.values:
        try:
            cell = item.Header()
        except SkipValue:
            # Values may opt out of the header entirely.
            continue
        result.append(cell)
    return result
Returns header.
def to_serializable_value(self):
    """Run through all fields of the object and parse the values.

    :return: mapping of field name to its serializable value
    :rtype: dict
    """
    # NOTE(review): the trailing `and self.value` term re-checks the
    # truthiness of the wrapped object for every field; if self.value
    # is falsy this always yields an empty dict — confirm intended.
    return {
        name: field.to_serializable_value()
        for name, field in self.value.__dict__.items()
        if isinstance(field, Field) and self.value
    }
Run through all fields of the object and parse the values :return: :rtype: dict
def new_document(self):
    """Create a new, empty document with an auto-generated _id.

    The document is created in the remote database and registered in
    the local cache.

    :returns: Document instance corresponding to the new document
    """
    document = Document(self, None)
    document.create()
    # Register the fresh document in the local cache under its new id.
    super(CouchDatabase, self).__setitem__(document['_id'], document)
    return document
Creates a new, empty document in the remote and locally cached database, auto-generating the _id. :returns: Document instance corresponding to the new document in the database
def get_methodnames(self, node):
    """Given a node, generate all names for matching visitor methods.

    :param node: node whose key (or generator of keys) determines the
        method names
    :yields: one prefixed method name per node key
    """
    nodekey = self.get_nodekey(node)
    prefix = self._method_prefix
    if isinstance(nodekey, self.GeneratorType):
        # A generator of keys yields one method name per key. The old
        # code rebound the loop variable over `nodekey` and left the
        # `prefix` local unused.
        for key in nodekey:
            yield prefix + key
    else:
        yield prefix + nodekey
Given a node, generate all names for matching visitor methods.
def button_with_label(self, description, assistants=None):
    """Create a button labelled with *description*.

    When *assistants* is given, a second left-aligned label with the
    assistant text is packed below the description.
    """
    btn = self.create_button()
    label = self.create_label(description)
    if assistants is None:
        btn.add(label)
        return btn
    box = self.create_box(orientation=Gtk.Orientation.VERTICAL)
    box.pack_start(label, False, False, 0)
    assistants_label = self.create_label(
        assistants, justify=Gtk.Justification.LEFT
    )
    assistants_label.set_alignment(0, 0)
    box.pack_start(assistants_label, False, False, 12)
    btn.add(box)
    return btn
Function creates a button with a label. If assistant text is specified, it is shown as an additional left-aligned label below the description.
def get_field(self, page, language, initial=None):
    """The field that will be shown within the admin."""
    help_text = (
        _('Note: This field is evaluated as template code.')
        if self.parsed else ''
    )
    widget = self.get_widget(page, language)
    return self.field(
        widget=widget,
        initial=initial,
        help_text=help_text,
        required=False)
The field that will be shown within the admin.
def add_listener(self, event_name: str, listener: Callable):
    """Register *listener* for *event_name*; returns self for chaining."""
    registered = self.listeners[event_name]
    registered.append(listener)
    return self
Add a listener.
def downloads_per_week(self):
    """Return the number of downloads in the last 7 days.

    :return: number of downloads in the last 7 days; None when fewer
        than 7 days of data are cached.
    :rtype: int
    """
    num_days = len(self.cache_dates)
    if num_days < 7:
        logger.error("Only have %d days of data; cannot calculate "
                     "downloads per week", num_days)
        return None
    weekly, _ = self._downloads_for_num_days(7)
    logger.debug("Downloads per week = %d", weekly)
    return weekly
Return the number of downloads in the last 7 days. :return: number of downloads in the last 7 days; if we have less than 7 days of data, returns None. :rtype: int
def delete_user_avatar(self, username, avatar):
    """Delete a user's avatar.

    :param username: the user to delete the avatar from
    :param avatar: ID of the avatar to remove
    """
    url = self._get_url('user/avatar/' + avatar)
    return self._session.delete(url, params={'username': username})
Delete a user's avatar. :param username: the user to delete the avatar from :param avatar: ID of the avatar to remove