code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def validate_list_of_identical_dicts(self, list_of_dicts):
    """Check that every dict in a list is identical.

    :param list_of_dicts: list of dicts to compare.
    :returns: None when all dicts match, otherwise an error string.
    """
    # Hash each dict's item set so dicts compare regardless of key order.
    hashes = [hash(frozenset(entry.items())) for entry in list_of_dicts]
    self.log.debug('Hashes: {}'.format(hashes))
    if len(set(hashes)) != 1:
        return 'Dicts within list are not identical'
    self.log.debug('Dicts within list are identical')
    return None
Check that all dicts within a list are identical.
def new_profile(self):
    """Create a new minimum-needs profile by name.

    Prompts for a JSON file name; when the profile is new, it is
    created, saved and added to the combo box. Either way the profile
    entry is selected in the combo box.
    """
    # Renamed from 'dir' to avoid shadowing the builtin.
    directory = os.path.join(
        QgsApplication.qgisSettingsDirPath(), 'inasafe', 'minimum_needs')
    file_name, __ = QFileDialog.getSaveFileName(
        self,
        self.tr('Create a minimum needs profile'),
        expanduser(directory),
        self.tr('JSON files (*.json *.JSON)'),
        options=QFileDialog.DontUseNativeDialog)
    if not file_name:
        # User cancelled the dialog.
        return
    file_name = basename(file_name)
    is_new = self.profile_combo.findText(file_name) == -1
    if is_new:
        minimum_needs = {
            'resources': [],
            'provenance': '',
            'profile': file_name}
        self.minimum_needs.update_minimum_needs(minimum_needs)
        self.minimum_needs.save_profile(file_name)
        self.profile_combo.addItem(file_name)
        self.clear_resource_list()
    # Common to both branches: make the profile current in the combo.
    self.profile_combo.setCurrentIndex(
        self.profile_combo.findText(file_name))
    if not is_new:
        # Preserve original behavior: only existing profiles are
        # explicitly re-selected by name.
        self.select_profile_by_name(file_name)
Create a new profile by name.
def terminal(self, out=None, border=None):
    """Serialize the matrix as ANSI escape codes.

    :param out: Filename or file-like text stream. ``None`` (default)
        writes to ``sys.stdout``.
    :param int border: Size of the quiet zone; ``None`` uses the
        recommended default border.
    """
    if out is not None or sys.platform != 'win32':
        writers.write_terminal(self.matrix, self._version,
                               out or sys.stdout, border)
        return
    # Windows stdout: try the console-API writer first and fall back
    # to plain ANSI output when it is unavailable.
    try:
        writers.write_terminal_win(self.matrix, self._version, border)
    except OSError:
        writers.write_terminal(self.matrix, self._version, sys.stdout,
                               border)
\ Serializes the matrix as ANSI escape code. :param out: Filename or a file-like object supporting to write text. If ``None`` (default), the matrix is written to ``sys.stdout``. :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
def generate_values(self, *args, **kwargs):
    """Draw a sample of values from the configured distribution.

    The sample size defaults to ``self.size`` and may be overridden via
    the ``size`` kwarg. When ``self.sample_mean_value`` is true the
    sample consists of the distribution mean repeated ``size`` times.

    :returns: sample as a vector of the requested size.
    """
    n = kwargs.get('size', self.size)
    func = self.instantiate_distribution_function(
        self.module_name, self.distribution_name)
    draw = partial(func, *self.random_function_params, size=n)
    if self.sample_mean_value:
        return np.full(n, self.get_mean(draw))
    return draw()
Generate a sample of values by sampling from a distribution. The size of the sample can be overridden with the 'size' kwarg. If `self.sample_mean_value == True` the sample will contain "size" times the mean value. :param args: :param kwargs: :return: sample as vector of given size
def run(self, context=None, stdout=None, stderr=None):
    """Like execute, but records a skip if should_skip returns True.

    :returns: tuple of (num_successes, num_failures, num_skipped).
    """
    if not self.should_skip():
        self.execute(context, stdout, stderr)
    else:
        self._record_skipped_example(self.formatter)
        self.num_skipped += 1
    return (self.num_successes, self.num_failures, self.num_skipped)
Like execute, but records a skip if the should_skip method returns True.
def subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor:
    """Build a ``(1, size, size)`` lower-triangular int32 mask.

    Position ``(i, j)`` is 1 iff ``j <= i``, masking out subsequent
    (future) positions.
    """
    ones = torch.ones(size, size, device=device, dtype=torch.int32)
    return torch.tril(ones).unsqueeze(0)
Mask out subsequent positions.
def _iter_info(self, niter, level=logging.INFO):
    """Log the iteration number and its maximum mismatch.

    Parameters
    ----------
    niter
        1-based iteration number.
    level
        Logging level to emit at. (Previously this parameter was
        accepted but ignored; the message was always logged at INFO.)

    Returns
    -------
    None
    """
    max_mis = self.iter_mis[niter - 1]
    msg = ' Iter {:<d}. max mismatch = {:8.7f}'.format(niter, max_mis)
    # Honor the requested level instead of hard-coding logger.info().
    logger.log(level, msg)
Log iteration number and mismatch Parameters ---------- level logging level Returns ------- None
def __taint_move(self, instr):
    """Propagate taint for a move instruction.

    Copies the taint of the source operand (operands[0]) to the
    destination operand (operands[2]).
    """
    src_taint = self.get_operand_taint(instr.operands[0])
    self.set_operand_taint(instr.operands[2], src_taint)
Taint registers move instruction.
def read_int64(self, little_endian=True):
    """Read 8 bytes from the stream as a signed integer.

    Args:
        little_endian (bool): endianness of the value (default little).

    Returns:
        int: the decoded value.
    """
    fmt = '<q' if little_endian else '>q'
    return self.unpack(fmt, 8)
Read 8 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
def send_signal(self, s):
    """Send signal ``s`` to the daemon process.

    The signal must have been enabled via the ``signals`` parameter of
    ``Service.__init__``; otherwise ``ValueError`` is raised. Also
    raises ``ValueError`` when the daemon is not running.
    """
    # Raises if the signal was not enabled at construction time.
    self._get_signal_event(s)
    pid = self.get_pid()
    if not pid:
        raise ValueError('Daemon is not running.')
    os.kill(pid, s)
Send a signal to the daemon process. The signal must have been enabled using the ``signals`` parameter of :py:meth:`Service.__init__`. Otherwise, a ``ValueError`` is raised.
def created(self):
    """Best-effort parse of a human readable torrent creation datetime.

    Handles relative forms ("N secs/mins/hours ago"), "Y-day"/"Today"
    substitutions, and finally dateutil parsing; falls back to the raw
    timestamp when parsing fails.
    """
    timestamp, current = self._created
    if timestamp.endswith('ago'):
        quantity, kind, _ago = timestamp.split()
        quantity = int(quantity)
        if 'sec' in kind:
            current -= quantity
        elif 'min' in kind:
            current -= quantity * 60
        elif 'hour' in kind:
            current -= quantity * 60 * 60
        return datetime.datetime.fromtimestamp(current)
    current = datetime.datetime.fromtimestamp(current)
    timestamp = timestamp.replace(
        'Y-day', str(current.date() - datetime.timedelta(days=1)))
    timestamp = timestamp.replace('Today', current.date().isoformat())
    try:
        return dateutil.parser.parse(timestamp)
    except (ValueError, OverflowError):
        # Previously a bare `except:` that swallowed every exception,
        # including KeyboardInterrupt. Only parsing errors are
        # expected here; fall back to the raw timestamp.
        return current
Attempt to parse the human readable torrent creation datetime.
def command(self, request_type, uri, payload):
    """Build and send a command over the websocket connection.

    :param request_type: request type string (e.g. 'request').
    :param uri: ssap URI suffix.
    :param payload: dict payload, or None for an empty payload.
    """
    self.command_count += 1
    if payload is None:
        payload = {}
    message = {
        # BUG FIX: previously formatted the *builtin* ``type`` (i.e.
        # "<class 'type'>_N") instead of the actual request type.
        'id': "{}_{}".format(request_type, self.command_count),
        'type': request_type,
        'uri': "ssap://{}".format(uri),
        'payload': payload,
    }
    self.last_response = None
    # Create the loop before the try block so `loop` is always bound
    # when the finally clause runs.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(
            asyncio.wait_for(self._command(message),
                             self.timeout_connect, loop=loop))
    finally:
        loop.close()
Build and send a command.
def logline_timestamp_comparator(t1, t2):
    """Compare two timestamps in logline format.

    Args:
        t1: Timestamp in logline format.
        t2: Timestamp in logline format.

    Returns:
        -1 if t1 < t2; 1 if t1 > t2; 0 if equal.
    """
    for unit1, unit2 in zip(_parse_logline_timestamp(t1),
                            _parse_logline_timestamp(t2)):
        if unit1 != unit2:
            return -1 if unit1 < unit2 else 1
    return 0
Comparator for timestamps in logline format. Args: t1: Timestamp in logline format. t2: Timestamp in logline format. Returns: -1 if t1 < t2; 1 if t1 > t2; 0 if t1 == t2.
def put(self, endpoint: str, **kwargs) -> dict:
    """Issue an HTTP PUT request against the given API endpoint."""
    return self._request('PUT', endpoint, **kwargs)
HTTP PUT operation to API endpoint.
def destroy(self, request, pk=None):
    """Archive (rather than delete) the organization on DELETE."""
    organization = self.get_object()
    organization.archived = True
    organization.save()
    return Response(status=status.HTTP_204_NO_CONTENT)
For DELETE actions, archive the organization, don't delete.
def run(self, **kwargs):
    """Start the scheduler worker, daemonized or as a one-shot run.

    Returns:
        `None`
    """
    super().run(**kwargs)
    scheduler = self.scheduler_plugins[self.active_scheduler]()
    if kwargs['no_daemon']:
        self.log.info('Starting {} worker for a single non-daemon execution'.format(
            scheduler.name
        ))
        scheduler.execute_worker()
        return
    self.log.info('Starting {} worker with {} threads checking for new messages every {} seconds'.format(
        scheduler.name, kwargs['threads'], kwargs['delay']
    ))
    # Spawn one polling thread per requested worker.
    for _ in range(kwargs['threads']):
        worker = threading.Thread(
            target=self.execute_worker_thread,
            args=(scheduler.execute_worker, kwargs['delay'])
        )
        worker.start()
Execute the worker thread. Returns: `None`
def get_appliance(self, id_or_uri, fields=''):
    """Get an Image Streamer appliance resource by ID or URI.

    Args:
        id_or_uri: Appliance ID or URI.
        fields: Optional list of fields to return in the result.

    Returns:
        dict: Image Streamer resource.
    """
    resource_id = extract_id_from_uri(id_or_uri)
    uri = '{}/image-streamer-appliances/{}'.format(self.URI, resource_id)
    if fields:
        uri = '{}?fields={}'.format(uri, fields)
    return self._client.get(uri)
Gets the particular Image Streamer resource based on its ID or URI. Args: id_or_uri: Can be either the Os Deployment Server ID or the URI fields: Specifies which fields should be returned in the result. Returns: dict: Image Streamer resource.
def _get_cached_response_from_django_cache(key):
    """Return a CachedResponse for *key* from the django cache.

    Always reports a miss when forced cache misses are enabled.

    Args:
        key (string)

    Returns:
        CachedResponse with is_found status and value.
    """
    if TieredCache._should_force_django_cache_miss():
        return CachedResponse(is_found=False, key=key, value=None)
    value = django_cache.get(key, _CACHE_MISS)
    return CachedResponse(value is not _CACHE_MISS, key, value)
Retrieves a CachedResponse for the given key from the django cache. If the request was set to force cache misses, then this will always return a cache miss response. Args: key (string) Returns: A CachedResponse with is_found status and value.
def reset(self):
    """Reset the scenario, starting it over for a new run."""
    # Pick a new needle location and restore the cycle budget; the two
    # assignments are independent.
    self.needle_index = random.randrange(self.input_size)
    self.remaining_cycles = self.initial_training_cycles
Reset the scenario, starting it over for a new run. Usage: if not scenario.more(): scenario.reset() Arguments: None Return: None
def read_dbf(dbf_path, index=None, cols=False, incl_index=False):
    """Read a DBF file into a pandas.DataFrame.

    Arguments
    ---------
    dbf_path : str
        Path to the DBF file to read.
    index : str
        Name of the column to use as the DataFrame index.
    cols : list
        Columns to load; False (default) loads every column.
    incl_index : bool
        If True, also keep the index column as a regular column.

    Returns
    -------
    df : DataFrame
    """
    db = ps.open(dbf_path)
    try:
        if cols:
            # Copy before appending so the caller's list is not mutated
            # (the original appended `index` directly onto `cols`).
            vars_to_read = list(cols)
            if incl_index:
                vars_to_read.append(index)
        else:
            vars_to_read = db.header
        data = {var: db.by_col(var) for var in vars_to_read}
        index_values = db.by_col(index) if index else None
    finally:
        # Single close path, also taken on error.
        db.close()
    return pd.DataFrame(data, index=index_values)
Read a dbf file as a pandas.DataFrame, optionally selecting the index variable and which columns are to be loaded. __author__ = "Dani Arribas-Bel <darribas@asu.edu> " ... Arguments --------- dbf_path : str Path to the DBF file to be read index : str Name of the column to be used as the index of the DataFrame cols : list List with the names of the columns to be read into the DataFrame. Defaults to False, which reads the whole dbf incl_index : Boolean If True index is included in the DataFrame as a column too. Defaults to False Returns ------- df : DataFrame pandas.DataFrame object created
def _handle_expander_message(self, data):
    """Parse an expander message and update internal state.

    :param data: raw expander message string
    :returns: the parsed ExpanderMessage
    """
    message = ExpanderMessage(data)
    self._update_internal_states(message)
    self.on_expander_message(message=message)
    return message
Handle expander messages. :param data: expander message to parse :type data: string :returns: :py:class:`~alarmdecoder.messages.ExpanderMessage`
def parse_restriction_dist(self, f):
    """Parse a HOMER tagdirectory petagRestrictionDistribution file.

    Skips the header line and maps each nucleotide position to the sum
    of the two tag-count columns.
    """
    parsed_data = dict()
    lines = iter(f['f'])
    next(lines, None)  # skip the header line
    for line in lines:
        fields = line.split("\t")
        if len(fields) > 1:
            position = float(fields[0].strip())
            total = float(fields[1].strip()) + float(fields[2].strip())
            parsed_data[position] = total
    return parsed_data
Parse HOMER tagdirectory petagRestrictionDistribution file.
def bulk_recover(workers, lbn, profile='default'):
    """Recover all the given workers in the specified load balancer.

    CLI Examples:

    .. code-block:: bash

        salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1
        salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1 other-profile
    """
    if isinstance(workers, six.string_types):
        workers = workers.split(',')
    ret = {}
    for worker in workers:
        try:
            ret[worker] = worker_recover(worker, lbn, profile)
        except Exception:
            # Best effort: mark this worker as failed and keep going.
            ret[worker] = False
    return ret
Recover all the given workers in the specific load balancer CLI Examples: .. code-block:: bash salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1 salt '*' modjk.bulk_recover node1,node2,node3 loadbalancer1 other-profile salt '*' modjk.bulk_recover ["node1","node2","node3"] loadbalancer1 salt '*' modjk.bulk_recover ["node1","node2","node3"] loadbalancer1 other-profile
def list_commands(self, page_size=None):
    """Iterate over the commands visible to this client.

    Commands are returned in lexicographical order.

    :rtype: :class:`.Command` iterator
    """
    params = {} if page_size is None else {'limit': page_size}
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/commands'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListCommandsResponse,
        items_key='command',
        item_mapper=Command,
    )
Lists the commands visible to this client. Commands are returned in lexicographical order. :rtype: :class:`.Command` iterator
def delete_group(self, group_id, force=False):
    """Stop and destroy a group.

    :param str group_id: group ID
    :param bool force: apply even if a deployment is in progress
    :returns: dict containing the deleted version
    :rtype: dict
    """
    response = self._do_request(
        'DELETE',
        '/v2/groups/{group_id}'.format(group_id=group_id),
        params={'force': force})
    return response.json()
Stop and destroy a group. :param str group_id: group ID :param bool force: apply even if a deployment is in progress :returns: a dict containing the deleted version :rtype: dict
def _sanitize_title(self, title):
    """Strip bracketed and post-delimiter metadata from *title*."""
    cleaned = re.sub(self.inside_brackets, "", title)
    cleaned = re.sub(self.after_delimiter, "", cleaned)
    return cleaned.strip()
Remove redundant metadata from the title and return it.
def relativize(self, origin):
    """If self is a subdomain of *origin*, return self relative to it.

    Otherwise return self unchanged.

    @rtype: dns.name.Name object
    """
    # Idiomatic 'origin is not None' replaces 'not origin is None'.
    if origin is not None and self.is_subdomain(origin):
        return Name(self[: -len(origin)])
    return self
If self is a subdomain of origin, return a new name which is self relative to origin. Otherwise return self. @rtype: dns.name.Name object
def acl(self):
    """Return the access control list for this workspace."""
    response = fapi.get_workspace_acl(self.namespace, self.name,
                                      self.api_url)
    fapi._check_response_code(response, 200)
    return response.json()
Get the access control list for this workspace.
def reset(self, path, pretend=False):
    """Roll back every currently applied migration.

    :param path: The migrations path
    :type path: str
    :param pretend: Whether to execute the rollbacks as a dry-run
    :type pretend: bool
    :rtype: int
    """
    self._notes = []
    # Roll back in reverse order of application.
    migrations = sorted(self._repository.get_ran(), reverse=True)
    if not migrations:
        self._note("<info>Nothing to rollback.</info>")
    for migration in migrations:
        self._run_down(path, {"migration": migration}, pretend)
    return len(migrations)
Rolls all of the currently applied migrations back. :param path: The path :type path: str :param pretend: Whether we execute the migrations as dry-run :type pretend: bool :rtype: count
def _AddMessageMethods(message_descriptor, cls):
    """Attach implementations of all Message methods to *cls*."""
    # Field accessors.
    _AddListFieldsMethod(message_descriptor, cls)
    _AddHasFieldMethod(message_descriptor, cls)
    _AddClearFieldMethod(message_descriptor, cls)
    # Extension support, only for extendable message types.
    if message_descriptor.is_extendable:
        _AddClearExtensionMethod(cls)
        _AddHasExtensionMethod(cls)
    # Comparison and string conversion.
    _AddEqualsMethod(message_descriptor, cls)
    _AddStrMethod(message_descriptor, cls)
    _AddReprMethod(message_descriptor, cls)
    _AddUnicodeMethod(message_descriptor, cls)
    # Serialization, merging and initialization checks.
    _AddByteSizeMethod(message_descriptor, cls)
    _AddSerializeToStringMethod(message_descriptor, cls)
    _AddSerializePartialToStringMethod(message_descriptor, cls)
    _AddMergeFromStringMethod(message_descriptor, cls)
    _AddIsInitializedMethod(message_descriptor, cls)
    _AddMergeFromMethod(cls)
    _AddWhichOneofMethod(message_descriptor, cls)
    _AddReduceMethod(cls)
    # Plain function assignments.
    cls.Clear = _Clear
    cls.DiscardUnknownFields = _DiscardUnknownFields
    cls._SetListener = _SetListener
Adds implementations of all Message methods to cls.
def _extract_file(zip_fp, info, path): zip_fp.extract(info.filename, path=path) out_path = os.path.join(path, info.filename) perm = info.external_attr >> 16 perm |= stat.S_IREAD os.chmod(out_path, perm)
Extract files while explicitly setting the proper permissions
def scale(self, scale, center=None):
    """Scale the matrix, optionally about a given origin.

    The scaling is applied *after* the transformations already present
    in the matrix.

    Parameters
    ----------
    scale : array-like
        Scale factors along the x, y and z axes.
    center : array-like or None
        Coordinates to scale around; (0, 0, 0) when None.
    """
    scale_mat = transforms.scale(
        as_vec4(scale, default=(1, 1, 1, 1))[0, :3])
    if center is not None:
        center = as_vec4(center)[0, :3]
        # Translate to the origin, scale, then translate back.
        scale_mat = np.dot(
            np.dot(transforms.translate(-center), scale_mat),
            transforms.translate(center))
    self.matrix = np.dot(self.matrix, scale_mat)
Scale the matrix about a given origin. The scaling is applied *after* the transformations already present in the matrix. Parameters ---------- scale : array-like Scale factors along x, y and z axes. center : array-like or None The x, y and z coordinates to scale around. If None, (0, 0, 0) will be used.
def change_count(self):
    """Number of changed files in the working directory."""
    status = self.git.status(porcelain=True, untracked_files='no').strip()
    # Each porcelain status line is one changed file.
    return len(status.split('\n')) if status else 0
The number of changes in the working directory.
def link_contentkey_authorization_policy(access_token, ckap_id, options_id,
                                         ams_redirected_rest_endpoint):
    """Link a Media Service Content Key Authorization Policy to options.

    Args:
        access_token (str): A valid Azure authentication token.
        ckap_id (str): Content Key Authorization Policy ID.
        options_id (str): Content Key Authorization Policy Options ID.
        ams_redirected_rest_endpoint (str): Redirected AMS endpoint.

    Returns:
        HTTP response. JSON body.
    """
    import json

    path = '/ContentKeyAuthorizationPolicies'
    full_path = "{}('{}')/$links/Options".format(path, ckap_id)
    full_path_encoded = urllib.parse.quote(full_path, safe='')
    # NOTE(review): relies on module-level `ams_rest_endpoint` — confirm
    # it is defined in this module's scope.
    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
    uri = "{}ContentKeyAuthorizationPolicyOptions('{}')".format(
        ams_redirected_rest_endpoint, options_id)
    # json.dumps escapes the URI correctly; the previous manual string
    # concatenation produced invalid JSON for URIs containing quotes.
    body = json.dumps({'uri': uri})
    return do_ams_post(endpoint, full_path_encoded, body, access_token,
                       "json_only", "1.0;NetFx")
Link Media Service Content Key Authorization Policy. Args: access_token (str): A valid Azure authentication token. ckap_id (str): A Media Service Asset Content Key Authorization Policy ID. options_id (str): A Media Service Content Key Authorization Policy Options . ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint. Returns: HTTP response. JSON body.
def total_seconds(td):
    """Convert a timedelta to seconds, rounding any sub-second part up.

    Patterned after timedelta.total_seconds (Python 2.7+), but returns
    whole seconds rounded up.

    Args:
        td: a timedelta object.

    Returns:
        Total seconds within the timedelta, rounded up to seconds.
    """
    seconds = td.days * 24 * 3600 + td.seconds
    if td.microseconds:
        # Any fractional second counts as a whole extra second.
        seconds += 1
    return seconds
convert a timedelta to seconds. This is patterned after timedelta.total_seconds, which is only available in python 27. Args: td: a timedelta object. Returns: total seconds within a timedelta. Rounded up to seconds.
def _extract_obo_synonyms(rawterm):
    """Collect the synonyms defined in *rawterm* as a set."""
    # Only consider keys that are recognized OWL synonym properties.
    keys = set(owl_synonyms).intersection(rawterm.keys())
    return {
        Synonym(value, owl_synonyms[key])
        for key in keys
        for value in rawterm[key]
    }
Extract the synonyms defined in the rawterm.
def set_deferred_transfer(self, enable):
    """Enable or disable buffering (deferral) of transfers.

    Disabling deferral flushes any pending buffered transfers first,
    so previously queued reads/writes complete before this returns.
    """
    if self._deferred_transfer and not enable:
        # Turning deferral off: push out anything still buffered.
        self.flush()
    self._deferred_transfer = enable
Allow transfers to be delayed and buffered By default deferred transfers are turned off. All reads and writes will be completed by the time the function returns. When enabled packets are buffered and sent all at once, which increases speed. When memory is written to, the transfer might take place immediately, or might take place on a future memory write. This means that an invalid write could cause an exception to occur on a later, unrelated write. To guarantee that previous writes are complete call the flush() function. The behaviour of read operations is determined by the modes READ_START, READ_NOW and READ_END. The option READ_NOW is the default and will cause the read to flush all previous writes, and read the data immediately. To improve performance, multiple reads can be made using READ_START and finished later with READ_NOW. This allows the reads to be buffered and sent at once. Note - All READ_ENDs must be called before a call using READ_NOW can be made.
def validate_and_decode(jwt_bu64, cert_obj):
    """Validate a JWT's signature using the cryptography library.

    Note: only the signature is validated — the claim set is NOT
    checked. Returns the decoded claim dict.
    """
    public_key = cert_obj.public_key()
    # The signed message is "<header>.<payload>" of the compact JWT.
    signed_part = '.'.join(d1_common.cert.jwt.get_bu64_tup(jwt_bu64)[:2])
    signature = d1_common.cert.jwt.get_jwt_tup(jwt_bu64)[2]
    try:
        public_key.verify(
            signature,
            signed_part,
            cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15(),
            cryptography.hazmat.primitives.hashes.SHA256(),
        )
    except cryptography.exceptions.InvalidSignature as e:
        raise Exception('Signature is invalid. error="{}"'.format(str(e)))
    return d1_common.cert.jwt.get_jwt_dict(jwt_bu64)
Example for validating the signature of a JWT using only the cryptography library. Note that this does NOT validate the claims in the claim set.
def merge_extra_options(self, needs_info):
    """Merge options introduced via options_ext into *needs_info*.

    Declared option_spec keys missing from needs_info are filled with
    empty strings. Returns the set of extra keys that were merged.
    """
    extra_keys = set(self.options) - set(needs_info)
    for key in extra_keys:
        needs_info[key] = self.options[key]
    for key in self.option_spec:
        needs_info.setdefault(key, "")
    return extra_keys
Add any extra options introduced via options_ext to needs_info
def _get_triplet_scores(self, triangles_list):
    """Score each triangle in the current model.

    The score is (sum of the individual maxima of the three pairwise
    objectives) minus (the maximum of their combined objective).

    Parameters
    ----------
    triangles_list : list
        Variable triples, e.g. [['var_5', 'var_8', 'var_7'], ...].

    Returns
    -------
    dict mapping frozenset(triplet) -> score.
    """
    triplet_scores = {}
    for triplet in triangles_list:
        pairs = list(it.combinations(triplet, 2))
        # Sum of the independent maxima of each pairwise objective.
        ind_max = sum(
            np.amax(self.objective[frozenset(pair)].values)
            for pair in pairs)
        # Maximum of the combined (summed) pairwise objectives.
        joint = self.objective[frozenset(pairs[0])]
        for pair in pairs[1:]:
            joint = joint + self.objective[frozenset(pair)]
        joint_max = np.amax(joint.values)
        triplet_scores[frozenset(triplet)] = ind_max - joint_max
    return triplet_scores
Returns the score of each of the triplets found in the current model Parameters --------- triangles_list: list The list of variables forming the triangles to be updated. It is of the form of [['var_5', 'var_8', 'var_7'], ['var_4', 'var_5', 'var_7']] Return: {frozenset({'var_8', 'var_5', 'var_7'}): 5.024, frozenset({'var_5', 'var_4', 'var_7'}): 10.23}
def _pages_to_generate(self):
    """Return slugs of published pages, newest creation date first."""
    published = []
    for slug in self.get_page_names():
        page = s2page.Page(self, slug, isslug=True)
        if page.published:
            published.append({
                'slug': page.slug,
                'title': page.title,
                'date': page.creation_date,
            })
    published.sort(key=lambda info: info['date'], reverse=True)
    return [info['slug'] for info in published]
Return list of slugs that correspond to pages to generate.
def main(dimension, iterations):
    """Run the PSO optimizer example on the sphere function.

    Returns the solution found and the optimizer instance.
    """
    optimizer = PSOOptimizer()
    solution = optimizer.minimize(
        sphere, -5.12, 5.12, dimension, max_iterations(iterations))
    return solution, optimizer
Main function for PSO optimizer example. Instantiate PSOOptimizer to optimize 30-dimensional spherical function.
def stats(self, result=None, counter=0):
    """Count leaf and root nodes per depth in this dict tree.

    Returns a list like::

        [
            {"depth": 0, "leaf": M0, "root": N0},
            {"depth": 1, "leaf": M1, "root": N1},
            ...
        ]
    """
    if result is None:
        result = dict()
    if counter == 0:
        # Depth 0 describes this node itself: a root when it has
        # children, a leaf otherwise.
        has_children = bool(len(self))
        result[0] = {
            "depth": 0,
            "leaf": 0 if has_children else 1,
            "root": 1 if has_children else 0,
        }
    counter += 1
    if len(self):
        result.setdefault(
            counter, {"depth": counter, "leaf": 0, "root": 0})
        for subtree in self.values():
            if len(subtree):
                result[counter]["root"] += 1
            else:
                result[counter]["leaf"] += 1
            subtree.stats(result, counter)
    return [
        collections.OrderedDict([
            ("depth", info["depth"]),
            ("leaf", info["leaf"]),
            ("root", info["root"]),
        ])
        for info in sorted(result.values(), key=lambda x: x["depth"])
    ]
Display the node stats info on specific depth in this dict. :: [ {"depth": 0, "leaf": M0, "root": N0}, {"depth": 1, "leaf": M1, "root": N1}, ... {"depth": k, "leaf": Mk, "root": Nk}, ]
def get(cls):
    """Return the code selected by the user, or None.

    Returns:
        str: Code, or None when nothing has been selected yet (an
        alert is shown for an unknown sub-conspect name).
    """
    if cls.is_twoconspect:
        return cls.subconspect_el.value or None
    name = cls.input_el.value.strip()
    if not name:
        return None
    mdt = conspectus.mdt_by_name.get(name)
    if not mdt:
        alert("Invalid sub-conspect `%s`!" % name)
        return None
    return mdt
Get code selected by user. Returns: str: Code or None in case that user didn't selected anything yet.
def update_state(self, state_arr, action_arr):
    """Advance the agent to the cell marked in the last action plane.

    Args:
        state_arr: `np.ndarray` of state in `self.t`.
        action_arr: `np.ndarray` of action in `self.t`.

    Returns:
        `np.ndarray` of state in `self.t+1`.
    """
    rows, cols = np.where(action_arr[-1] == 1)
    position = (rows[0], cols[0])
    self.__agent_pos = position
    self.__route_memory_list.append(position)
    self.__route_long_memory_list.append(position)
    # De-duplicate the long-term route memory.
    self.__route_long_memory_list = list(
        set(self.__route_long_memory_list))
    # Keep only the most recent `memory_num` positions.
    while len(self.__route_memory_list) > self.__memory_num:
        self.__route_memory_list = self.__route_memory_list[1:]
    return self.extract_now_state()
Update state. Override. Args: state_arr: `np.ndarray` of state in `self.t`. action_arr: `np.ndarray` of action in `self.t`. Returns: `np.ndarray` of state in `self.t+1`.
def validate(self):
    """Collect newly added features and validate each one's API.

    Returns False when any import fails or no new features are found;
    otherwise True iff every feature passes validation.
    """
    changes = self.change_collector.collect_changes()
    features = []
    import_failures = False
    for importer, modname, modpath in changes.new_feature_info:
        try:
            module = importer()
            features.extend(_get_contrib_features(module))
        except (ImportError, SyntaxError):
            logger.info(
                'Failed to import module at {}'.format(modpath))
            logger.exception('Exception details: ')
            import_failures = True
    if import_failures:
        return False
    if not features:
        logger.info('Failed to collect any new features.')
        return False
    return all(
        validate_feature_api(feature, self.X, self.y, subsample=False)
        for feature in features)
Collect and validate all new features
def add_to_history(self, command):
    """Append *command* to the in-memory and on-disk history."""
    command = to_text_string(command)
    # Ignore empty input and traceback lines.
    if command in ['', '\n'] or command.startswith('Traceback'):
        return
    if command.endswith('\n'):
        command = command[:-1]
    self.histidx = None
    # Skip consecutive duplicates.
    if self.history and self.history[-1] == command:
        return
    self.history.append(command)
    text = os.linesep + command
    if self.history_filename not in self.HISTORY_FILENAMES:
        # First write to this file in this session: add a separator.
        self.HISTORY_FILENAMES.append(self.history_filename)
        text = self.SEPARATOR + text
    try:
        encoding.write(text, self.history_filename, mode='ab')
    except EnvironmentError:
        # Best effort: keep running even if the history file is
        # unwritable.
        pass
    if self.append_to_history is not None:
        self.append_to_history.emit(self.history_filename, text)
Add command to history
def line_model(freq, data, tref, amp=1, phi=0):
    """Simple time-domain model for a frequency line.

    Returns a complex TimeSeries ``amp * exp(i*(2*pi*freq*t + phi))``
    sampled at ``data.sample_times``, with times taken relative to
    *tref*. The complex values allow measuring both amplitude and
    phase of the corresponding line in strain data (use the real part
    for extraction).
    """
    freq_line = TimeSeries(zeros(len(data)), delta_t=data.delta_t,
                           epoch=data.start_time)
    times = data.sample_times - float(tref)
    phase = 2 * numpy.pi * freq * times + phi
    freq_line.data = amp * numpy.exp(1.j * phase)
    return freq_line
Simple time-domain model for a frequency line. Parameters ---------- freq: float Frequency of the line. data: pycbc.types.TimeSeries Reference data, to get delta_t, start_time, duration and sample_times. tref: float Reference time for the line model. amp: {1., float}, optional Amplitude of the frequency line. phi: {0. float}, optional Phase of the frequency line (radians). Returns ------- freq_line: pycbc.types.TimeSeries A timeseries of the line model with frequency 'freq'. The returned data are complex to allow measuring the amplitude and phase of the corresponding frequency line in the strain data. For extraction, use only the real part of the data.
def set_year(self, year):
    """Select the user's year on magma; required just before login.

    Called by default by ``login``. Returns True when the post
    succeeded (the navigation menu is present in the response).
    """
    self.year = YEARS.get(year, year)
    soup = self.post_soup('/~etudiant/login.php',
                          data={'idCursus': self.year})
    return bool(soup.select('ul.rMenu-hor'))
Set an user's year. This is required on magma just before the login. It's called by default by ``login``.
def transform_with(self, estimator, out_ds, fmt=None):
    """Partial-transform every key of this dataset into *out_ds*.

    Parameters
    ----------
    estimator : object with a ``partial_transform`` method
        Should already be fitted.
    out_ds : str or Dataset
        Path for a new derived dataset, or a dataset opened for
        writing.
    fmt : str
        Dataset type to create when *out_ds* is a path.

    Returns
    -------
    out_ds : Dataset
        The transformed dataset.
    """
    if isinstance(out_ds, str):
        out_ds = self.create_derived(out_ds, fmt=fmt)
    elif isinstance(out_ds, _BaseDataset):
        assert out_ds.mode in ('w', 'a'), \
            "Dataset must be opened in write mode."
    else:
        raise ValueError(
            "Please specify a dataset path or an existing dataset.")
    for key in self.keys():
        out_ds[key] = estimator.partial_transform(self[key])
    return out_ds
Call the partial_transform method of the estimator on this dataset Parameters ---------- estimator : object with ``partial_fit`` method This object will be used to transform this dataset into a new dataset. The estimator should be fitted prior to calling this method. out_ds : str or Dataset This dataset will be transformed and saved into out_ds. If out_ds is a path, a new dataset will be created at that path. fmt : str The type of dataset to create if out_ds is a string. Returns ------- out_ds : Dataset The tranformed dataset.
def get_value(self, Meta: Type[object], base_classes_meta, mcs_args: McsArgs) -> Any:
    """Resolve the value for ``self.name``.

    Precedence: the class-under-construction's ``Meta``, then (when
    ``self.inherit``) the base class ``Meta``, then ``self.default``.
    """
    value = self.default
    if self.inherit and base_classes_meta is not None:
        # Inherited value overrides the default.
        value = getattr(base_classes_meta, self.name, value)
    if Meta is not None:
        # The class's own Meta wins over everything else.
        value = getattr(Meta, self.name, value)
    return value
Returns the value for ``self.name`` given the class-under-construction's class ``Meta``. If it's not found there, and ``self.inherit == True`` and there is a base class that has a class ``Meta``, use that value, otherwise ``self.default``. :param Meta: the class ``Meta`` (if any) from the class-under-construction (**NOTE:** this will be an ``object`` or ``None``, NOT an instance of :class:`MetaOptionsFactory`) :param base_classes_meta: the :class:`MetaOptionsFactory` instance (if any) from the base class of the class-under-construction :param mcs_args: the :class:`McsArgs` for the class-under-construction
def atype_view_asset(self, ):
    """View the asset currently selected in the assettype tree view.

    :returns: None
    :raises: None
    """
    if not self.cur_atype:
        return
    index = self.atype_asset_treev.currentIndex()
    item = index.internalPointer()
    if not item:
        return
    asset = item.internal_data()
    if isinstance(asset, djadapter.models.Asset):
        self.view_asset(asset)
View the project of the current assettype :returns: None :rtype: None :raises: None
def sim_sedfile(self, **kwargs):
    """Name of the simulated SED file for a particular target.

    The ``seed`` kwarg defaults to the literal 'SEED' placeholder.
    """
    kwargs.setdefault('seed', 'SEED')
    return self._format_from_dict(NameFactory.sim_sedfile_format, **kwargs)
Return the name for the simulated SED file for a particular target
def _filter_by_m2m_schema(self, qs, lookup, sublookup, value, schema, model=None):
    """Build filter kwargs for an attribute of a many-to-many schema."""
    model = model or self.model
    schemata = {s.name: s for s in model.get_schemata_for_model()}
    try:
        schema = schemata[lookup]
    except KeyError:
        raise ValueError(u'Could not find schema for lookup "%s"' % lookup)
    suffix = '__%s' % sublookup if sublookup else ''
    return {
        'attrs__schema': schema,
        'attrs__choice%s' % suffix: value,
    }
Filters given entity queryset by an attribute which is linked to given many-to-many schema.
def _assemble_modification(stmt):
    """Assemble a Modification statement into an English sentence."""
    sub_str = _assemble_agent_str(stmt.sub)
    if stmt.enz is None:
        # No enzyme: describe the substrate's modification state.
        stmt_str = sub_str + ' is ' + _mod_state_stmt(stmt)
    else:
        enz_str = _assemble_agent_str(stmt.enz)
        if _get_is_direct(stmt):
            verb = ' ' + _mod_process_verb(stmt) + ' '
        else:
            verb = ' leads to the ' + _mod_process_noun(stmt) + ' of '
        stmt_str = enz_str + verb + sub_str
    # Describe the modified residue/position when known.
    if stmt.residue is None:
        mod_str = ''
    elif stmt.position is None:
        mod_str = 'on ' + ist.amino_acids[stmt.residue]['full_name']
    else:
        mod_str = 'on ' + stmt.residue + stmt.position
    stmt_str += ' ' + mod_str
    return _make_sentence(stmt_str)
Assemble Modification statements into text.
def _setup_model_loss(self, lr):
    """Create default loss and optimizer unless already configured."""
    if not hasattr(self, "loss"):
        # Default criterion for soft (probabilistic) labels.
        self.loss = SoftCrossEntropyLoss()
    if not hasattr(self, "optimizer"):
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
Setup loss and optimizer for PyTorch model.
def smooth(self, n_iter=20, convergence=0.0, edge_angle=15, feature_angle=45,
           boundary_smoothing=True, feature_smoothing=False, inplace=False):
    """Relax point coordinates using Laplacian smoothing.

    Makes cells better shaped and vertices more evenly distributed.

    Parameters
    ----------
    n_iter : int
        Number of smoothing iterations.
    convergence : float, optional
        Convergence criterion (0 to 1); smaller means more iterations.
    edge_angle : float, optional
        Edge angle controlling smoothing along edges.
    feature_angle : float, optional
        Feature angle for sharp edge identification.
    boundary_smoothing : bool, optional
        Whether to smooth boundary edges.
    feature_smoothing : bool, optional
        Whether to smooth feature edges.
    inplace : bool, optional
        Overwrite this mesh and return nothing.

    Returns
    -------
    mesh : PolyData or None
        The smoothed mesh, or None when inplace=True.
    """
    smoother = vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputData(self)
    smoother.SetNumberOfIterations(n_iter)
    smoother.SetConvergence(convergence)
    smoother.SetFeatureEdgeSmoothing(feature_smoothing)
    smoother.SetFeatureAngle(feature_angle)
    smoother.SetEdgeAngle(edge_angle)
    smoother.SetBoundarySmoothing(boundary_smoothing)
    smoother.Update()
    mesh = _get_output(smoother)
    if inplace:
        self.overwrite(mesh)
        return None
    return mesh
Adjust point coordinates using Laplacian smoothing. The effect is to "relax" the mesh, making the cells better shaped and the vertices more evenly distributed. Parameters ---------- n_iter : int Number of iterations for Laplacian smoothing, convergence : float, optional Convergence criterion for the iteration process. Smaller numbers result in more smoothing iterations. Range from (0 to 1). edge_angle : float, optional Edge angle to control smoothing along edges (either interior or boundary). feature_angle : float, optional Feature angle for sharp edge identification. boundary_smoothing : bool, optional Boolean flag to control smoothing of boundary edges. feature_smoothing : bool, optional Boolean flag to control smoothing of feature edges. inplace : bool, optional Updates mesh in-place while returning nothing. Returns ------- mesh : vtki.PolyData Decimated mesh. None when inplace=True.
def list(self, filter_guid=None, filter_ids=None, detailed=None, page=None):
    """Return a paginated list of the account's plugins.

    :type filter_guid: str
    :param filter_guid: Filter by name
    :type filter_ids: list of ints
    :param filter_ids: Filter by plugin IDs
    :type detailed: bool
    :param detailed: Include all data about a plugin
    :type page: int
    :param page: Pagination index
    :rtype: dict
    :return: JSON response of the API (plus a 'pages' key when
        results are paginated).
    """
    filters = [
        'filter[guid]={0}'.format(filter_guid) if filter_guid else None,
        'filter[ids]={0}'.format(
            ','.join(str(plugin_id) for plugin_id in filter_ids))
        if filter_ids else None,
        'detailed={0}'.format(detailed) if detailed is not None else None,
        'page={0}'.format(page) if page else None,
    ]
    return self._get(
        url='{0}plugins.json'.format(self.URL),
        headers=self.headers,
        params=self.build_param_string(filters),
    )
This API endpoint returns a paginated list of the plugins associated with your New Relic account. Plugins can be filtered by their name or by a list of IDs. :type filter_guid: str :param filter_guid: Filter by name :type filter_ids: list of ints :param filter_ids: Filter by user ids :type detailed: bool :param detailed: Include all data about a plugin :type page: int :param page: Pagination index :rtype: dict :return: The JSON response of the API, with an additional 'pages' key if there are paginated results :: { "plugins": [ { "id": "integer", "name": "string", "guid": "string", "publisher": "string", "details": { "description": "integer", "is_public": "string", "created_at": "time", "updated_at": "time", "last_published_at": "time", "has_unpublished_changes": "boolean", "branding_image_url": "string", "upgraded_at": "time", "short_name": "string", "publisher_about_url": "string", "publisher_support_url": "string", "download_url": "string", "first_edited_at": "time", "last_edited_at": "time", "first_published_at": "time", "published_version": "string" }, "summary_metrics": [ { "id": "integer", "name": "string", "metric": "string", "value_function": "string", "thresholds": { "caution": "float", "critical": "float" }, "values": { "raw": "float", "formatted": "string" } } ] } ], "pages": { "last": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "last" }, "next": { "url": "https://api.newrelic.com/v2/plugins.json?page=2", "rel": "next" } } }
def get_idp_sso_supported_bindings(idp_entity_id=None, config=None):
    """Return the SSO bindings supported by an IdP.

    Thin wrapper over pysaml2 metadata, where this lookup is not
    obvious. Defaults to the first available IdP when none is given.
    """
    if config is None:
        from djangosaml2.conf import get_config
        config = get_config()
    meta = getattr(config, 'metadata', {})
    if idp_entity_id is None:
        try:
            idp_entity_id = list(available_idps(config).keys())[0]
        except IndexError:
            raise ImproperlyConfigured("No IdP configured!")
    try:
        return meta.service(
            idp_entity_id, 'idpsso_descriptor',
            'single_sign_on_service').keys()
    except UnknownSystemEntity:
        # Unknown entity: report no supported bindings.
        return []
Returns the list of bindings supported by an IDP This is not clear in the pysaml2 code, so wrapping it in a util
def get_catalogue(self, locale):
    """Return the message catalogue for *locale*.

    The catalogue is (re)loaded when it is missing or when more than
    one second has passed since the last reload.
    """
    locale = self.locale if locale is None else locale
    stale = datetime.now() - self.last_reload > timedelta(seconds=1)
    if locale not in self.catalogues or stale:
        self._load_catalogue(locale)
        self.last_reload = datetime.now()
    return self.catalogues[locale]
Reloads messages catalogue if requested after more than one second since last reload
def get_biased_correlations(data, threshold=10):
    """Mean of the ``threshold`` highest correlations of each column.

    For every column of the dense data matrix, the top ``threshold``
    correlations with other columns are collected (skipping the trivial
    self-correlation of 1.0), and the mean over all of them is returned.
    """
    dense = data.toDense()
    correlations = numpy.corrcoef(dense, rowvar=False)
    top_values = []
    for row in correlations:
        ranked = sorted(row, reverse=True)
        # Index 0 is the self-correlation; take the next `threshold`.
        top_values.extend(ranked[1:threshold + 1])
    return numpy.mean(top_values)
Gets the highest few correlations for each bit, across the entirety of the data. Meant to provide a comparison point for the pairwise correlations reported in the literature, which are typically between neighboring neurons tuned to the same inputs. We would expect these neurons to be among the most correlated in any region, so pairwise correlations between neighboring neurons most likely do not provide an unbiased estimator of correlations between arbitrary neurons.
def score(
    self,
    data,
    metric="accuracy",
    break_ties="random",
    verbose=True,
    print_confusion_matrix=True,
    **kwargs,
):
    """Score the predictive performance of the Classifier.

    Args:
        data: a DataLoader, Dataset, or (X, Y) tuple to predict on.
        metric: a metric name or a list of metric names to evaluate.
        break_ties: tie-breaking policy passed to the predictor.
        verbose: print each score (and confusion matrix) when True.
        print_confusion_matrix: print the confusion matrix (only when
            ``verbose`` is also True).

    Returns:
        A single score, or a list of scores when ``metric`` was a list.
    """
    Y_p, Y, Y_s = self._get_predictions(
        data, break_ties=break_ties, return_probs=True, **kwargs
    )

    # Normalize `metric` to a list, remembering whether a list was asked for.
    if isinstance(metric, list):
        metrics, return_list = metric, True
    else:
        metrics, return_list = [metric], False

    scores = []
    for m in metrics:
        s = metric_score(Y, Y_p, m, probs=Y_s, ignore_in_gold=[0])
        scores.append(s)
        if verbose:
            print(f"{m.capitalize()}: {s:.3f}")

    if verbose and print_confusion_matrix:
        confusion_matrix(Y, Y_p, pretty_print=True)

    # Unwrap a single score unless the caller explicitly passed a list.
    return scores[0] if len(scores) == 1 and not return_list else scores
Scores the predictive performance of the Classifier on all tasks Args: data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): X: The input for the predict method Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in {1,...,k} metric: A metric (string) with which to score performance or a list of such metrics break_ties: A tie-breaking policy (see Classifier._break_ties()) verbose: The verbosity for just this score method; it will not update the class config. print_confusion_matrix: Print confusion matrix (overwritten to False if verbose=False) Returns: scores: A (float) score or a list of such scores if kwarg metric is a list
def run(self, parameter_space, kernel_options, tuning_options):
    """Iterate through the entire parameter space in a single process.

    :param parameter_space: iterable of parameter tuples to benchmark.
    :param kernel_options: options describing the kernel.
    :param tuning_options: options controlling the tuning process.
    :returns: the list of benchmarked configurations (each a dict with a
        'time' entry) and a dict describing the benchmarking environment.
    """
    logging.debug('sequential runner started for ' + kernel_options.kernel_name)
    results = []
    param_names = tuning_options.tune_params.keys()
    for element in parameter_space:
        params = OrderedDict(zip(param_names, element))
        time = self.dev.compile_and_benchmark(
            self.gpu_args, params, kernel_options, tuning_options)
        if time is None:
            # Compile or runtime failure: configuration is skipped.
            logging.debug('received time is None, kernel configuration was skipped silently due to compile or runtime failure')
            continue
        params['time'] = time
        output_string = get_config_string(params, self.units)
        logging.debug(output_string)
        if not self.quiet:
            print(output_string)
        results.append(params)
    return results, self.dev.get_environment()
Iterate through the entire parameter space using a single Python process :param parameter_space: The parameter space as an iterable. :type parameter_space: iterable :param kernel_options: A dictionary with all options for the kernel. :type kernel_options: kernel_tuner.interface.Options :param tuning_options: A dictionary with all options regarding the tuning process. :type tuning_options: kernel_tuner.interface.Options :returns: A list of dictionaries for executed kernel configurations and their execution times. And a dictionary that contains information about the hardware/software environment on which the tuning took place. :rtype: list(dict()), dict()
def get_variable(name, temp_s):
    """Create a zero-initialized tf.Variable named ``name`` with shape ``temp_s``."""
    initial_value = tf.zeros(temp_s)
    return tf.Variable(initial_value, name=name)
Get variable by name.
def _OpenFilesForRead(self, metadata_value_pairs, token):
    """Open all result files at once for reading.

    Args:
      metadata_value_pairs: Iterable of (metadata, result) pairs whose AFF4
        paths should be opened.
      token: Access token used by the AFF4 factory.

    Returns:
      A dict mapping each opened file's URN to its file object.
    """
    aff4_paths = [
        result.AFF4Path(metadata.client_urn)
        for metadata, result in metadata_value_pairs
    ]
    fds = aff4.FACTORY.MultiOpen(aff4_paths, mode="r", token=token)
    # Dict comprehension instead of dict([(k, v) ...]) — same result,
    # clearer and avoids building an intermediate list.
    return {fd.urn: fd for fd in fds}
Open files all at once if necessary.
def from_format(
    string,
    fmt,
    tz=UTC,
    locale=None,
):
    """Create a DateTime instance by parsing ``string`` with ``fmt``.

    Falls back to ``tz`` when the parsed string carries no timezone.
    """
    parsed = _formatter.parse(string, fmt, now(), locale=locale)
    if parsed["tz"] is None:
        parsed["tz"] = tz
    return datetime(**parsed)
Creates a DateTime instance from a specific format.
def update_links(self, request, admin_site=None):
    """Update this widget's add/list urls from the admin site.

    Looks up the bundle registered for the model this foreign key points
    to and asks it for the add and list urls, storing them on the widget.
    Urls are only set when ``request.user`` has permission on them.

    :param request: The request for which this widget is being rendered.
    :param admin_site: Optional admin site used to look up the bundle.
    """
    if not admin_site:
        return
    bundle = admin_site.get_bundle_for_model(self.model.to)
    if not bundle:
        return
    user = request.user
    self._api_link = self._get_bundle_link(bundle, self.view, user)
    self._add_link = self._get_bundle_link(bundle, self.add_view, user)
Called to update the widget's urls. Tries to find the bundle for the model that this foreign key points to and then asks it for the urls for adding and listing and sets them on this widget instance. The urls are only set if request.user has permissions on that url. :param request: The request for which this widget is being rendered. :param admin_site: If provided, the `admin_site` is used to lookup \ the bundle that is registered as the primary url for the model \ that this foreign key points to.
def safe_import(self, name):
    """Import (or re-import) module ``name`` for use inside the env.

    Imported modules are cached in ``self._modules``. If the cached entry
    is falsy, the matching distribution from ``self.base_working_set`` is
    activated (if found) and the import is retried. Any copy already in
    ``sys.modules`` is reloaded so callers see a fresh module.
    """
    module = None
    if name not in self._modules:
        self._modules[name] = importlib.import_module(name)
    module = self._modules[name]
    if not module:
        # Cached entry is falsy: look for a distribution of the same
        # project name in the base working set and activate it before
        # importing again.
        dist = next(iter(
            dist for dist in self.base_working_set
            if dist.project_name == name
        ), None)
        if dist:
            dist.activate()
        module = importlib.import_module(name)
    if name in sys.modules:
        try:
            # Reload both the handle we hold and the sys.modules entry so
            # stale state from a previous import is discarded.
            six.moves.reload_module(module)
            six.moves.reload_module(sys.modules[name])
        except TypeError:
            # reload() raises TypeError for non-module objects; fall back
            # to replacing the sys.modules entry with our cached module.
            del sys.modules[name]
            sys.modules[name] = self._modules[name]
    return module
Helper utility for reimporting previously imported modules while inside the env
def fromimporterror(cls, bundle, importerid, rsid, exception, endpoint):
    """Create a RemoteServiceAdminEvent describing a failed import."""
    event_kind = RemoteServiceAdminEvent.IMPORT_ERROR
    # No import reference and no export reference for an import error.
    return RemoteServiceAdminEvent(
        event_kind, bundle, importerid, rsid, None, None, exception, endpoint)
Creates a RemoteServiceAdminEvent object from an import error
def _upload(self, files, voice_clip=False):
    """Upload files to Facebook.

    ``files`` should be a list of files that requests can upload, see
    http://docs.python-requests.org/en/master/api/#requests.request

    Returns a list of tuples with a file's ID and mimetype.

    Raises:
        FBchatException: If some of the files could not be uploaded.
    """
    file_dict = {"upload_{}".format(i): f for i, f in enumerate(files)}
    data = {"voice_clip": voice_clip}
    j = self._postFile(
        self.req_url.UPLOAD,
        files=file_dict,
        query=data,
        fix_request=True,
        as_json=True,
    )
    metadata = j["payload"]["metadata"]
    if len(metadata) != len(files):
        raise FBchatException(
            "Some files could not be uploaded: {}, {}".format(j, files)
        )
    # Fixed: the original comprehension reused the name `data`, shadowing
    # the request-query dict above — confusing and bug-prone.
    return [
        (meta[mimetype_to_key(meta["filetype"])], meta["filetype"])
        for meta in metadata
    ]
Uploads files to Facebook `files` should be a list of files that requests can upload, see: http://docs.python-requests.org/en/master/api/#requests.request Returns a list of tuples with a file's ID and mimetype
def registration_update_or_create(self):
    """Create or update the registration model instance with attributes
    from this instance.

    Called from the signal.

    Returns:
        Tuple of (registered_subject, created).

    Raises:
        UpdatesOrCreatesRegistrationModelError: if the field named by
        ``self.registration_unique_field`` has no value on this instance.
    """
    if not getattr(self, self.registration_unique_field):
        raise UpdatesOrCreatesRegistrationModelError(
            f'Cannot update or create RegisteredSubject. '
            f'Field value for \'{self.registration_unique_field}\' is None.')
    registration_value = getattr(self, self.registration_unique_field)
    # Normalize the lookup value to a string before querying.
    registration_value = self.to_string(registration_value)
    try:
        obj = self.registration_model.objects.get(
            **{self.registered_subject_unique_field: registration_value})
    except self.registration_model.DoesNotExist:
        pass
    else:
        # Existing registration: validate that no illegal value change is
        # about to happen before updating it.
        self.registration_raise_on_illegal_value_change(obj)
    registered_subject, created = self.registration_model.objects.update_or_create(
        **{self.registered_subject_unique_field: registration_value},
        defaults=self.registration_options)
    return registered_subject, created
Creates or Updates the registration model with attributes from this instance. Called from the signal
def flush(self):
    """Write every buffered record to the handle and empty the buffer."""
    for record in self.buffer:
        SeqIO.write(record, self.handle, self.format)
    self.buffer = []
Empty the buffer.
def get_endpoint_by_endpoint_id(self, endpoint_id):
    """Fetch a single endpoint by its endpoint id.

    Raises DataFailureException when the service does not return 200.
    """
    self._validate_uuid(endpoint_id)
    url = "/notification/v1/endpoint/{}".format(endpoint_id)
    response = NWS_DAO().getURL(url, self._read_headers)
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    payload = json.loads(response.data)
    return self._endpoint_from_json(payload.get("Endpoint"))
Get an endpoint by endpoint id
def remove_collisions(self, min_dist=0.5):
    """Remove predicted sites that are too close to existing atoms.

    Args:
        min_dist (float): minimum distance (in Angstrom) a predicted site
            must keep from existing atoms. A value <= 0 keeps all sites.

    Returns:
        The list of fractional coordinates that survived the filter, or
        None when no extrema have been computed yet.
    """
    s_f_coords = self.structure.frac_coords
    f_coords = self.extrema_coords

    if len(f_coords) == 0:
        if self.extrema_type is None:
            logger.warning(
                "Please run ChargeDensityAnalyzer.get_local_extrema first!")
            return
        new_f_coords = []
        self._update_extrema(new_f_coords, self.extrema_type)
        return new_f_coords

    # Distance from every candidate site to its nearest existing atom.
    dist_matrix = self.structure.lattice.get_all_distances(f_coords,
                                                           s_f_coords)
    nearest = np.min(dist_matrix, axis=1)
    new_f_coords = [f for f, d in zip(f_coords, nearest) if d > min_dist]
    self._update_extrema(new_f_coords, self.extrema_type)
    return new_f_coords
Remove predicted sites that are too close to existing atoms in the structure. Args: min_dist (float): The minimum distance (in Angstrom) that a predicted site needs to be from existing atoms. A min_dist with value <= 0 returns all sites without distance checking.
async def game(
        self, short_name, *, id=None, text=None, parse_mode=(),
        link_preview=True, geo=None, period=60, contact=None,
        game=False, buttons=None
):
    """Create a new inline result of game type.

    Args:
        short_name (`str`):
            The short name of the game to use.
    """
    send_message = await self._message(
        text=text, parse_mode=parse_mode, link_preview=link_preview,
        geo=geo, period=period, contact=contact, game=game,
        buttons=buttons
    )
    result = types.InputBotInlineResultGame(
        id=id or '',
        short_name=short_name,
        send_message=send_message
    )
    if id is None:
        # No explicit id: derive a deterministic one from the content.
        result.id = hashlib.sha256(bytes(result)).hexdigest()
    return result
Creates a new inline result of game type. Args: short_name (`str`): The short name of the game to use.
def maybe_clean(self):
    """Remove expired entries from the cache if the cleaning interval
    has elapsed; otherwise do nothing.
    """
    now = time.time()
    if self.next_cleaning > now:
        return
    # dict.iteritems() is Python 2 only; list(...) snapshots the items so
    # the dict can be mutated safely afterwards.
    expired = [k for k, v in list(self.data.items()) if v.expiration <= now]
    for k in expired:
        del self.data[k]
    # Re-read the clock: the sweep itself may have taken time.
    self.next_cleaning = time.time() + self.cleaning_interval
Clean the cache if it's time to do so.
def distribute(self,
               volume: float,
               source: Well,
               dest: List[Well],
               *args, **kwargs) -> 'InstrumentContext':
    """Move a volume of liquid from one source to multiple destinations.

    :param volume: The amount of volume to distribute to each
                   destination well.
    :param source: A single well from where liquid will be aspirated.
    :param dest: List of Wells where liquid will be dispensed to.
    :param kwargs: See :py:meth:`transfer`.
    :returns: This instance
    """
    message = "Distributing {} from {} to {}".format(volume, source, dest)
    self._log.debug(message)
    options = dict(kwargs)
    options['mode'] = 'distribute'
    options['disposal_volume'] = options.get('disposal_vol', self.min_volume)
    return self.transfer(volume, source, dest, **options)
Move a volume of liquid from one source to multiple destinations. :param volume: The amount of volume to distribute to each destination well. :param source: A single well from where liquid will be aspirated. :param dest: List of Wells where liquid will be dispensed to. :param kwargs: See :py:meth:`transfer`. :returns: This instance
def get_balance(self):
    """Get the account balance from the provider.

    Returns a dict mapping lowercase field names to their values as
    parsed from the provider's ';'-separated response.
    """
    if not SMSGLOBAL_CHECK_BALANCE_COUNTRY:
        raise Exception('SMSGLOBAL_CHECK_BALANCE_COUNTRY setting must be set to check balance.')
    params = {
        'user': self.get_username(),
        'password': self.get_password(),
        'country': SMSGLOBAL_CHECK_BALANCE_COUNTRY,
    }
    request = urllib2.Request(SMSGLOBAL_API_URL_CHECKBALANCE,
                              urllib.urlencode(params))
    response = urllib2.urlopen(request).read()
    if response.startswith('ERROR'):
        raise Exception('Error retrieving balance: %s' % response.replace('ERROR:', ''))
    balance = {}
    for part in response.split(';'):
        if len(part) > 0:
            fields = part.split(':')
            balance[fields[0].lower()] = fields[1]
    return balance
Get balance with provider.
def extract(filepath, taxonomy, output_mode, output_limit, spires, match_mode,
            detect_author_keywords, extract_acronyms, rebuild_cache,
            only_core_tags, no_cache):
    """Run keyword extraction on the given PDF file for the given taxonomy.

    Prints the extraction result; exits non-zero when arguments are
    missing or the file does not exist.
    """
    if not filepath or not taxonomy:
        print("No PDF file or taxonomy given!", file=sys.stderr)
        # Fixed: previously exited with status 0 on this error path, which
        # hid failures from calling scripts (the branch below uses 1).
        sys.exit(1)
    click.echo(
        ">>> Going extract keywords from {0} as '{1}'...".format(
            filepath, output_mode
        )
    )
    if not os.path.isfile(filepath):
        click.echo(
            "Path to non-existing file\n",
        )
        sys.exit(1)
    result = get_keywords_from_local_file(
        local_file=filepath,
        taxonomy_name=taxonomy,
        output_mode=output_mode,
        output_limit=output_limit,
        spires=spires,
        match_mode=match_mode,
        no_cache=no_cache,
        with_author_keywords=detect_author_keywords,
        rebuild_cache=rebuild_cache,
        only_core_tags=only_core_tags,
        extract_acronyms=extract_acronyms
    )
    click.echo(result)
Run keyword extraction on given PDF file for given taxonomy.
def array_prepend(path, *values, **kwargs):
    """Add new values to the beginning of an array.

    :param path: Path to the array (the array itself, not an element
        within it).
    :param values: one or more values to prepend.
    :param create_parents: Create the array if it does not exist.

    Only valid in ``mutate_in``.
    """
    create = kwargs.pop('create_parents', False)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_FIRST, path,
                      MultiValue(*values),
                      create_path=create,
                      **kwargs)
Add new values to the beginning of an array. :param path: Path to the array. The path should contain the *array itself* and not an element *within* the array :param values: one or more values to append :param create_parents: Create the array if it does not exist This operation is only valid in :cb_bmeth:`mutate_in`. .. seealso:: :func:`array_append`, :func:`upsert`
def ProcessHuntFlowLog(flow_obj, log_msg):
    """Store a log message from a hunt-induced flow (legacy hunts only)."""
    if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
        return

    hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
    log_entry = rdf_flows.FlowLog(
        client_id=flow_obj.client_id,
        urn=hunt_urn.Add(flow_obj.flow_id),
        flow_name=flow_obj.flow_class_name,
        log_message=log_msg)

    with data_store.DB.GetMutationPool() as pool:
        grr_collections.LogCollection.StaticAdd(
            hunt_urn.Add("Logs"), log_entry, mutation_pool=pool)
Processes log message from a given hunt-induced flow.
def finalize(self):
    """Print per-block counts in ascending order, then the total."""
    by_count = sorted(self.blocks.items(), key=lambda item: item[1])
    for name, count in by_count:
        print('{:3} {}'.format(count, name))
    total = sum(self.blocks.values())
    print('{:3} total'.format(total))
Output the aggregate block count results.
def do_opt(self, *args, **kwargs):
    """Get and set configuration options.

    With no arguments, prints every non-private option (keys not starting
    with '_') and its current value, right-aligned for readability.

    With a single option name and no extra arguments, dispatches to
    ``getopt_<option>`` if it exists, falling back to ``getopt_default``.
    With additional positional or keyword arguments, dispatches to
    ``opt_<option>`` to set the value. The configuration is saved after
    any get or set.
    """
    args = list(args)
    if not args:
        # No option named: print all public options, padded to align.
        largest = 0
        keys = [key for key in self.conf if not key.startswith("_")]
        for key in keys:
            largest = max(largest, len(key))
        for key in keys:
            print("%s : %s" % (key.rjust(largest), self.conf[key]))
        return
    option = args.pop(0)
    if not args and not kwargs:
        # Read access: option-specific getter or generic fallback.
        method = getattr(self, "getopt_" + option, None)
        if method is None:
            self.getopt_default(option)
        else:
            method()
    else:
        # Write access: an option-specific setter is required.
        method = getattr(self, "opt_" + option, None)
        if method is None:
            print("Unrecognized option %r" % option)
        else:
            method(*args, **kwargs)
    self.save_config()
Get and set options
def remove_span(self, span):
    """Remove the given span's underlying node from this coref element."""
    self.node.remove(span.get_node())
Removes a specific span from the coref object
def ensemble_center(self, site_list, indices, cartesian=True):
    """Find the center of an ensemble of sites selected from a site list.

    Helper for the find_adsorption_sites algorithm.

    Args:
        site_list (list of sites): sites to select from.
        indices (list of ints): indices into ``site_list``.
        cartesian (bool): average cartesian coords when True, otherwise
            fractional coords.
    """
    if cartesian:
        selected = [site_list[i].coords for i in indices]
    else:
        selected = [site_list[i].frac_coords for i in indices]
    return np.average(selected, axis=0)
Finds the center of an ensemble of sites selected from a list of sites. Helper method for the find_adsorption_sites algorithm. Args: site_list (list of sites): list of sites indices (list of ints): list of ints from which to select sites from site list cartesian (bool): whether to get average fractional or cartesian coordinate
def to_volume(self):
    """Return the decoded data reshaped as a Z x Y x X volume.

    Raises ValueError when the header carries no Lattice definition.
    """
    definitions = self.header.definitions
    if not hasattr(definitions, "Lattice"):
        raise ValueError("Unable to determine data size")
    X, Y, Z = definitions.Lattice
    return self.decoded_data.reshape(Z, Y, X)
Return a 3D volume of the data
def dgeodr(x, y, z, re, f):
    """Compute the Jacobian of the transformation from rectangular to
    geodetic coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dgeodr_c.html

    :param x: X-coordinate of point.
    :param y: Y-coordinate of point.
    :param z: Z-coordinate of point.
    :param re: Equatorial radius of the reference spheroid.
    :param f: Flattening coefficient.
    :return: 3x3 matrix of partial derivatives.
    """
    jacobi = stypes.emptyDoubleMatrix()
    libspice.dgeodr_c(
        ctypes.c_double(x),
        ctypes.c_double(y),
        ctypes.c_double(z),
        ctypes.c_double(re),
        ctypes.c_double(f),
        jacobi,
    )
    return stypes.cMatrixToNumpy(jacobi)
This routine computes the Jacobian of the transformation from rectangular to geodetic coordinates. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dgeodr_c.html :param x: X-coordinate of point. :type x: float :param y: Y-coordinate of point. :type y: float :param z: Z-coord :type z: float :param re: Equatorial radius of the reference spheroid. :type re: float :param f: Flattening coefficient. :type f: float :return: Matrix of partial derivatives. :rtype: 3x3-Element Array of floats
def stop_func_accept_retry_state(stop_func):
    """Wrap a "stop" function so it accepts a ``retry_state`` parameter.

    Non-callables and functions that already take a retry state are
    returned unchanged.
    """
    if not six.callable(stop_func) or func_takes_retry_state(stop_func):
        return stop_func

    @_utils.wraps(stop_func)
    def wrapped_stop_func(retry_state):
        # Warn that the old (attempt_number, seconds) signature is deprecated.
        warn_about_non_retry_state_deprecation(
            'stop', stop_func, stacklevel=4)
        return stop_func(
            retry_state.attempt_number,
            retry_state.seconds_since_start,
        )

    return wrapped_stop_func
Wrap "stop" function to accept "retry_state" parameter.
def get_negative(self, cls=None, **kwargs): for attr, set_of_values in kwargs.iteritems(): defaults = {key: kwargs[key][-1]["default"] for key in kwargs} defaults.pop(attr) for value in set_of_values[:-1]: case = cls() if cls else self._CasesClass() setattr(case, attr, value) for key in defaults: setattr(case, key, defaults[key]) yield case
Returns a generator that generates negative cases by "each negative value in separate case" algorithm.
def get_todo_items(self, **kwargs):
    """Return an iterator over each item in the todo queue.

    The index is locked before the iterator is returned and unlocked only
    once the iterator has been fully exhausted. To finish an item, pass it
    to the ``complete`` method, which moves it to the done directory and
    prevents it from being retrieved again.

    Raises:
        RuntimeError: if the index is already locked (or locking fails).
    """
    def inner(self):
        # Yield everything currently queued, then release the lock.
        for item in self.get_all_as_list():
            yield item
        self._unlock()
    if not self._is_locked():
        if self._lock():
            # Note: the generator is returned unstarted; the lock stays
            # held until the caller fully exhausts it.
            return inner(self)
    raise RuntimeError("RuntimeError: Index Already Locked")
Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run complete method with the output of this iterator. That will move the item to the done directory and prevent it from being retrieved in the future.
def get_file_lines(self, subsystem, option):
    """Yield the lines of the ``<subsystem>.<option>`` file.

    Only call this when the given subsystem is available; the option name
    must not include the subsystem prefix.
    """
    assert subsystem in self
    path = os.path.join(self.per_subsystem[subsystem],
                        subsystem + '.' + option)
    with open(path) as f:
        yield from f
Read the lines of the given file from the given subsystem. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available.
def demo(quiet, shell, speed, prompt, commentecho):
    """Run a demo doitlive session."""
    options = dict(
        shell=shell,
        speed=speed,
        test_mode=TESTING,
        prompt_template=prompt,
        quiet=quiet,
        commentecho=commentecho,
    )
    run(DEMO, **options)
Run a demo doitlive session.
def delete_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
    """Delete the DCNM "in" service network and record the result in DB."""
    tenant_name = fw_dict.get('tenant_name')
    ret = self._delete_service_nwk(tenant_id, tenant_name, 'in')
    if ret:
        status = fw_const.DCNM_IN_NETWORK_DEL_SUCCESS
        LOG.info("In Service network deleted for tenant %s", tenant_id)
    else:
        status = fw_const.DCNM_IN_NETWORK_DEL_FAIL
        LOG.info("In Service network deleted failed for tenant %s",
                 tenant_id)
    self.update_fw_db_result(tenant_id, dcnm_status=status)
    return ret
Delete the DCNM In Network and store the result in DB.
def write_config_file(self, params, path):
    """Write a config file for this single experiment into ``path``.

    Creates ``<path>/experiment.cfg`` with one section named after
    ``params['name']`` containing every other key/value pair.

    Args:
        params: dict with a 'name' key plus arbitrary option values.
        path: directory in which to create the file.
    """
    cfgp = ConfigParser()
    section = params['name']
    cfgp.add_section(section)
    for key in params:
        if key == 'name':
            continue
        cfgp.set(section, key, params[key])
    # `with` guarantees the handle is closed even if writing raises
    # (the original leaked the file handle on error).
    with open(os.path.join(path, 'experiment.cfg'), 'w') as f:
        cfgp.write(f)
write a config file for this single exp in the folder path.
def error(self, msg):
    """Handle an error message received from the server.

    The error is logged; no response is sent back (unless running in
    test mode, where the string 'error' is returned for inspection).
    """
    body = msg['body'].replace(NULL, '')
    headers = msg['headers']
    brief_msg = headers['message'] if 'message' in headers else ""
    self.log.error("Received server error - message%s\n\n%s"
                   % (brief_msg, body))
    return 'error' if self.testing else NO_RESPONSE_NEEDED
Called to handle an error message received from the server. This method just logs the error message returned: NO_RESPONSE_NEEDED
def get_new_version(self, last_version, last_commit, diff_to_increase_ratio):
    """Compute the new version from changes made since ``last_commit``.

    :param last_version: last version known.
    :param last_commit: hash of the commit of the last version.
    :param diff_to_increase_ratio: ratio converting number of changed
        lines into a version increase.
    :return: the new version.
    """
    version = Version(last_version)
    diff = self.get_diff(last_commit, self.get_last_commit_hash())
    changed_lines = diff[Diff.ADD] + diff[Diff.DEL]
    version.increase_by_changes(changed_lines, diff_to_increase_ratio)
    return version
Gets new version :param last_version: last version known :param last_commit: hash of commit of last version :param diff_to_increase_ratio: Ratio to convert number of changed lines into a version increase :return: new version
def get_input_files(self):
    """Return the input files of this DAG node, plus its job's input
    files when the job is a CondorDAGJob."""
    input_files = list(self.__input_files)
    job = self.job()
    if isinstance(job, CondorDAGJob):
        input_files += job.get_input_files()
    return input_files
Return list of input files for this DAG node and its job.
def from_path(cls, path: pathlib.Path) -> 'File':
    """Create a file entity from a file path.

    :param path: The path of the file.
    :return: A file entity instance representing the file.
    :raises ValueError: If the path does not point to a file.
    """
    if not path.is_file():
        raise ValueError('Path does not point to a file')
    # Fixed: use `cls` rather than the hard-coded File so subclasses
    # construct instances of themselves (standard classmethod pattern).
    return cls(path.name, path.stat().st_size, cls._md5(path))
Create a file entity from a file path. :param path: The path of the file. :return: A file entity instance representing the file. :raises ValueError: If the path does not point to a file.
def create_app(self, app_id, app, minimal=True):
    """Create and start an app.

    :param str app_id: application ID
    :param app: the application to create
    :param bool minimal: ignore nulls and empty collections
    :returns: the created app on success, otherwise False
    """
    app.id = app_id
    response = self._do_request(
        'POST', '/v2/apps', data=app.to_json(minimal=minimal))
    if response.status_code != 201:
        return False
    return self._parse_response(response, MarathonApp)
Create and start an app. :param str app_id: application ID :param :class:`marathon.models.app.MarathonApp` app: the application to create :param bool minimal: ignore nulls and empty collections :returns: the created app (on success) :rtype: :class:`marathon.models.app.MarathonApp` or False
def _iter_vals(key):
    """Iterate over the values of a registry key."""
    value_count = winreg.QueryInfoKey(key)[1]
    for index in range(value_count):
        yield winreg.EnumValue(key, index)
! Iterate over values of a key