code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def exists(self):
    """Return True if at least one record satisfies the query.

    Uses search_count so no records are fetched just to test existence.
    """
    matches = self.rpc_model.search_count(self.domain, context=self.context)
    return matches > 0
A convenience method that returns True if a record satisfying the query exists
def search_all_payments(payment_status=None, page_size=20, start_cursor=None,
                        offset=0, use_cache=True, cache_begin=True,
                        relations=None):
    """Return a command to search all payments ordered by creation desc.

    :param payment_status: the payment status; None searches all statuses.
    :param page_size: number of payments per page.
    :param start_cursor: cursor to continue the search.
    :param offset: offset of the first payment in the search.
    :param use_cache: whether to use the cache for results.
    :param cache_begin: whether to use the cache at the beginning.
    :param relations: relations to bring with payment objects
        (logs, pay_items, owner).
    """
    common_args = (page_size, start_cursor, offset, use_cache,
                   cache_begin, relations)
    if payment_status:
        return PaymentsByStatusSearch(payment_status, *common_args)
    return AllPaymentsSearch(*common_args)
Returns a command to search all payments ordered by creation desc @param payment_status: The payment status. If None, results are returned independent of status @param page_size: number of payments per page @param start_cursor: cursor to continue the search @param offset: offset number of payment on search @param use_cache: indicates whether to use the cache for results @param cache_begin: indicates whether to use the cache at the beginning for results @param relations: list of relations to bring with payment objects. possible values on list: logs, pay_items, owner @return: Returns a command to search all payments ordered by creation desc
def keep_(self, *cols) -> "Ds":
    """Return a dataswim instance whose dataframe is limited to *cols*.

    :param cols: names of the columns to keep
    :return: a new Ds instance, or None when selection fails
    :example: ``ds2 = ds.keep_("Col 1", "Col 2")``
    """
    try:
        ds2 = self._duplicate_(self.df[list(cols)])
    except Exception as e:
        # Column selection failed (e.g. a name not present in the frame).
        # The original message wrongly said "remove colums".
        self.err(e, "Can not keep columns")
        return None
    # Report success, then return the new instance (the original returned
    # self.ok(...)'s result, discarding ds2; also fixes the " ," separator).
    self.ok("Columns", ", ".join(cols), "kept")
    return ds2
Returns a dataswim instance with a dataframe limited to some columns :param cols: names of the columns :type cols: str :return: a dataswim instance :rtype: Ds :example: ``ds2 = ds.keep_("Col 1", "Col 2")``
def serialize_with_sampled_logs(self, logs_limit=-1):
    """Serialize this result with up to *logs_limit* logs.

    If *logs_limit* is -1, all logs are included.
    """
    return {
        'id': self.id,
        'pathName': self.path_name,
        'name': self.name,
        'isUnregistered': self.is_unregistered,
        'logs': [log.serialize for log in self.sampled_logs(logs_limit)],
        # args may legitimately be absent; serialize to an empty list then.
        'args': self.args.serialize if self.args is not None else [],
        'commands': [cmd.serialize for cmd in self.commands],
        'snapshots': [cmd.serialize for cmd in self.snapshots],
        'logModifiedAt': self.log_modified_at.isoformat()
    }
serialize a result with up to `logs_limit` logs. If `logs_limit` is -1, this function will return a result with all its logs.
def recursive_refs(envs, name):
    """Return the set of direct and transitive refs for env *name*.

    >>> sorted(recursive_refs([
    ...     {'name': 'base', 'refs': []},
    ...     {'name': 'test', 'refs': ['base']},
    ...     {'name': 'local', 'refs': ['test']},
    ... ], 'local'))
    ['base', 'test']
    """
    refs_map = {env['name']: set(env['refs']) for env in envs}
    direct = refs_map[name]
    indirect = set()
    for ref in direct:
        indirect |= recursive_refs(envs, ref)
    return direct | indirect
Return set of recursive refs for given env name >>> local_refs = sorted(recursive_refs([ ... {'name': 'base', 'refs': []}, ... {'name': 'test', 'refs': ['base']}, ... {'name': 'local', 'refs': ['test']}, ... ], 'local')) >>> local_refs == ['base', 'test'] True
def pipe_substr(context=None, _INPUT=None, conf=None, **kwargs):
    """A string module that returns a substring. Loopable.

    :param context: pipe2py.Context object
    :param _INPUT: iterable of items or strings
    :param conf: dict with 'from' (starting position) and 'length'
        (count of characters) number entries
    :return: generator of substrings
    """
    # Rename the user-facing 'from' key to the internal 'start' key,
    # keeping any pre-existing 'start' value as the fallback.
    conf['start'] = conf.pop('from', dict.get(conf, 'start'))
    splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
    parsed = utils.dispatch(splits, *get_dispatch_funcs())
    _OUTPUT = starmap(parse_result, parsed)
    return _OUTPUT
A string module that returns a substring. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : { 'from': {'type': 'number', 'value': <starting position>}, 'length': {'type': 'number', 'value': <count of characters to return>} } Returns ------- _OUTPUT : generator of substrings
def new_record(self, key, value):
    """Populate the ``new_record`` key.

    Also populates the ``ids`` key through side effects.
    """
    result = self.get('new_record', {})
    ids = self.get('ids', [])
    for element in force_list(value):
        for spires_id in force_list(element.get('a')):
            ids.append({
                'schema': 'SPIRES',
                'value': spires_id,
            })
        new_recid = force_single_element(element.get('d', ''))
        if new_recid:
            result = get_record_ref(new_recid, 'authors')
    self['ids'] = ids
    return result
Populate the ``new_record`` key. Also populates the ``ids`` key through side effects.
def form_invalid(self, form):
    """Handle an invalid form.

    For AJAX requests, return the form errors serialized as JSON;
    otherwise fall back to the normal non-AJAX behaviour.
    """
    if self.request.is_ajax():
        return self.render_json_response(self.get_error_result(form))
    return super(AjaxFormMixin, self).form_invalid(form)
We have errors in the form. If ajax, return them as json. Otherwise, proceed as normal.
def _isLastCodeColumn(self, block, column):
    """Return True if *column* is at or past the last non-whitespace
    character of the line, or if everything after it is a comment.
    """
    return column >= self._lastColumn(block) or \
        self._isComment(block, self._nextNonSpaceColumn(block, column + 1))
Return true if the given column is at least equal to the column that contains the last non-whitespace character at the given line, or if the rest of the line is a comment.
def _get_metrics_to_collect(self, instance_key, additional_metrics): if instance_key not in self.metrics_to_collect_by_instance: self.metrics_to_collect_by_instance[instance_key] = self._build_metric_list_to_collect(additional_metrics) return self.metrics_to_collect_by_instance[instance_key]
Return and cache the list of metrics to collect.
def filter_files(self, path):
    """Walk *path* like os.walk, excluding files and directories that
    match the project's EXCLUDES patterns as well as pruning excluded
    directories from the traversal.
    """
    # Join all fnmatch patterns into one alternation regex; r'$.' can
    # never match, so an empty EXCLUDES list excludes nothing.
    excludes = r'|'.join([fnmatch.translate(x) for x in self.project.EXCLUDES]) or r'$.'
    for root, dirs, files in os.walk(path, topdown=True):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if not re.match(excludes, d)]
        # NOTE(review): dirs is then rewritten to full paths, which also
        # affects what os.walk descends into next — presumably intended
        # for the caller's benefit; verify against consumers.
        dirs[:] = [os.path.join(root, d) for d in dirs]
        rel_path = os.path.relpath(root, path)
        paths = []
        for f in files:
            # Match files by their path relative to the walk root.
            if rel_path == '.':
                file_path = f
            else:
                file_path = os.path.join(rel_path, f)
            if not re.match(excludes, file_path):
                paths.append(f)
        files[:] = paths
        yield root, dirs, files
Exclude files based on blueprint and project configuration as well as hidden files.
def get_as_float_with_default(self, key, default_value):
    """Convert the element at *key* into a float.

    :param key: index of the element to get.
    :param default_value: value returned when conversion is not possible.
    :return: the element as a float, or *default_value*.
    """
    raw = self.get(key)
    return FloatConverter.to_float_with_default(raw, default_value)
Converts map element into a float or returns default value if conversion is not possible. :param key: an index of element to get. :param default_value: the default value :return: float value ot the element or default value if conversion is not supported.
def get_old_filename(diff_part):
    """Return the original ("old") filename changed in a git diff part.

    Raises MalformedGitDiff when no old-filename header line is found.
    """
    patterns = (
        r'^--- a/(.*)',
        r'^\-\-\- (.*)',
    )
    for pattern in patterns:
        match = re.search(pattern, diff_part, re.MULTILINE)
        if match is not None:
            return match.group(1)
    raise MalformedGitDiff("No old filename in diff part found. "
                           "Examined diff part: {}".format(diff_part))
Returns the filename for the original file that was changed in a diff part.
def _is_sparse(x):
    """Return whether *x* is a SparseTensor / SparseTensorValue, or an
    object exposing a truthy ``is_sparse`` attribute (parsed sparse
    tensor info).
    """
    return (
        isinstance(x, (tf.SparseTensor, tf_v1.SparseTensorValue))
        or (hasattr(x, "is_sparse") and x.is_sparse))
Returns whether x is a SparseTensor or a parsed sparse tensor info.
def files():
    """Scan all modules of the running app into a dict mapping real
    filename to modified time.

    The scan only occurs on the first call; subsequent calls return
    the cached global dict.
    """
    if not _scanned:
        # Start with __main__; if that yields nothing, walk every
        # loaded module that has a backing file.
        if not module_files(sys.modules['__main__'], _process_files):
            for module in sys.modules.values():
                if hasattr(module, '__file__'):
                    filename = module.__file__
                    if filename not in _process_files:
                        realname, modified_time = _get_filename_and_modified(filename)
                        if realname and realname not in _process_files:
                            _process_files[realname] = modified_time
    return _process_files
Scan all modules in the currently running app to create a dict of all files and their modified time. @note The scan only occurs the first time this function is called. Subsequent calls simply return the global dict. @return: A dict containing filenames as keys with their modified time as value
def subset(data, sel0, sel1):
    """Apply selections on the first and second axes of *data*.

    Boolean selections are converted to index arrays; a None selection
    keeps the whole axis.
    """
    data = np.asarray(data)
    if data.ndim < 2:
        raise ValueError('data must have 2 or more dimensions')
    sel0 = asarray_ndim(sel0, 1, allow_none=True)
    sel1 = asarray_ndim(sel1, 1, allow_none=True)
    # Convert boolean masks to integer index arrays.
    if sel0 is not None and sel0.dtype.kind == 'b':
        sel0, = np.nonzero(sel0)
    if sel1 is not None and sel1.dtype.kind == 'b':
        sel1, = np.nonzero(sel1)
    # Make sel0 a column vector so the two index arrays broadcast into
    # an outer (orthogonal) selection rather than pairwise indexing.
    if sel0 is not None and sel1 is not None:
        sel0 = sel0[:, np.newaxis]
    if sel0 is None:
        sel0 = _total_slice
    if sel1 is None:
        sel1 = _total_slice
    return data[sel0, sel1]
Apply selections on first and second axes.
def _getOccurs(self, e): minOccurs = maxOccurs = '1' nillable = True return minOccurs,maxOccurs,nillable
return a 3 item tuple
def _remove(self, removeList, selfValue):
    """Remove from *selfValue* every element matching one in *removeList*.

    Matching uses the ``==`` operation and only looks at the top level
    of *selfValue* (no recursion is needed for this structure).

    Args:
        removeList (list): the elements to match.
        selfValue (list): the list to remove values from
            (usually ``self.value``).
    """
    for removeValue in removeList:
        # The original left a debug print(removeValue, removeList) here;
        # removed as it polluted stdout on every call.
        removeEverything(removeValue, selfValue)
Remove elements from a list by matching the elements in the other list. This method only looks inside current instance's value, not recursive. There is no need for a recursive one anyway. Match by == operation. Args: removeList (list): The list of matching elements. selfValue (list): The list you remove value from. Usually ``self.value``
def append(self, item):
    """Add *item* to the end of the collection, initialising the
    cursor index to 0 when the collection was empty.
    """
    if not len(self):
        self.index = 0
    self.items.append(item)
Adds a new item to the end of the collection.
def select_group(self, group_id):
    """Return a deep copy of this query filtered by group.

    :param group_id: the ID of a group to filter by.
    :returns: the new query object (the original is untouched).
    """
    clone = copy.deepcopy(self)
    clone._filter.group_id = group_id
    return clone
Copy the query and add filtering by group. Example:: query = query.select_group('1234567') :type group_id: str :param group_id: The ID of a group to filter by. :rtype: :class:`Query` :returns: The new query object.
def read_tuple(self):
    """Read a tuple from the pipe to Storm.

    Converts the raw values to the stream's registered tuple type when
    one is known for this (source component, stream) pair.
    """
    cmd = self.read_command()
    source = cmd["comp"]
    stream = cmd["stream"]
    values = cmd["tuple"]
    # None means no specific tuple class was registered for the stream.
    val_type = self._source_tuple_types[source].get(stream)
    return Tuple(
        cmd["id"],
        source,
        stream,
        cmd["task"],
        tuple(values) if val_type is None else val_type(*values),
    )
Read a tuple from the pipe to Storm.
def get_team_players(self, team):
    """Query the API for the squad of *team* and write it out.

    Unknown teams and API failures are reported to the terminal in red
    rather than raised.
    """
    team_id = self.team_names.get(team, None)
    try:
        req = self._get('teams/{}/'.format(team_id))
        team_players = req.json()['squad']
        if not team_players:
            click.secho("No players found for this team", fg="red", bold=True)
        else:
            self.writer.team_players(team_players)
    except APIErrorException:
        click.secho("No data for the team. Please check the team code.", fg="red", bold=True)
Queries the API and fetches the players for a particular team
def init_datastore(config):
    """Take the config definition and initialize the datastore.

    The config must contain either a 'datastore' entry, which is simply
    returned, or a 'factory': a callable or an entry point definition
    called with the remaining config entries as kwargs.
    """
    try:
        return config['datastore']
    except KeyError:
        pass
    # Pop 'factory' so that the remaining keys become factory kwargs.
    factory = config.pop('factory')
    if isinstance(factory, str):
        entry_point = pkg_resources.EntryPoint.parse('x=' + factory)
        factory = entry_point.resolve()
    return factory(**config)
Take the config definition and initialize the datastore. The config must contain either a 'datastore' parameter, which will be simply returned, or must contain a 'factory' which is a callable or entry point definition. The callable should take the remainder of the params in config as kwargs and return a DataStore instance.
def _cast_value(value, _type): if _type.upper() == 'FLOAT64': return float64(value) elif _type.upper() == 'FLOAT32': return float32(value) elif _type.upper() == 'INT32': return int32(value) elif _type.upper() == 'UINT16': return uint16(value) elif _type.upper() == 'INT16': return int16(value) elif _type.upper() == 'BOOLEAN': return uint8(value) else: return float64(value)
cast value to _type
def fieldstorage(self):
    """Lazily build a `cgi.FieldStorage` from `wsgi.input`.

    Raises ReadBodyTwiceError if the raw body has already been read.
    """
    if self._fieldstorage is None:
        # Reading both the raw body and the parsed form would consume
        # wsgi.input twice, which is not possible.
        if self._body is not None:
            raise ReadBodyTwiceError()
        self._fieldstorage = cgi.FieldStorage(
            environ=self._environ,
            fp=self._environ['wsgi.input']
        )
    return self._fieldstorage
`cgi.FieldStorage` from `wsgi.input`.
def create_activity(self, name, activity_type, start_date_local, elapsed_time, description=None, distance=None):
    """Create a new manual activity.

    To create an activity from an uploaded GPS file, use
    upload_activity instead.

    :param name: name of the activity (str).
    :param activity_type: case-insensitive type; must be one of
        model.Activity.TYPES.
    :param start_date_local: local start datetime or ISO8601 string
        (TZ info is ignored).
    :param elapsed_time: seconds (int) or a datetime.timedelta.
    :param description: optional description (str).
    :param distance: optional distance in meters (float) or a
        units Quantity.
    :raises ValueError: if activity_type is not a known type.
    """
    # Normalise the flexible input types to the wire format.
    if isinstance(elapsed_time, timedelta):
        elapsed_time = unithelper.timedelta_to_seconds(elapsed_time)
    if isinstance(distance, Quantity):
        distance = float(unithelper.meters(distance))
    if isinstance(start_date_local, datetime):
        start_date_local = start_date_local.strftime("%Y-%m-%dT%H:%M:%SZ")
    if not activity_type.lower() in [t.lower() for t in model.Activity.TYPES]:
        raise ValueError("Invalid activity type: {0}. Possible values: {1!r}".format(activity_type, model.Activity.TYPES))
    params = dict(name=name, type=activity_type, start_date_local=start_date_local, elapsed_time=elapsed_time)
    if description is not None:
        params['description'] = description
    if distance is not None:
        params['distance'] = distance
    raw_activity = self.protocol.post('/activities', **params)
    return model.Activity.deserialize(raw_activity, bind_client=self)
Create a new manual activity. If you would like to create an activity from an uploaded GPS file, see the :meth:`stravalib.client.Client.upload_activity` method instead. :param name: The name of the activity. :type name: str :param activity_type: The activity type (case-insensitive). Possible values: ride, run, swim, workout, hike, walk, nordicski, alpineski, backcountryski, iceskate, inlineskate, kitesurf, rollerski, windsurf, workout, snowboard, snowshoe :type activity_type: str :param start_date_local: Local date/time of activity start. (TZ info will be ignored) :type start_date_local: :class:`datetime.datetime` or string in ISO8601 format. :param elapsed_time: The time in seconds or a :class:`datetime.timedelta` object. :type elapsed_time: :class:`datetime.timedelta` or int (seconds) :param description: The description for the activity. :type description: str :param distance: The distance in meters (float) or a :class:`units.quantity.Quantity` instance. :type distance: :class:`units.quantity.Quantity` or float (meters)
def ensure_unicode_string(obj):
    """Return a unicode string representation of *obj*.

    Python 2 style: on decode errors, fall back to utf-8 decoding with
    the 'replace' error handler.
    """
    try:
        return unicode_type(obj)
    except UnicodeDecodeError:
        # Byte strings get decoded directly; anything else goes through
        # str() first and is then decoded.
        if hasattr(obj, 'decode'):
            return obj.decode('utf-8', 'replace')
        return str(obj).decode('utf-8', 'replace')
Return a unicode string representation of the given obj. :param obj: The obj we want to represent in unicode :type obj: varies :rtype: `unicode`
def dynamize_value(self, val):
    """Return a dict pairing the Amazon DynamoDB type specification with
    the value encoded for transmission.

    Unsupported Python types raise TypeError (from get_dynamodb_type).
    """
    def encode_num(n):
        # Booleans must be sent as '0'/'1', not 'True'/'False'.
        return str(int(n)) if isinstance(n, bool) else str(n)

    dynamodb_type = self.get_dynamodb_type(val)
    if dynamodb_type == 'N':
        return {dynamodb_type: encode_num(val)}
    if dynamodb_type == 'S':
        return {dynamodb_type: val}
    if dynamodb_type == 'NS':
        return {dynamodb_type: [str(n) for n in val]}
    if dynamodb_type == 'SS':
        return {dynamodb_type: list(val)}
    return val
Take a scalar Python value and return a dict consisting of the Amazon DynamoDB type specification and the value that needs to be sent to Amazon DynamoDB. If the type of the value is not supported, raise a TypeError
def blacklist_token():
    """Blacklist an existing JWT by registering its jti claim.

    Expects a JSON POST body of the form {"token": "<jwt>"}.
    """
    req = flask.request.get_json(force=True)
    data = guard.extract_jwt_token(req['token'])
    # The jti claim uniquely identifies this token.
    blacklist.add(data['jti'])
    return flask.jsonify(message='token blacklisted ({})'.format(req['token']))
Blacklists an existing JWT by registering its jti claim in the blacklist. .. example:: $ curl http://localhost:5000/blacklist_token -X POST \ -d '{"token":"<your_token>"}'
def _get_edge_tuple(source,
                    target,
                    edge_data: EdgeData,
                    ) -> Tuple[str, str, str, Optional[str], Tuple[str, Optional[Tuple], Optional[Tuple]]]:
    """Convert an edge to a consistent tuple.

    :param BaseEntity source: The source BEL node
    :param BaseEntity target: The target BEL node
    :param edge_data: The edge's data dictionary
    :return: A hashable tuple representing this edge. Makes no promises
        about its structure.
    """
    return (
        source.as_bel(),
        target.as_bel(),
        _get_citation_str(edge_data),
        edge_data.get(EVIDENCE),
        canonicalize_edge(edge_data),
    )
Convert an edge to a consistent tuple. :param BaseEntity source: The source BEL node :param BaseEntity target: The target BEL node :param edge_data: The edge's data dictionary :return: A tuple that can be hashed representing this edge. Makes no promises to its structure.
def _split_rules(rules):
    """Split rules whose list-valued attributes into individual rules.

    cidr_ip, source_group_name and source_group_group_id may be given
    as lists or strings; the execution module reports rules
    individually, so list values are expanded one rule per element.
    """
    expanded = []
    for rule in rules:
        cidr_ip = rule.get('cidr_ip')
        group_name = rule.get('source_group_name')
        group_id = rule.get('source_group_group_id')
        if cidr_ip and not isinstance(cidr_ip, six.string_types):
            for ip in cidr_ip:
                expanded.append(dict(rule, cidr_ip=ip))
        elif group_name and not isinstance(group_name, six.string_types):
            for name in group_name:
                expanded.append(dict(rule, source_group_name=name))
        elif group_id and not isinstance(group_id, six.string_types):
            for _id in group_id:
                expanded.append(dict(rule, source_group_group_id=_id))
        else:
            expanded.append(rule)
    return expanded
Split rules with lists into individual rules. We accept some attributes as lists or strings. The data we get back from the execution module lists rules as individual rules. We need to split the provided rules into individual rules to compare them.
def create_database(self):
    """Create the destination postgres database via `createdb`.

    Derives --owner from any --user= argument configured for the
    destination database.
    """
    self.print_message("Creating database '%s'" % self.databases['destination']['name'])
    # Export PGPASSWORD so createdb can authenticate non-interactively.
    self.export_pgpassword('destination')
    args = [
        "createdb",
        self.databases['destination']['name'],
    ]
    args.extend(self.databases['destination']['args'])
    for arg in self.databases['destination']['args']:
        if arg[:7] == '--user=':
            args.append('--owner=%s' % arg[7:])
    subprocess.check_call(args)
Create postgres database.
async def write(self, writer: Any, close_boundary: bool=True) -> None:
    """Write the multipart body to *writer*.

    Each part is framed by '--boundary' lines; *close_boundary*
    controls whether the terminating '--boundary--' marker is written.
    """
    if not self._parts:
        return
    for part, encoding, te_encoding in self._parts:
        await writer.write(b'--' + self._boundary + b'\r\n')
        await writer.write(part._binary_headers)
        if encoding or te_encoding:
            # Wrap the writer so the part payload is compressed and/or
            # transfer-encoded on the fly.
            w = MultipartPayloadWriter(writer)
            if encoding:
                w.enable_compression(encoding)
            if te_encoding:
                w.enable_encoding(te_encoding)
            await part.write(w)
            await w.write_eof()
        else:
            await part.write(writer)
        await writer.write(b'\r\n')
    if close_boundary:
        await writer.write(b'--' + self._boundary + b'--\r\n')
Write body.
def find_deepest_user_frame(tb):
    """Find the deepest stack frame that is not part of SCons.

    *tb* is a pre-processed stack trace in the form returned by
    traceback.extract_tb() or traceback.extract_stack().
    Note: *tb* is reversed in place.
    """
    tb.reverse()
    scons_marker = os.sep + 'SCons' + os.sep
    for frame in tb:
        if scons_marker not in frame[0]:
            return frame
    # Every frame is inside SCons; fall back to the deepest one.
    return tb[0]
Find the deepest stack frame that is not part of SCons. Input is a "pre-processed" stack trace in the form returned by traceback.extract_tb() or traceback.extract_stack()
def unescape(b, encoding):
    """Unescape all string and unicode literals in *b*."""
    def replace_literal(match):
        return unescape_string_literal(match.group(), encoding)
    return string_literal_re.sub(replace_literal, b)
Unescape all string and unicode literals in bytes.
def save(self, name, content, *args, **kwargs):
    """Save the file, then create/update the thumbnail cache Source
    model reference for it.
    """
    super(ThumbnailerFieldFile, self).save(name, content, *args, **kwargs)
    self.get_source_cache(create=True, update=True)
Save the file, also saving a reference to the thumbnail cache Source model.
def recordtype_row_strategy(column_names):
    """Recordtype row strategy: rows are returned as recordtypes.

    Column names that are not valid Python identifiers are replaced
    with col<number>_.
    """
    try:
        # namedlist is the maintained successor of recordtype.
        from namedlist import namedlist as recordtype
    except ImportError:
        from recordtype import recordtype
    # Sanitise invalid identifiers so recordtype() accepts them.
    column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)]
    recordtype_row_class = recordtype('Row', column_names)

    class Row(recordtype_row_class):
        # Add index/slice access on top of named attribute access.
        def __getitem__(self, index):
            if isinstance(index, slice):
                return tuple(getattr(self, x) for x in self.__slots__[index])
            return getattr(self, self.__slots__[index])

        def __setitem__(self, index, value):
            setattr(self, self.__slots__[index], value)

    def row_factory(row):
        return Row(*row)

    return row_factory
Recordtype row strategy, rows returned as recordtypes Column names that are not valid Python identifiers will be replaced with col<number>_
def getspectrum(self, index):
    """Read the spectrum at *index* from the .ibd file.

    :param index: index of the desired spectrum in the .imzML file.
    :return: (mz_array, intensity_array) numpy arrays holding the m/z
        axis and the matching intensity values.
    """
    raw_mz, raw_intensity = self.get_spectrum_as_string(index)
    mz_array = np.frombuffer(raw_mz, dtype=self.mzPrecision)
    intensity_array = np.frombuffer(raw_intensity, dtype=self.intensityPrecision)
    return mz_array, intensity_array
Reads the spectrum at specified index from the .ibd file. :param index: Index of the desired spectrum in the .imzML file Output: mz_array: numpy.ndarray Sequence of m/z values representing the horizontal axis of the desired mass spectrum intensity_array: numpy.ndarray Sequence of intensity values corresponding to mz_array
def filter(criterias, devices):
    """Filter devices by criterias matched on the root level of each
    device dictionary.

    A device matches when a criteria value equals the device's value
    (or is contained in it, for list-valued fields).

    NOTE(review): this shadows the built-in ``filter``, and a device
    matching several criteria values can be appended more than once —
    presumably callers tolerate/deduplicate this; verify.
    """
    if not criterias:
        return devices
    result = []
    for device in devices:
        for criteria_name, criteria_values in criterias.items():
            if criteria_name in device.keys():
                if isinstance(device[criteria_name], list):
                    for criteria_value in criteria_values:
                        if criteria_value in device[criteria_name]:
                            result.append(device)
                            break
                elif isinstance(device[criteria_name], str):
                    for criteria_value in criteria_values:
                        if criteria_value == device[criteria_name]:
                            result.append(device)
                elif isinstance(device[criteria_name], int):
                    for criteria_value in criteria_values:
                        if criteria_value == device[criteria_name]:
                            result.append(device)
                else:
                    # Unsupported value type: ignore this criteria.
                    continue
    return result
Filter a device by criterias on the root level of the dictionary.
def _get_span_name(servicer_context):
    """Generate a span name from the gRPC server rpc_request_info.

    '/pkg.Service/Method' becomes '<RECV_PREFIX>.pkg.Service.Method'.
    """
    method = servicer_context._rpc_event.call_details.method[1:]
    if isinstance(method, bytes):
        method = method.decode('utf-8')
    dotted = method.replace('/', '.')
    return '{}.{}'.format(RECV_PREFIX, dotted)
Generates a span name based off of the gRPC server rpc_request_info
def get_main_for(self, model):
    """Return the main image for the given model, or None when there
    is no image flagged as main.
    """
    try:
        return self.for_model(model).get(is_main=True)
    except models.ObjectDoesNotExist:
        return None
Returns main image for given model
def _OpenFile(self, path): if not self._registry_file_reader: return None return self._registry_file_reader.Open( path, ascii_codepage=self._ascii_codepage)
Opens a Windows Registry file. Args: path (str): path of the Windows Registry file. Returns: WinRegistryFile: Windows Registry file or None if not available.
def comment_urlview(self):
    """Open the selected comment's text (or URL) with the URL viewer;
    flash the terminal when there is nothing to open.
    """
    data = self.get_selected_item()
    content = data.get('body') or data.get('text') or data.get('url_full')
    if not content:
        self.term.flash()
        return
    self.term.open_urlview(content)
Open the selected comment with the URL viewer
def previous_weekday(day=None, as_datetime=False):
    """Return the most recent business day before *day*.

    :param day: a 'YYYY-MM-DD' string, or None for today.
    :param as_datetime: when True return a datetime, otherwise a string.
    """
    if day is None:
        current = datetime.datetime.now()
    else:
        current = datetime.datetime.strptime(day, '%Y-%m-%d')
    one_day = datetime.timedelta(days=1)
    current -= one_day
    # weekday() > 4 means Saturday (5) or Sunday (6): keep stepping back.
    while current.weekday() > 4:
        current -= one_day
    return current if as_datetime else current.strftime("%Y-%m-%d")
get the most recent business day
def should_indent(code):
    """Determine whether the next line should be indented: the last
    (comment-stripped) line ends with ':' or '\\', or opens more
    parens than it closes.
    """
    final_line = rem_comment(code.splitlines()[-1])
    if final_line.endswith((":", "\\")):
        return True
    return paren_change(final_line) < 0
Determines whether the next line should be indented.
def add_url (self, url, line=0, column=0, page=0, name=u"", base=None):
    """Queue the URL data for checking, first replacing a leading '/'
    with the configured local webroot directory when one is set.
    """
    webroot = self.aggregate.config["localwebroot"]
    if webroot and url and url.startswith(u"/"):
        url = webroot + url[1:]
        log.debug(LOG_CHECK, "Applied local webroot `%s' to `%s'.", webroot, url)
    super(FileUrl, self).add_url(url, line=line, column=column, page=page, name=name, base=base)
If a local webroot directory is configured, replace absolute URLs with it. After that queue the URL data for checking.
def get_nehrp_classes(self, sites):
    """Classify sites.vs30 into NEHRP site classes.

    Thresholds come from Section 4 "Site correction coefficients",
    p. 205.  Site classes E and F are not supported.
    """
    classes = sorted(self.NEHRP_VS30_UPPER_BOUNDS.keys())
    bounds = [self.NEHRP_VS30_UPPER_BOUNDS[item] for item in classes]
    # Column vector of upper bounds against a row vector of vs30 values.
    bounds = np.reshape(np.array(bounds), (-1, 1))
    vs30s = np.reshape(sites.vs30, (1, -1))
    # The number of upper bounds each vs30 falls below, minus one,
    # indexes the class label for that site.
    site_classes = np.choose((vs30s < bounds).sum(axis=0) - 1, classes)
    return site_classes.astype('object')
Site classification threshholds from Section 4 "Site correction coefficients" p. 205. Note that site classes E and F are not supported.
def _get_hypocentral_depth_term(self, C, rup): if rup.hypo_depth <= 7.0: fhyp_h = 0.0 elif rup.hypo_depth > 20.0: fhyp_h = 13.0 else: fhyp_h = rup.hypo_depth - 7.0 if rup.mag <= 5.5: fhyp_m = C["c17"] elif rup.mag > 6.5: fhyp_m = C["c18"] else: fhyp_m = C["c17"] + ((C["c18"] - C["c17"]) * (rup.mag - 5.5)) return fhyp_h * fhyp_m
Returns the hypocentral depth scaling term defined in equations 21 - 23
def process_chunks(self, chunks):
    """Store the incoming chunk at the corresponding position in the
    result array.

    Expects exactly one chunk; a chunk without keys fills the whole
    result.
    """
    only_chunk, = chunks
    # Falsy keys means the chunk spans the entire result array.
    index = only_chunk.keys if only_chunk.keys else Ellipsis
    self.result[index] = only_chunk.data
Store the incoming chunk at the corresponding position in the result array.
def question_default_loader(self, pk):
    """Load a Question by primary key, or return None when it does not
    exist; related PKs are attached on success.
    """
    try:
        question = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    self.question_default_add_related_pks(question)
    return question
Load a Question from the database.
def read_ipv6_route(self, length, extension):
    """Read a Routing Header for IPv6 [RFC 8200][RFC 5095].

    Parses next header, extension length, routing type and segments
    left, then the type-specific data, and finally either stops at this
    extension header or decodes the next layer.
    """
    if length is None:
        length = len(self)
    _next = self._read_protos(1)
    _hlen = self._read_unpack(1)
    _type = self._read_unpack(1)
    _left = self._read_unpack(1)
    ipv6_route = dict(
        next=_next,
        # Header Ext Len counts 8-octet units, excluding the first.
        length=(_hlen + 1) * 8,
        type=_ROUTING_TYPE.get(_type, 'Unassigned'),
        seg_left=_left,
    )
    _dlen = _hlen * 8 - 4
    if _dlen:
        _func = _ROUTE_PROC.get(_type, 'none')
        # getattr instead of eval: same dynamic method dispatch without
        # evaluating a constructed source string.
        _data = getattr(self, f'_read_data_type_{_func}')(_dlen)
        ipv6_route.update(_data)
    length -= ipv6_route['length']
    ipv6_route['packet'] = self._read_packet(header=ipv6_route['length'], payload=length)
    if extension:
        self._protos = None
        return ipv6_route
    return self._decode_next_layer(ipv6_route, _next, length)
Read Routing Header for IPv6. Structure of IPv6-Route header [RFC 8200][RFC 5095]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len | Routing Type | Segments Left | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | . . . type-specific data . . . | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 route.data Type-Specific Data
def ReadStatusBit(self, bit):
    """Report the given status bit read over SPI."""
    spi.SPI_write_byte(self.CS, 0x39)
    spi.SPI_write_byte(self.CS, 0x00)
    data0 = spi.SPI_read_byte()
    spi.SPI_write_byte(self.CS, 0x00)
    data1 = spi.SPI_read_byte()
    # data0 carries bits 8 and up; data1 carries bits 0-7.
    if bit > 7:
        OutputBit = (data0 >> (bit - 8)) & 1
    else:
        OutputBit = (data1 >> bit) & 1
    return OutputBit
Report given status bit
def build_command_tree(pattern, cmd_params):
    """Recursively fill in a command tree in *cmd_params* according to
    a docopt-parsed *pattern* object.

    Returns the (possibly nested) cmd_params the caller should
    continue with.
    """
    from docopt import Either, Optional, OneOrMore, Required, Option, Command, Argument
    pattern_type = type(pattern)
    if pattern_type in (Either, Optional, OneOrMore):
        for child in pattern.children:
            build_command_tree(child, cmd_params)
    elif pattern_type in (Required,):
        for child in pattern.children:
            cmd_params = build_command_tree(child, cmd_params)
    elif pattern_type in (Option,):
        # Options taking an argument are completed with a '=' suffix.
        suffix = "=" if pattern.argcount else ""
        if pattern.short:
            cmd_params.options.append(pattern.short + suffix)
        if pattern.long:
            cmd_params.options.append(pattern.long + suffix)
    elif pattern_type in (Command,):
        cmd_params = cmd_params.get_subcommand(pattern.name)
    elif pattern_type in (Argument,):
        cmd_params.arguments.append(pattern.name)
    return cmd_params
Recursively fill in a command tree in cmd_params according to a docopt-parsed "pattern" object.
def prepare_query_params(**kwargs):
    """Flatten *kwargs* into a list of (key, value) querystring pairs,
    expanding nested values and dropping None values.
    """
    pairs = []
    for key, value in kwargs.items():
        for sub_key, sub_value in expand(value, key):
            if sub_value is not None:
                pairs.append((sub_key, sub_value))
    return pairs
Prepares given parameters to be used in querystring.
def world_info(world_name, world_config=None, initial_indent="", next_indent=" "):
    """Get and print the information of a world.

    Args:
        world_name (str): name of the world to retrieve information for.
        world_config (dict, optional): the world's configuration; looked
            up from installed packages when None.
        initial_indent (str, optional): indent applied to each output line.
        next_indent (str, optional): extra indent per nesting level.
    """
    if world_config is None:
        # Search every installed package for the named map.
        for config, _ in _iter_packages():
            for world in config["maps"]:
                if world["name"] == world_name:
                    world_config = world
    if world_config is None:
        raise HolodeckException("Couldn't find world " + world_name)
    second_indent = initial_indent + next_indent
    agent_indent = second_indent + next_indent
    sensor_indent = agent_indent + next_indent
    print(initial_indent, world_config["name"])
    print(second_indent, "Resolution:", world_config["window_width"], "x", world_config["window_height"])
    print(second_indent, "Agents:")
    for agent in world_config["agents"]:
        print(agent_indent, "Name:", agent["agent_name"])
        print(agent_indent, "Type:", agent["agent_type"])
        print(agent_indent, "Sensors:")
        for sensor in agent["sensors"]:
            print(sensor_indent, sensor)
Gets and prints the information of a world. Args: world_name (str): the name of the world to retrieve information for world_config (dict optional): A dictionary containing the world's configuration. Will find the config if None. Defaults to None. initial_indent (str optional): This indent will apply to each output line. Defaults to "". next_indent (str optional): This indent will be applied within each nested line. Defaults to " ".
def sliceit(iterable, lower=0, upper=None):
    """Apply a slice on input iterable.

    :param iterable: object which provides __getitem__ or __iter__.
    :param int lower: lower bound from where to start getting items.
    :param int upper: upper bound where getting items stops.
    :return: sliced object of the same type as *iterable* when it can
        be rebuilt (and is not a dict), otherwise a list of items.
    """
    if upper is None:
        upper = len(iterable)
    try:
        # Fast path: the iterable supports slicing directly.
        result = iterable[lower: upper]
    except TypeError:
        result = []
        # Normalise negative bounds relative to the iterable's length.
        if lower < 0:
            lower += len(iterable)
        if upper < 0:
            upper += len(iterable)
        if upper > lower:
            # Manual iteration, keeping only indices in [lower, upper).
            iterator = iter(iterable)
            for index in range(upper):
                try:
                    value = next(iterator)
                except StopIteration:
                    break
                else:
                    if index >= lower:
                        result.append(value)
    iterablecls = iterable.__class__
    # Try to rebuild the original container type (never dicts).
    if not(isinstance(result, iterablecls) or issubclass(iterablecls, dict)):
        try:
            result = iterablecls(result)
        except TypeError:
            pass
    return result
Apply a slice on input iterable. :param iterable: object which provides the method __getitem__ or __iter__. :param int lower: lower bound from where start to get items. :param int upper: upper bound from where finish to get items. :return: sliced object of the same type of iterable if not dict, or specific object. otherwise, simple list of sliced items. :rtype: Iterable
def _printable_id_code(self): code = str(self.id_code) while len(code) < self._code_size: code = '0' + code return code
Returns the code in a printable form, filling with zeros if needed. :return: the ID code in a printable form
def save(self):
    """Update or insert this Todo item in Cloud Datastore and return self.

    A newly inserted item adopts the ID assigned by Datastore.
    """
    req = datastore.CommitRequest()
    req.mode = datastore.CommitRequest.NON_TRANSACTIONAL
    req.mutations.add().upsert.CopyFrom(self.to_proto())
    resp = datastore.commit(req)
    if not self.id:
        # Take the auto-allocated ID from the commit response.
        self.id = resp.mutation_results[0].key.path[-1].id
    return self
Update or insert a Todo item.
def end(self):
    """Close the V interface.

    Takes no arguments and returns None.

    C library equivalent: Vend
    """
    _checkErr('vend', _C.Vfinish(self._hdf_inst._id),
              "cannot terminate V interface")
    # Drop the back-reference to the HDF instance once closed.
    self._hdf_inst = None
Close the V interface. Args:: No argument Returns:: None C library equivalent : Vend
def _executor(self, jobGraph, stats, fileStore):
    """Core wrapping generator for running the job within a worker.

    Sets up stats and logging before yielding; after the job body has
    run, finishes promise-file cleanup, persists the job state and
    records the stats.
    """
    if stats is not None:
        startTime = time.time()
        startClock = getTotalCpuTime()
    baseDir = os.getcwd()
    # The job body executes at this yield point.
    yield
    # Checkpoint jobs defer promise-file deletion; others delete now.
    if not self.checkpoint:
        for jobStoreFileID in Promise.filesToDelete:
            fileStore.deleteGlobalFile(jobStoreFileID)
    else:
        jobGraph.checkpointFilesToDelete = list(Promise.filesToDelete)
    Promise.filesToDelete.clear()
    # Start the asynchronous update of the job's state.
    fileStore._updateJobWhenDone()
    # Restore the working directory if the job changed it.
    if os.getcwd() != baseDir:
        os.chdir(baseDir)
    if stats is not None:
        totalCpuTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
        stats.jobs.append(
            Expando(
                time=str(time.time() - startTime),
                clock=str(totalCpuTime - startClock),
                class_name=self._jobName(),
                memory=str(totalMemoryUsage)
            )
        )
This is the core wrapping method for running the job within a worker. It sets up the stats and logging before yielding. After completion of the body, the function will finish up the stats and logging, and starts the async update process for the job.
def getRecentlyUpdatedSets(self, minutesAgo):
    """Get the information of recently updated sets.

    :param int minutesAgo: how far back (in minutes) to look.
    :returns: a list of Build instances updated within that window;
        empty when nothing matched.
    """
    params = {
        'apiKey': self.apiKey,
        'minutesAgo': minutesAgo
    }
    url = Client.ENDPOINT.format('getRecentlyUpdatedSets')
    returned = get(url, params=params)
    self.checkResponse(returned)
    root = ET.fromstring(returned.text)
    return [Build(i, self) for i in root]
Gets the information of recently updated sets. :param int minutesAgo: The amount of time ago that the set was updated. :returns: A list of Build instances that were updated within the given time. :rtype: list .. warning:: An empty list will be returned if there are no sets in the given time limit.
def cmd_update(args):
    """Update a generator.

    Parameters
    ----------
    args : `argparse.Namespace`
        Command arguments.
    """
    markov = load(MarkovText, args.state, args)
    read(args.input, markov, args.progress)
    if args.output is None:
        if args.type == SQLITE:
            # The SQLite backend saves in place.
            save(markov, None, args)
        elif args.type == JSON:
            # Write to a temp file, then atomically replace the state file.
            name, ext = path.splitext(args.state)
            tmp = name + '.tmp' + ext
            save(markov, tmp, args)
            replace(tmp, args.state)
    else:
        save(markov, args.output, args)
Update a generator. Parameters ---------- args : `argparse.Namespace` Command arguments.
def is_normalized_address(value: Any) -> bool:
    """Return whether *value* is an address in its normalized form."""
    return is_address(value) and value == to_normalized_address(value)
Returns whether the provided value is an address in its normalized form.
def dump_config(self):
    """Pretty print the merged YAML and the parsed Python configuration
    to stdout; a parse failure prints a hint and re-raises ConfigError.
    """
    yaml_content = self.get_merged_config()
    print('YAML Configuration\n%s\n' % yaml_content.read())
    try:
        self.load()
        print('Python Configuration\n%s\n' % pretty(self.yamldocs))
    except ConfigError:
        sys.stderr.write(
            'config parse error. try running with --logfile=/dev/tty\n')
        raise
Pretty print the configuration dict to stdout.
def _input_to_raw_value(self, value: int) -> float: return (float(value) - self.min_raw_value) / self.max_raw_value
Convert the value read from evdev to a 0.0 to 1.0 range. :internal: :param value: a value ranging from the defined minimum to the defined maximum value. :return: 0.0 at minimum, 1.0 at maximum, linearly interpolating between those two points.
def authors_et_al(self, max_authors=5):
    """Return the author list as a string, truncated to *max_authors*
    names followed by ", et al." when there are more.
    """
    all_authors = self._author_list
    if len(all_authors) <= max_authors:
        return self.authors
    return ", ".join(all_authors[:max_authors]) + ", et al."
Return string with a truncated author list followed by 'et al.'
def x10(cls, housecode, unitcode):
    """Create an X10 device address.

    :param housecode: X10 house code, a letter 'a'-'p' (case-insensitive)
    :param unitcode: X10 unit code, an int 1 - 16 (20 - 22 are also
        accepted — presumably special X10 command codes; confirm)
    :raises ValueError: if either code is invalid
    """
    # Bug fix: check the type before calling .lower() — the old code
    # raised AttributeError for non-string house codes instead of the
    # intended logged ValueError.
    if (isinstance(housecode, str) and
            housecode.lower() in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
                                  'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']):
        byte_housecode = insteonplm.utils.housecode_to_byte(housecode)
    else:
        if isinstance(housecode, str):
            _LOGGER.error('X10 house code error: %s', housecode)
        else:
            _LOGGER.error('X10 house code is not a string')
        raise ValueError
    if unitcode in range(1, 17) or unitcode in range(20, 23):
        byte_unitcode = insteonplm.utils.unitcode_to_byte(unitcode)
    else:
        if isinstance(unitcode, int):
            _LOGGER.error('X10 unit code error: %d', unitcode)
        else:
            _LOGGER.error('X10 unit code is not an integer 1 - 16')
        raise ValueError
    # X10 addresses are flagged so downstream code can distinguish them
    # from Insteon addresses.
    addr = Address(bytearray([0x00, byte_housecode, byte_unitcode]))
    addr.is_x10 = True
    return addr
Create an X10 device address.
def setup():
    """Create the user's ~/.wallaceconfig from the bundled template, if missing."""
    config_name = ".wallaceconfig"
    config_path = os.path.join(os.path.expanduser("~"), config_name)
    if os.path.isfile(config_path):
        # Never overwrite an existing configuration.
        log("Wallace config file already exists.", chevrons=False)
    else:
        log("Creating Wallace config file at ~/.wallaceconfig...", chevrons=False)
        # Copy the default config that ships inside the wallace package.
        wallace_module_path = os.path.dirname(os.path.realpath(__file__))
        src = os.path.join(wallace_module_path, "config", config_name)
        shutil.copyfile(src, config_path)
Walk the user though the Wallace setup.
def _astype(self, dtype, **kwargs):
    """Coerce this block to *dtype*.

    Special-cases tz-aware datetime targets: naive datetime values are
    localized to UTC and then converted to the target timezone.
    Everything else is delegated to the parent implementation.
    """
    dtype = pandas_dtype(dtype)
    if is_datetime64tz_dtype(dtype):
        values = self.values
        if getattr(values, 'tz', None) is None:
            # Naive values: interpret them as UTC before converting.
            values = DatetimeIndex(values).tz_localize('UTC')
        values = values.tz_convert(dtype.tz)
        return self.make_block(values)
    return super()._astype(dtype=dtype, **kwargs)
Coerce to the given dtype.

These blocks copy automatically, so ``copy=True`` has no effect.
Raise an exception on error if ``raise == True``.
def stop(self):
    """Stop the service: signal the stop event, detach the service
    listener and clear all members."""
    with self._lock:
        self._stop_event.set()
        self._shell_event.clear()
        if self._context is not None:
            # Stop receiving service events before tearing down state.
            self._context.remove_service_listener(self)
        self.clear_shell()
        self._context = None
Stop the service: signal the stop event, detach the service listener and clear all members.
def get_log_format_types():
    """Get all available IIS log format names and ids.

    :return: A dictionary mapping log format names to their ids; empty
        (with an error logged) if the WMI query failed.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.get_log_format_types
    """
    ret = dict()
    prefix = 'logging/'
    with salt.utils.winapi.Com():
        try:
            connection = wmi.WMI(namespace=_WMI_NAMESPACE)
            objs = connection.IISLogModuleSetting()
            # Strip the leading 'logging/' prefix from each module name.
            for obj in objs:
                name = six.text_type(obj.Name).replace(prefix, '', 1)
                ret[name] = six.text_type(obj.LogModuleId)
        except wmi.x_wmi as error:
            _LOG.error('Encountered WMI error: %s', error.com_error)
        except (AttributeError, IndexError) as error:
            _LOG.error('Error getting IISLogModuleSetting: %s', error)
    if not ret:
        _LOG.error('Unable to get log format types.')
    return ret
Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types
def newFromSites(self, sites=None, exclude=False):
    """Create a new read from self, with only certain sites.

    @param sites: A set of C{int} 0-based sites (i.e., indices) in
        sequences that should be kept.  If C{None} (the default), all
        sites are kept.
    @param exclude: If C{True} the C{sites} will be excluded, not
        included.
    @return: A new instance of our class holding the selected sequence
        and structure characters.
    """
    # Bug fix: the docstring always promised a None default meaning
    # "all sites", but the parameter had no default and None crashed.
    if sites is None:
        sites = set(range(len(self)))
    if exclude:
        sites = set(range(len(self))) - sites
    newSequence = []
    newStructure = []
    # Sequence and structure are filtered in lockstep so they stay aligned.
    for index, (base, structure) in enumerate(zip(self.sequence, self.structure)):
        if index in sites:
            newSequence.append(base)
            newStructure.append(structure)
    read = self.__class__(self.id, ''.join(newSequence), ''.join(newStructure))
    return read
Create a new read from self, with only certain sites. @param sites: A set of C{int} 0-based sites (i.e., indices) in sequences that should be kept. If C{None} (the default), all sites are kept. @param exclude: If C{True} the C{sites} will be excluded, not included.
def _run_tox_env(self, env_name, extra_env_vars=None):
    """Run the specified tox environment.

    :param env_name: name of the tox environment to run
    :type env_name: str
    :param extra_env_vars: additional variables to set in the environment
        (default: none)
    :type extra_env_vars: dict
    :raises: subprocess.CalledProcessError if tox exits non-zero
    :returns: combined STDOUT / STDERR
    :rtype: str
    """
    projdir = self.projdir
    env = deepcopy(os.environ)
    env['PATH'] = self._fixed_path(projdir)
    # Bug fix: the default used to be a shared mutable {} (the classic
    # mutable-default-argument pitfall); None keeps callers unchanged.
    if extra_env_vars:
        env.update(extra_env_vars)
    cmd = [os.path.join(projdir, 'bin', 'tox'), '-e', env_name]
    logger.info(
        'Running tox environment %s: args="%s" cwd=%s '
        'timeout=1800', env_name, ' '.join(cmd), projdir
    )
    res = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        cwd=projdir, timeout=1800, env=env
    )
    logger.info('tox process exited %d', res.returncode)
    if res.returncode != 0:
        # Surface the full tox output before raising via check_returncode.
        logger.error(
            'ERROR: tox environment %s exitcode %d',
            env_name, res.returncode
        )
        logger.error(
            'tox output:\n%s', res.stdout.decode()
        )
    res.check_returncode()
    return res.stdout.decode()
Run the specified tox environment.

:param env_name: name of the tox environment to run
:type env_name: str
:param extra_env_vars: additional variables to set in the environment
:type extra_env_vars: dict
:raises: subprocess.CalledProcessError if tox exits non-zero
:returns: combined STDOUT / STDERR
:rtype: str
def remove_network(self, net_id):
    """Remove a network. Similar to the ``docker network rm`` command.

    Args:
        net_id (str): The network's id
    """
    response = self._delete(self._url("/networks/{0}", net_id))
    self._raise_for_status(response)
Remove a network. Similar to the ``docker network rm`` command. Args: net_id (str): The network's id
def authorize(self):
    """Perform the authorization handshake with the set-top box.

    Echoes the box's version string back to it, answers the follow-up
    prompt, and raises :class:`AuthenticationError` when the box replies
    with a non-zero status word.
    """
    # The box first sends its version string; echo it back to accept.
    version = self.con.makefile().readline()
    self.con.send(version.encode())
    self.con.recv(2)
    self.con.send(struct.pack('>B', 1))
    # Four-byte big-endian status: zero means we are authorized.
    msg = self.con.recv(4)
    response = struct.unpack(">I", msg)
    if response[0] != 0:
        log.debug("Failed to authorize with set-top at %s:%s.", self.ip, self.port)
        raise AuthenticationError()
    self.con.send(b'0')
    log.debug('Authorized succesfully with set-top box at %s:%s.', self.ip, self.port)
Use the magic of a unicorn and summon the set-top box to listen to us. / ,.. / ,' '; ,,.__ _,' /'; . :',' ~~~~ '. '~ :' ( ) )::, '. '. .=----=..-~ .;' ' ;' :: ':. '" (: ': ;) \\ '" ./ '" '" Seriously, I've no idea what I'm doing here.
def reftrack_status_data(rt, role):
    """Return the status of the given reftrack for display/edit roles.

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holding the data
    :param role: item data role
    :returns: the status string, a placeholder if there is none, or None
        for roles other than display/edit
    """
    status = rt.status()
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        # Empty/None status means the entity is not loaded in the scene.
        return status or "Not in scene!"
Return the data for the status :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the status :rtype: depending on role :raises: None
def get_access_token(self, method='POST', decoder=parse_utf8_qsl, key='access_token', **kwargs):
    """Request an access token and return it.

    :param method: HTTP method to use, defaults to `POST`.
    :param decoder: function used to parse the response content into a
        dictionary.
    :param key: the key under which the token appears in the decoded
        response, defaults to 'access_token'.
    :param \*\*kwargs: optional arguments, passed through to Requests.
    """
    raw = self.get_raw_access_token(method, **kwargs)
    # process_token_request returns a one-element tuple; unpack it.
    token, = process_token_request(raw, decoder, key)
    return token
Returns an access token.

:param method: A string representation of the HTTP method to be used,
    defaults to `POST`.
:type method: str
:param decoder: A function used to parse the Response content. Should
    return a dictionary.
:type decoder: func
:param key: The key the access token will be decoded by, defaults to
    'access_token'.
:type key: str
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
def get_modules(paths, toplevel=True):
    """Collect Python module file paths from files and directories.

    Files named on the command line are taken even without a .py
    extension; inside directories only .py files are picked up,
    recursively.  Exits with an error for .pyc files or missing paths.
    """
    modules = []
    for raw_path in paths:
        path = os.path.abspath(raw_path)
        if toplevel and path.endswith('.pyc'):
            sys.exit('.pyc files are not supported: {0}'.format(path))
        if os.path.isfile(path) and (path.endswith('.py') or toplevel):
            modules.append(path)
        elif os.path.isdir(path):
            # Recurse into the directory in sorted order for stable output.
            children = [os.path.join(path, entry)
                        for entry in sorted(os.listdir(path))]
            modules.extend(get_modules(children, toplevel=False))
        elif toplevel:
            # Only complain about missing paths the user named directly.
            sys.exit('Error: {0} could not be found.'.format(path))
    return modules
Take files from the command line even if they don't end with .py.
def get_responsibles_data(self, reports):
    """Responsibles data to be used in the template.

    Collects the responsible managers of every report's Analysis Request.
    A recipient is marked valid only when they are responsible for *all*
    reports in the batch.
    """
    if not reports:
        return []
    recipients = []
    recipient_names = []
    for num, report in enumerate(reports):
        ar = report.getAnalysisRequest()
        report_recipient_names = []
        responsibles = ar.getResponsible()
        for manager_id in responsibles.get("ids", []):
            responsible = responsibles["dict"][manager_id]
            name = responsible.get("name")
            email = responsible.get("email")
            record = {
                "name": name,
                "email": email,
                "valid": True,
            }
            # De-duplicate recipients across reports.
            if record not in recipients:
                recipients.append(record)
            report_recipient_names.append(name)
        recipient_names.append(report_recipient_names)
    # Names present on every report; anyone missing from one is invalid.
    common_names = set(recipient_names[0]).intersection(*recipient_names)
    for recipient in recipients:
        if recipient.get("name") not in common_names:
            recipient["valid"] = False
    return recipients
Responsibles data to be used in the template
def get_student_email(cmd_args, endpoint=''):
    """Attempt to get the student's email.  Return the email, or None."""
    log.info("Attempting to get student email")
    # Local runs never hit the remote service.
    if cmd_args.local:
        return None
    access_token = authenticate(cmd_args, endpoint=endpoint, force=False)
    if not access_token:
        return None
    try:
        return get_info(cmd_args, access_token)['email']
    except IOError:
        # Network failure: treat as "email unavailable".
        return None
Attempts to get the student's email. Returns the email, or None.
def get_point_name(self, context):
    """Derive point names by stripping each row of the parsed context table.

    :param context: raw context handed to ``parse_context``
    :return: the per-row result of applying ``strip_point_name``
    """
    table = self.parse_context(context)
    # Apply per row (axis=1) so strip_point_name sees whole metadata rows.
    return table.apply(self.strip_point_name, axis=1)
Get point name.

Parameters
----------
context : object
    Raw context handed to ``parse_context`` to build the metadata
    table (exact type depends on the parser — confirm with callers).

Returns
-------
pandas.Series (presumably — ``apply(..., axis=1)`` implies a DataFrame input)
    Result of applying ``strip_point_name`` to each metadata row.
def _validate_cert_chain(self, cert_chain):
    """Validate the signing certificate chain.

    Checks that the certificate is currently within its validity window
    and that the expected Alexa domain appears among the certificate's
    SAN DNS names.

    :param cert_chain: Certificate chain to be validated
    :type cert_chain: cryptography.x509.Certificate
    :return: None
    :raises: :py:class:`VerificationException` if the certificate is not
        valid
    """
    # Naive UTC now, matching the certificate's naive validity fields.
    now = datetime.utcnow()
    if not (cert_chain.not_valid_before <= now <= cert_chain.not_valid_after):
        raise VerificationException("Signing Certificate expired")
    ext = cert_chain.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    if CERT_CHAIN_DOMAIN not in ext.value.get_values_for_type(
            DNSName):
        raise VerificationException(
            "{} domain missing in Signature Certificate Chain".format(
                CERT_CHAIN_DOMAIN))
Validate the certificate chain. This method checks if the passed in certificate chain is valid, i.e it is not expired and the Alexa domain is present in the SAN extensions of the certificate chain. A :py:class:`VerificationException` is raised if the certificate chain is not valid. :param cert_chain: Certificate chain to be validated :type cert_chain: cryptography.x509.Certificate :return: None :raises: :py:class:`VerificationException` if certificated is not valid
def key_hash(key):
    """Return the 32-character MD5 hex digest used to look up a job's
    primary key.

    Items are folded in sorted key order so logically-equal dicts hash
    identically.  NOTE(review): only the *values* are hashed, not the key
    names — two dicts with the same values under different names collide;
    confirm this is intended before changing it (stored hashes depend on it).
    """
    digest = hashlib.md5()
    for _, value in sorted(key.items()):
        digest.update(str(value).encode())
    return digest.hexdigest()
32-byte hash used for lookup of primary keys of jobs
def extract_file_args(subparsers):
    """Add the command line options for the extract_file operation."""
    # Bug fix: the two adjacent help-string literals concatenated to
    # "...secret fromVault..." (missing space).
    extract_parser = subparsers.add_parser('extract_file',
                                           help='Extract a single secret from '
                                                'Vault to a local file')
    extract_parser.add_argument('vault_path',
                                help='Full path (including key) to secret')
    extract_parser.add_argument('destination',
                                help='Location of destination file')
    base_args(extract_parser)
Add the command line options for the extract_file operation
def produce(self, **kwargs):
    """Asynchronously send a message to Kafka, Avro-encoding the key and
    value with the given (or instance-default) schemas.

    :param str topic: topic name (required)
    :param object value: an object to serialize as the message value
    :param str value_schema: Avro schema for the value
    :param object key: an object to serialize as the message key
    :param str key_schema: Avro schema for the key

    Any other keyword arguments are passed through to
    ``confluent_kafka.Producer.produce``.

    :raises ClientError: if no topic name was given
    :raises ValueSerializerError: if a value is given without a schema
    :raises KeySerializerError: if a key is given without a schema
    """
    # Fall back to the schemas configured on this producer instance.
    key_schema = kwargs.pop('key_schema', self._key_schema)
    value_schema = kwargs.pop('value_schema', self._value_schema)
    topic = kwargs.pop('topic', None)
    if not topic:
        raise ClientError("Topic name not specified.")
    value = kwargs.pop('value', None)
    key = kwargs.pop('key', None)
    if value is not None:
        if value_schema:
            value = self._serializer.encode_record_with_schema(topic, value_schema, value)
        else:
            raise ValueSerializerError("Avro schema required for values")
    if key is not None:
        if key_schema:
            # The final True flags this record as a key for the serializer.
            key = self._serializer.encode_record_with_schema(topic, key_schema, key, True)
        else:
            raise KeySerializerError("Avro schema required for key")
    super(AvroProducer, self).produce(topic, value, key, **kwargs)
Asynchronously sends message to Kafka by encoding with specified or default avro schema. :param str topic: topic name :param object value: An object to serialize :param str value_schema: Avro schema for value :param object key: An object to serialize :param str key_schema: Avro schema for key Plus any other parameters accepted by confluent_kafka.Producer.produce :raises SerializerError: On serialization failure :raises BufferError: If producer queue is full. :raises KafkaException: For other produce failures.
def log_histogram(self, step, tag, val):
    """Write a histogram event.

    :param int step: Time step (x-axis in TensorBoard graphs)
    :param str tag: Label for this value
    :param numpy.ndarray val: Arbitrary-dimensional array containing
        values to be aggregated in the resulting histogram.
    """
    hist = Histogram()
    hist.add(val)
    # Wrap the encoded histogram in a Summary event for TensorBoard.
    summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())])
    self._add_event(step, summary)
Write a histogram event. :param int step: Time step (x-axis in TensorBoard graphs) :param str tag: Label for this value :param numpy.ndarray val: Arbitrary-dimensional array containing values to be aggregated in the resulting histogram.
def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
              remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
              remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
              add_chain_id_if_empty='X', keep_chains=None):
    """Clean a PDB file.

    Args:
        pdb_file (str): Path to input PDB file
        out_suffix (str): Suffix to append to original filename
        outdir (str): Path to output directory
        force_rerun (bool): Re-clean even if a clean file already exists
        remove_atom_alt (bool): Remove alternate positions
        keep_atom_alt_id (str): Which alternate ID to keep when removing
        remove_atom_hydrogen (bool): Remove hydrogen atoms
        add_atom_occ (bool): Add atom occupancy fields if not present
        remove_res_hetero (bool): Remove all HETATMs
        keep_chemicals (str, list): HETATM chemical names to keep anyway
        keep_res_only (str, list): Keep ONLY these resnames
        add_chain_id_if_empty (str): Chain ID to add if not present
        keep_chains (str, list): Keep only these chains

    Returns:
        str: Path to cleaned PDB file
    """
    outfile = ssbio.utils.outfile_maker(inname=pdb_file, append_to_name=out_suffix, outdir=outdir, outext='.pdb')
    # Skip the work entirely when a cleaned file already exists.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        my_pdb = StructureIO(pdb_file)
        my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt, remove_atom_hydrogen=remove_atom_hydrogen,
                              keep_atom_alt_id=keep_atom_alt_id, add_atom_occ=add_atom_occ,
                              remove_res_hetero=remove_res_hetero, keep_res_only=keep_res_only,
                              add_chain_id_if_empty=add_chain_id_if_empty, keep_chains=keep_chains,
                              keep_chemicals=keep_chemicals)
        my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix, out_dir=outdir, custom_selection=my_cleaner,
                                        force_rerun=force_rerun)
        return my_clean_pdb
    else:
        return outfile
Clean a PDB file. Args: pdb_file (str): Path to input PDB file out_suffix (str): Suffix to append to original filename outdir (str): Path to output directory force_rerun (bool): If structure should be re-cleaned if a clean file exists already remove_atom_alt (bool): Remove alternate positions keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep remove_atom_hydrogen (bool): Remove hydrogen atoms add_atom_occ (bool): Add atom occupancy fields if not present remove_res_hetero (bool): Remove all HETATMs keep_chemicals (str, list): If removing HETATMs, keep specified chemical names keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else! add_chain_id_if_empty (str): Add a chain ID if not present keep_chains (str, list): Keep only these chains Returns: str: Path to cleaned PDB file
async def get_all_platforms(self) -> AsyncIterator[Platform]:
    """Yield a platform instance for every registered platform name."""
    # Iterate the registry directly (a dict iterates its keys).
    for platform_name in self._classes:
        yield await self.get_platform(platform_name)
Returns all platform instances
def build_entity_from_uri(self, uri, ontospyClass=None):
    """Extract RDF statements having *uri* as subject and wrap them in an
    RDF_Entity (or a user-supplied subclass) so they can be queried
    further.

    NOTE: the returned entity is not attached to any index.

    :param uri: subject URI to look up
    :param ontospyClass: optional RDF_Entity subclass to instantiate
    :return: the populated entity, or None if the class is invalid or no
        triples were found
    """
    if not ontospyClass:
        ontospyClass = RDF_Entity
    elif not issubclass(ontospyClass, RDF_Entity):
        click.secho("Error: <%s> is not a subclass of ontospy.RDF_Entity" % str(ontospyClass))
        return None
    else:
        pass
    qres = self.sparqlHelper.entityTriples(uri)
    if qres:
        # Instantiate and populate the entity with the returned triples.
        entity = ontospyClass(rdflib.URIRef(uri), None, self.namespaces)
        entity.triples = qres
        entity._buildGraph()
        # Record the declared rdf:type (and its qname form), if any.
        test = entity.getValuesForProperty(rdflib.RDF.type)
        if test:
            entity.rdftype = test
            entity.rdftype_qname = [entity._build_qname(x) for x in test]
        return entity
    else:
        return None
Extract RDF statements having a URI as subject, then instantiate the RDF_Entity Python object so that it can be queried further. Passing <ontospyClass> allows to instantiate a user-defined RDF_Entity subclass. NOTE: the entity is not attached to any index. In future version we may create an index for these (individuals?) keeping into account that any existing model entity could be (re)created this way.
def request_path(request):
    """Return the path component of the request-URI, as defined by
    RFC 2965, always with a leading '/'."""
    full_url = request.get_full_url()
    path = escape_path(urlsplit(full_url).path)
    if path.startswith("/"):
        return path
    # Normalize an empty or relative path to an absolute one.
    return "/" + path
Path component of request-URI, as defined by RFC 2965.
def wait(self, container, timeout=None, condition=None):
    """Block until a container stops, then return its exit code.
    Similar to the ``docker wait`` command.

    Args:
        container (str or dict): The container to wait on. If a dict,
            the ``Id`` key is used.
        timeout (int): Request timeout
        condition (str): Wait until a container state reaches the given
            condition, either ``not-running`` (default), ``next-exit``,
            or ``removed``

    Returns:
        (dict): The API's response as a Python dictionary, including the
            container's exit code under the ``StatusCode`` attribute.

    Raises:
        :py:class:`requests.exceptions.ReadTimeout`
            If the timeout is exceeded.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url("/containers/{0}/wait", container)
    params = {}
    if condition is not None:
        # The 'condition' query parameter only exists from API 1.30 on.
        if utils.version_lt(self._version, '1.30'):
            raise errors.InvalidVersion(
                'wait condition is not supported for API version < 1.30'
            )
        params['condition'] = condition
    res = self._post(url, timeout=timeout, params=params)
    return self._result(res, True)
Block until a container stops, then return its exit code. Similar to the ``docker wait`` command. Args: container (str or dict): The container to wait on. If a dict, the ``Id`` key is used. timeout (int): Request timeout condition (str): Wait until a container state reaches the given condition, either ``not-running`` (default), ``next-exit``, or ``removed`` Returns: (dict): The API's response as a Python dictionary, including the container's exit code under the ``StatusCode`` attribute. Raises: :py:class:`requests.exceptions.ReadTimeout` If the timeout is exceeded. :py:class:`docker.errors.APIError` If the server returns an error.
def cast_callback(value):
    """Parse a timestamp string ('T'- or space-separated) into a datetime,
    discarding any fractional-seconds part."""
    # Normalize ISO 'T' separators; a no-op when none are present.
    normalized = value.replace('T', ' ')
    # Drop everything after the first '.' (fractional seconds).
    seconds_only = normalized.split('.', 1)[0]
    return datetime.strptime(seconds_only, '%Y-%m-%d %H:%M:%S')
Override `cast_callback` method.
def _attempt_to_choose_formatting_pattern(self):
    """Attempt to set the formatting template; return a string containing
    the formatted version of the digits entered so far."""
    # Format metadata can only be selected once enough leading digits
    # have been entered.
    if len(self._national_number) >= _MIN_LEADING_DIGITS_LENGTH:
        self._get_available_formats(self._national_number)
        # See if the accrued digits can already be formatted properly.
        formatted_number = self._attempt_to_format_accrued_digits()
        if len(formatted_number) > 0:
            return formatted_number
        if self._maybe_create_new_template():
            return self._input_accrued_national_number()
        else:
            # No usable template: fall back to the raw accrued input.
            return self._accrued_input
    else:
        return self._append_national_number(self._national_number)
Attempts to set the formatting template and returns a string which contains the formatted version of the digits entered so far.
def db_remove(name, **connection_args):
    """Removes a database from the MySQL server.

    CLI Example:

    .. code-block:: bash

        salt '*' mysql.db_remove 'dbname'
    """
    if not db_exists(name, **connection_args):
        log.info('DB \'%s\' does not exist', name)
        return False
    # Never drop MySQL's own system databases.  Bug fix: the original
    # checked the misspelled 'information_scheme'; keep it for backward
    # compatibility but also protect the real 'information_schema'.
    if name in ('mysql', 'information_scheme', 'information_schema'):
        log.info('DB \'%s\' may not be removed', name)
        return False
    dbc = _connect(**connection_args)
    if dbc is None:
        return False
    cur = dbc.cursor()
    # Database names cannot be bound as query parameters; quote them.
    s_name = quote_identifier(name)
    qry = 'DROP DATABASE {0};'.format(s_name)
    try:
        _execute(cur, qry)
    except MySQLdb.OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
        return False
    # Verify the drop actually took effect before reporting success.
    if not db_exists(name, **connection_args):
        log.info('Database \'%s\' has been removed', name)
        return True
    log.info('Database \'%s\' has not been removed', name)
    return False
Removes a databases from the MySQL server. CLI Example: .. code-block:: bash salt '*' mysql.db_remove 'dbname'
def trainSequences(sequences, exp, idOffset=0):
    """Train the network on all the sequences.

    :param sequences: container mapping sequence ids to sequences; must
        also provide ``provideObjectsToLearn`` (an object-machine style
        container — confirm the exact type with callers).
    :param exp: experiment whose ``learnObjects`` consumes each sequence.
    :param idOffset: offset added to each sequence id when registering
        the learned object with the experiment.
    """
    for seqId in sequences:
        # Train proportionally to sequence length: 3 passes per element.
        iterations = 3*len(sequences[seqId])
        for p in range(iterations):
            s = sequences.provideObjectsToLearn([seqId])
            objectSDRs = dict()
            objectSDRs[seqId + idOffset] = s[seqId]
            exp.learnObjects(objectSDRs, reset=False)
        # Reset TM state between sequences so learning does not bleed over.
        exp.TMColumns[0].reset()
        exp.sendReset()
Train the network on all the sequences
def onesided_cl_to_dlnl(cl):
    """Compute the delta-loglikelihood value that corresponds to a
    one-sided upper limit of the given confidence level.

    Parameters
    ----------
    cl : float
        Confidence level.

    Returns
    -------
    dlnl : float
        Delta-loglikelihood value with respect to the maximum of the
        likelihood function.
    """
    alpha = 1.0 - cl
    # sqrt(2) * erfinv(1 - 2*alpha) is the one-sided Gaussian quantile
    # z(cl); the corresponding delta-loglikelihood is z**2 / 2.
    z = np.sqrt(2.) * special.erfinv(1 - 2 * alpha)
    return 0.5 * z ** 2
Compute the delta-loglikehood values that corresponds to an upper limit of the given confidence level. Parameters ---------- cl : float Confidence level. Returns ------- dlnl : float Delta-loglikelihood value with respect to the maximum of the likelihood function.
def _remove_unexpected_query_parameters(schema, req): additional_properties = schema.get('addtionalProperties', True) if additional_properties: pattern_regexes = [] patterns = schema.get('patternProperties', None) if patterns: for regex in patterns: pattern_regexes.append(re.compile(regex)) for param in set(req.GET.keys()): if param not in schema['properties'].keys(): if not (list(regex for regex in pattern_regexes if regex.match(param))): del req.GET[param]
Remove unexpected properties from the req.GET.
def return_value(self, *args, **kwargs):
    """Extract the real value to be returned from the wrapping callable.

    :return: The value the double should return when called.
    """
    # Record the call before delegating to the wrapped return-value factory.
    self._called()
    return self._return_value(*args, **kwargs)
Extracts the real value to be returned from the wrapping callable. :return: The value the double should return when called.
def all(self, scope=None, **kwargs):
    """List all the runners.

    Args:
        scope (str): The scope of runners to show, one of: specific,
            shared, active, paused, online
        **kwargs: Extra options to send to the server (e.g. sudo),
            including the usual pagination options.

    Returns:
        list(Runner): a list of runners matching the scope.
    """
    query_data = {} if scope is None else {'scope': scope}
    return self.gitlab.http_list('/runners/all', query_data, **kwargs)
List all the runners. Args: scope (str): The scope of runners to show, one of: specific, shared, active, paused, online all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the server failed to perform the request Returns: list(Runner): a list of runners matching the scope.
def update(self):
    """Refresh CPU stats via the configured input method.

    Falls back to the initial (empty) value for unknown input methods.
    The result is stored on ``self.stats`` and also returned.
    """
    method = self.input_method
    if method == 'local':
        fresh = self.update_local()
    elif method == 'snmp':
        fresh = self.update_snmp()
    else:
        fresh = self.get_init_value()
    self.stats = fresh
    return self.stats
Update CPU stats using the input method.