code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def generate_catalogue_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to generate and save a
    catalogue file."""
    parser = subparsers.add_parser(
        'catalogue', description=constants.CATALOGUE_DESCRIPTION,
        epilog=constants.CATALOGUE_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.CATALOGUE_HELP)
    utils.add_common_arguments(parser)
    parser.set_defaults(func=generate_catalogue)
    parser.add_argument('corpus', help=constants.DB_CORPUS_HELP,
                        metavar='CORPUS')
    utils.add_query_arguments(parser)
    parser.add_argument('-l', '--label', default='',
                        help=constants.CATALOGUE_LABEL_HELP)
Adds a sub-command parser to `subparsers` to generate and save a catalogue file.
def postprocess(self):
    """Submit the configured postprocessing script to PBS after collation.

    Requires ``self.postscript`` to be set; loads the ``pbs`` environment
    module and submits the script via ``qsub``.
    """
    # NOTE(review): asserts are stripped under `python -O`; consider raising
    # explicit exceptions for these checks.
    assert self.postscript
    envmod.setup()
    envmod.module('load', 'pbs')
    cmd = 'qsub {script}'.format(script=self.postscript)
    cmd = shlex.split(cmd)
    rc = sp.call(cmd)
    assert rc == 0, 'Postprocessing script submission failed.'
Submit a postprocessing script after collation
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
    """Yields records from TFRecord files.

    Args:
      filenames: list<str>, list of TFRecord filenames to read from.
      gzipped: bool, whether the TFRecord files are gzip-encoded.
      example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
        if provided, will parse each record as a tensorflow.Example proto.

    Yields:
      Records (or parsed Examples, if example_spec is provided) from files.
    """
    with tf.Graph().as_default():
        dataset = tf.data.Dataset.from_tensor_slices(filenames)

        def _load_records(filename):
            # 16 MB read buffer per file.
            return tf.data.TFRecordDataset(
                filename,
                compression_type=tf.constant("GZIP") if gzipped else None,
                buffer_size=16 * 1000 * 1000)

        dataset = dataset.flat_map(_load_records)

        def _parse_example(ex_ser):
            return tf.parse_single_example(ex_ser, example_spec)

        if example_spec:
            dataset = dataset.map(_parse_example, num_parallel_calls=32)
        dataset = dataset.prefetch(100)
        record_it = dataset.make_one_shot_iterator().get_next()

        with tf.Session() as sess:
            while True:
                try:
                    ex = sess.run(record_it)
                    yield ex
                except tf.errors.OutOfRangeError:
                    break
Yields records from TFRecord files. Args: filenames: list<str>, list of TFRecord filenames to read from. gzipped: bool, whether the TFRecord files are gzip-encoded. example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>, if provided, will parse each record as a tensorflow.Example proto. Yields: Records (or parsed Examples, if example_spec is provided) from files.
def lostitem_delete_view(request, item_id):
    """Delete a lost item, or mark it as found.

    A POST with ``full_delete`` removes the record entirely; otherwise the
    item is flagged as found.  A GET renders a confirmation page.

    item_id: lost item id
    """
    if request.method == "POST":
        try:
            a = LostItem.objects.get(id=item_id)
            if request.POST.get("full_delete", False):
                a.delete()
                messages.success(request, "Successfully deleted lost item.")
            else:
                a.found = True
                a.save()
                messages.success(request, "Successfully marked lost item as found!")
        except LostItem.DoesNotExist:
            # Item already gone; fall through to the redirect.
            pass
        return redirect("index")
    else:
        lostitem = get_object_or_404(LostItem, id=item_id)
        return render(request, "lostfound/lostitem_delete.html", {"lostitem": lostitem})
Delete a lostitem. id: lostitem id
def to_pixel(self, wcs, mode='all'):
    """Convert the aperture to a `CircularAperture` defined in pixel
    coordinates.

    Parameters
    ----------
    wcs : `~astropy.wcs.WCS`
        The world coordinate system (WCS) transformation to use.
    mode : {'all', 'wcs'}, optional
        Whether to do the transformation including distortions (``'all'``;
        default) or only the core WCS transformation (``'wcs'``).

    Returns
    -------
    aperture : `CircularAperture` object
        A `CircularAperture` object.
    """
    pixel_params = self._to_pixel_params(wcs, mode=mode)
    return CircularAperture(**pixel_params)
Convert the aperture to a `CircularAperture` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `CircularAperture` object A `CircularAperture` object.
def RunValidationOutputFromOptions(feed, options):
    """Validate `feed`, writing results per `options`; return an exit code.

    Output goes to the console when ``options.output`` is "CONSOLE"
    (case-insensitive); otherwise it is treated as a filename.
    """
    destination = options.output
    if destination.upper() == "CONSOLE":
        return RunValidationOutputToConsole(feed, options)
    return RunValidationOutputToFilename(feed, options, destination)
Validate feed, output results per options and return an exit code.
def get_all_subscriptions_by_topic(name, region=None, key=None, keyid=None, profile=None):
    """Get the list of all subscriptions to a specific SNS topic.

    Results are memoised in ``__context__`` per topic name.

    CLI example::

        salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
    """
    cache_key = _subscriptions_cache_key(name)
    try:
        return __context__[cache_key]
    except KeyError:
        # Not cached yet; fetch below.
        pass
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    ret = conn.get_all_subscriptions_by_topic(get_arn(name, region, key, keyid, profile))
    __context__[cache_key] = ret['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']
    return __context__[cache_key]
Get list of all subscriptions to a specific topic. CLI example:: salt myminion boto_sns.get_all_subscriptions_by_topic mytopic region=us-east-1
def save_to_json(self):
    """Serialise this DatasetUpload object into the JSON request body
    expected by the upload API."""
    body = {}
    body['DatasetId'] = self.dataset
    body['Name'] = self.name
    body['Description'] = self.description
    body['Source'] = self.source
    body['PubDate'] = self.publication_date
    body['AccessedOn'] = self.accessed_on
    body['Url'] = self.dataset_ref
    body['UploadFormatType'] = self.upload_format_type
    body['Columns'] = self.columns
    # The file property object is flattened to its attribute dict.
    body['FileProperty'] = self.file_property.__dict__
    body['FlatDSUpdateOptions'] = self.flat_ds_update_options
    body['Public'] = self.public
    return json.dumps(body)
Serialize this DatasetUpload object to a JSON string.
def url_rule(blueprint_or_app, rules, endpoint=None, view_func=None, **options):
    """Add one or more url rules to the given Flask blueprint or app.

    :param blueprint_or_app: Flask blueprint or app
    :param rules: a single rule string or a list of rules
    :param endpoint: endpoint
    :param view_func: view function
    :param options: other options passed through to ``add_url_rule``
    """
    for rule in to_list(rules):
        blueprint_or_app.add_url_rule(rule, endpoint=endpoint,
                                      view_func=view_func, **options)
Add one or more url rules to the given Flask blueprint or app. :param blueprint_or_app: Flask blueprint or app :param rules: a single rule string or a list of rules :param endpoint: endpoint :param view_func: view function :param options: other options
def PlistValueToPlainValue(plist):
    """Takes the plist contents generated by binplist and returns a plain dict.

    binplist uses rich types to express some of the plist types.  We need to
    convert them to types that RDFValueArray will be able to transport.

    Args:
      plist: A plist to convert.

    Returns:
      A simple python type.
    """
    if isinstance(plist, dict):
        ret_value = dict()
        for key, value in iteritems(plist):
            ret_value[key] = PlistValueToPlainValue(value)
        return ret_value
    elif isinstance(plist, list):
        return [PlistValueToPlainValue(value) for value in plist]
    elif isinstance(plist, datetime.datetime):
        # Convert datetimes to microseconds since the UNIX epoch.
        return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
    return plist
Takes the plist contents generated by binplist and returns a plain dict. binplist uses rich types to express some of the plist types. We need to convert them to types that RDFValueArray will be able to transport. Args: plist: A plist to convert. Returns: A simple python type.
def _generate_relative_positions_matrix(length_q, length_k,
                                        max_relative_position, cache=False):
    """Generates matrix of relative positions between inputs.

    Distances are clipped to ``[-max_relative_position,
    max_relative_position]`` and shifted to be non-negative so they can be
    used as embedding-table indices.  With ``cache=True`` only the last
    query position is generated (incremental decoding).
    """
    if not cache:
        if length_q == length_k:
            range_vec_q = range_vec_k = tf.range(length_q)
        else:
            # Align query positions with the tail of the key positions.
            range_vec_k = tf.range(length_k)
            range_vec_q = range_vec_k[-length_q:]
        distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
    else:
        distance_mat = tf.expand_dims(tf.range(-length_k + 1, 1, 1), 0)
    distance_mat_clipped = tf.clip_by_value(distance_mat,
                                            -max_relative_position,
                                            max_relative_position)
    # Shift values to be >= 0; each integer still uniquely identifies a
    # relative position difference.
    final_mat = distance_mat_clipped + max_relative_position
    return final_mat
Generates matrix of relative positions between inputs.
def convert_to_unit(self, unit):
    """Convert the Data Collection values to the input unit, in place."""
    self._values = self._header.data_type.to_unit(
        self._values, unit, self._header.unit)
    self._header._unit = unit
Convert the Data Collection to the input unit.
def version_history(soup, html_flag=True):
    """Extract the article version history details from a parsed article."""
    # Render XML node contents as HTML when requested.
    convert = lambda xml_string: xml_to_html(html_flag, xml_string)
    version_history = []
    related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup))
    for tag in related_object_tags:
        article_version = OrderedDict()
        date_tag = first(raw_parser.date(tag))
        if date_tag:
            copy_attribute(date_tag.attrs, 'date-type', article_version, 'version')
            (day, month, year) = ymd(date_tag)
            article_version['day'] = day
            article_version['month'] = month
            article_version['year'] = year
            article_version['date'] = date_struct_nn(year, month, day)
        copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href')
        set_if_value(article_version, "comment",
                     convert(node_contents_str(first(raw_parser.comment(tag)))))
        version_history.append(article_version)
    return version_history
extract the article version history details
def cmp_public_numbers(pn1, pn2):
    """Compare 2 sets of RSA public numbers.

    Two public RSA keys are the same exactly when both the modulus ``n``
    and the public exponent ``e`` are equal.

    :param pn1: The set of values belonging to the 1st key
    :param pn2: The set of values belonging to the 2nd key
    :return: True if the sets are the same, otherwise False
    """
    # Collapses the original nested if/return-False ladder into one
    # boolean expression with identical truth table.
    return pn1.n == pn2.n and pn1.e == pn2.e
Compare 2 sets of public numbers. This is a way to compare 2 public RSA keys. If the sets are the same then the keys are the same. :param pn1: The set of values belonging to the 1st key :param pn2: The set of values belonging to the 2nd key :return: True if the sets are the same, otherwise False.
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]:
    """Group utterances by speaker.

    :param utterances: the utterances to group
    :return: mapping from each speaker to that speaker's utterances
    """
    by_speaker = defaultdict(list)
    for utterance in utterances:
        by_speaker[utterance.speaker].append(utterance)
    return by_speaker
Creates a dictionary mapping from speakers to their utterances.
def get_choices(field):
    """Find choices of a field, whether it has choices or has a queryset.

    Args:
        field (BoundField): Django form boundfield

    Returns:
        list: List of choices
    """
    empty_label = getattr(field.field, "empty_label", False)
    needs_empty_value = False
    choices = []
    if hasattr(field.field, "_choices"):
        choices = field.field._choices
    elif hasattr(field.field, "_queryset"):
        # ModelChoiceField: materialise (value, label) pairs from the queryset.
        queryset = field.field._queryset
        field_name = getattr(field.field, "to_field_name") or "pk"
        choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)
    # Detect and strip a leading "blank" choice so it can be re-inserted
    # consistently below.
    if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
        needs_empty_value = True
        if not choices[0][0]:
            del choices[0]
    if empty_label == BLANK_CHOICE_DASH[0][1]:
        empty_label = None
    if empty_label or not field.field.required:
        if needs_empty_value:
            choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))
    return choices
Find choices of a field, whether it has choices or has a queryset. Args: field (BoundField): Django form boundfield Returns: list: List of choices
def __receiver_loop(self):
    """Main loop listening for incoming data; parses frames and dispatches
    them until the connection closes or the transport stops running."""
    log.info("Starting receiver loop")
    # NOTE(review): initialised True and only ever set True again below, so
    # the 'disconnected' notification always fires — confirm this is intended.
    notify_disconnected = True
    try:
        while self.running:
            try:
                while self.running:
                    frames = self.__read()
                    for frame in frames:
                        f = utils.parse_frame(frame)
                        if f is None:
                            # Heartbeat / unparsable chunk; skip it.
                            continue
                        if self.__auto_decode:
                            f.body = decode(f.body)
                        self.process_frame(f, frame)
            except exception.ConnectionClosedException:
                if self.running:
                    # Unexpected close: reset buffer and stop the transport.
                    self.__recvbuf = b''
                    self.running = False
                    notify_disconnected = True
                break
            finally:
                self.cleanup()
    finally:
        # Always signal thread exit so joiners are released, then emit
        # the lifecycle notifications.
        with self.__receiver_thread_exit_condition:
            self.__receiver_thread_exited = True
            self.__receiver_thread_exit_condition.notifyAll()
        log.info("Receiver loop ended")
        self.notify('receiver_loop_completed')
        if notify_disconnected:
            self.notify('disconnected')
        with self.__connect_wait_condition:
            self.__connect_wait_condition.notifyAll()
Main loop listening for incoming data.
def raw_encode(data):
    """Special case serializer for raw payloads.

    Returns a ``(content_type, content_encoding, payload)`` triple; text is
    utf-8 encoded, everything else is passed through as binary.
    """
    content_type = 'application/data'
    payload = data
    # NOTE(review): `unicode` is the Python 2 text type; presumably provided
    # by a py2/py3 compat shim elsewhere in this module — confirm.
    if isinstance(payload, unicode):
        content_encoding = 'utf-8'
        payload = payload.encode(content_encoding)
    else:
        content_encoding = 'binary'
    return content_type, content_encoding, payload
Special case serializer.
def generate_method(method_name):
    """Generate a synchronous proxy method for a Thrift service call.

    Uses the TChannelSyncClient's threadloop to convert RPC calls to
    concurrent.futures.

    :param method_name: Method being called.
    :return: A method that invokes the RPC using TChannelSyncClient
    """
    def call(self, *args, **kwargs):
        loop = self.threadloop
        # Lazily start the event loop on first use.
        if not loop.is_ready():
            loop.start()
        target = getattr(self.async_thrift, method_name)
        return loop.submit(target, *args, **kwargs)
    return call
Generate a method for a given Thrift service. Uses the provided TChannelSyncClient's threadloop in order to convert RPC calls to concurrent.futures :param method_name: Method being called. :return: A method that invokes the RPC using TChannelSyncClient
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces and return a
    dictionary of incomplete relation data.

    An interface group is satisfied when any one of its interfaces has a
    complete context.

    :param configs: OSConfigRenderer with registered configs
    :param required_interfaces: dict of service type -> list of interfaces
    :return: dict of service type -> incomplete context data
    """
    satisfied = set(configs.complete_contexts())
    missing = {}
    for svc_type, interfaces in required_interfaces.items():
        if satisfied.intersection(interfaces):
            continue
        missing[svc_type] = configs.get_incomplete_context_data(interfaces)
    return missing
Check complete contexts against required_interfaces Return dictionary of incomplete relation data. configs is an OSConfigRenderer object with configs registered required_interfaces is a dictionary of required general interfaces with dictionary values of possible specific interfaces. Example: required_interfaces = {'database': ['shared-db', 'pgsql-db']} The interface is said to be satisfied if anyone of the interfaces in the list has a complete context. Return dictionary of incomplete or missing required contexts with relation status of interfaces and any missing data points. Example: {'message': {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, 'zeromq-configuration': {'related': False}}, 'identity': {'identity-service': {'related': False}}, 'database': {'pgsql-db': {'related': False}, 'shared-db': {'related': True}}}
def extract(self, destdir, decompress='auto'):
    """Extract the entire MAR file into a directory.

    Args:
        destdir (str): A local directory on disk into which the contents of
            this MAR file will be extracted.  Required parent directories
            will be created as necessary.
        decompress (obj, optional): Controls whether files are decompressed
            when extracted.  Must be one of 'auto' or None.  Defaults to
            'auto'.
    """
    for e in self.mardata.index.entries:
        name = e.name
        # safejoin guards against path traversal in entry names.
        entry_path = safejoin(destdir, name)
        entry_dir = os.path.dirname(entry_path)
        mkdir(entry_dir)
        with open(entry_path, 'wb') as f:
            write_to_file(self.extract_entry(e, decompress), f)
        # Entry flags carry the file permission bits.
        os.chmod(entry_path, e.flags)
Extract the entire MAR file into a directory. Args: destdir (str): A local directory on disk into which the contents of this MAR file will be extracted. Required parent directories will be created as necessary. decompress (obj, optional): Controls whether files are decompressed when extracted. Must be one of 'auto' or None. Defaults to 'auto'.
def remove_index(self, index):
    """Split WHERE constraints into (query_constraints, filter_constraints).

    When we do a query with a WHERE statement, it may end up being a scan
    or a query.  If it is a query, we need to remove the hash and range key
    constraints from the expression and return them as the
    query_constraints.  The remaining constraints, if any, are returned as
    the filter_constraints.
    """
    query = []
    remainder = []
    # self.pieces alternates constraint / connective; step over connectives.
    for i in range(0, len(self.pieces), 2):
        const = self.pieces[i]
        if const.hash_field == index.hash_key:
            query.append(const)
        elif index.range_key is not None and const.range_field == index.range_key:
            query.append(const)
        else:
            remainder.append(const)
    if len(query) == 1:
        query_constraints = query[0]
    else:
        query_constraints = Conjunction.and_(query)
    if not remainder:
        filter_constraints = None
    elif len(remainder) == 1:
        filter_constraints = remainder[0]
    else:
        filter_constraints = Conjunction.and_(remainder)
    return (query_constraints, filter_constraints)
This one takes some explanation. When we do a query with a WHERE statement, it may end up being a scan and it may end up being a query. If it is a query, we need to remove the hash and range key constraints from the expression and return that as the query_constraints. The remaining constraints, if any, are returned as the filter_constraints.
def mark_error(self, dispatch, error_log, message_cls):
    """Mark a dispatch as having an error, or as failed when the send retry
    limit for that message type is exhausted.

    Should be used within send().

    :param Dispatch dispatch: a Dispatch
    :param str error_log: error message
    :param MessageBase message_cls: MessageBase heir
    """
    limit = message_cls.send_retry_limit
    retries_exhausted = limit is not None and (dispatch.retry_count + 1) >= limit
    if retries_exhausted:
        self.mark_failed(dispatch, error_log)
    else:
        dispatch.error_log = error_log
        self._st['error'].append(dispatch)
Marks a dispatch as having error or consequently as failed if send retry limit for that message type is exhausted. Should be used within send(). :param Dispatch dispatch: a Dispatch :param str error_log: error message :param MessageBase message_cls: MessageBase heir
def add(repo_path, dest_path):
    """Register a git repository with homely so that its `HOMELY.py` runs on
    each `homely update`, then run an immediate update.

    REPO_PATH: a local git repository path, or a `git clone` URL.
    DEST_PATH: where to create the local clone (URL repositories only).
    """
    mkcfgdir()
    try:
        repo = getrepohandler(repo_path)
    except NotARepo as err:
        echo("ERROR: {}: {}".format(ERR_NOT_A_REPO, err.repo_path))
        sys.exit(1)
    if repo.isremote:
        localrepo, needpull = addfromremote(repo, dest_path)
    elif dest_path:
        raise UsageError("DEST_PATH is only for repos hosted online")
    else:
        try:
            repoid = repo.getrepoid()
        except RepoHasNoCommitsError as err:
            echo("ERROR: {}".format(ERR_NO_COMMITS))
            sys.exit(1)
        localrepo = RepoInfo(repo, repoid, None)
        needpull = False
    # addfromremote may decline (e.g. already registered); nothing to do.
    if not localrepo:
        return
    with saveconfig(RepoListConfig()) as cfg:
        cfg.add_repo(localrepo)
    success = run_update([localrepo], pullfirst=needpull, cancleanup=True)
    if not success:
        sys.exit(1)
Registers a git repository with homely so that it will run its `HOMELY.py` script on each invocation of `homely update`. `homely add` also immediately executes a `homely update` so that the dotfiles are installed straight away. If the git repository is hosted online, a local clone will be created first. REPO_PATH A path to a local git repository, or the URL for a git repository hosted online. If REPO_PATH is a URL, then it should be in a format accepted by `git clone`. If REPO_PATH is a URL, you may also specify DEST_PATH. DEST_PATH If REPO_PATH is a URL, then the local clone will be created at DEST_PATH. If DEST_PATH is omitted then the path to the local clone will be automatically derived from REPO_PATH.
def _to_java_object_rdd(rdd):
    """Return a JavaRDD of Object by unpickling.

    It will convert each Python object into a Java object via Pyrolite,
    whether the RDD is serialized in batch or not.
    """
    rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
    return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
Return a JavaRDD of Object by unpickling It will convert each Python object into a Java object by Pyrolite, whether the RDD is serialized in batch or not.
def line_oriented(cls, line_oriented_options, console):
    """Given Goal.Options and a Console, yield ``(print_stdout, print_stderr)``
    functions for writing to stdout and stderr, respectively.

    The passed options instance will generally be the `Goal.Options` of a
    `LineOriented` `Goal`.
    """
    # NOTE(review): exact-type check deliberately rejects subclasses of
    # cls.Options — confirm that is intended.
    if type(line_oriented_options) != cls.Options:
        raise AssertionError(
            'Expected Options for `{}`, got: {}'.format(cls.__name__, line_oriented_options))
    output_file = line_oriented_options.values.output_file
    # Interpret escape sequences (e.g. '\n') given on the command line.
    sep = line_oriented_options.values.sep.encode('utf-8').decode('unicode_escape')
    stdout, stderr = console.stdout, console.stderr
    if output_file:
        stdout = open(output_file, 'w')
    try:
        print_stdout = lambda msg: print(msg, file=stdout, end=sep)
        print_stderr = lambda msg: print(msg, file=stderr)
        yield print_stdout, print_stderr
    finally:
        if output_file:
            stdout.close()
        else:
            stdout.flush()
            stderr.flush()
Given Goal.Options and a Console, yields functions for writing to stdout and stderr, respectively. The passed options instance will generally be the `Goal.Options` of a `LineOriented` `Goal`.
def get_type_id(context, **kw):
    """Returns the type id for the context passed in.

    An explicit ``portal_type`` keyword wins; otherwise marker interfaces
    are checked before falling back to the portal type.
    """
    portal_type = kw.get("portal_type", None)
    if portal_type:
        return portal_type
    if IAnalysisRequestPartition.providedBy(context):
        return "AnalysisRequestPartition"
    elif IAnalysisRequestRetest.providedBy(context):
        return "AnalysisRequestRetest"
    elif IAnalysisRequestSecondary.providedBy(context):
        return "AnalysisRequestSecondary"
    return api.get_portal_type(context)
Returns the type id for the context passed in
def diff(self):
    """Calculate the difference between fs and db: return the names in
    ``self.todo`` that are not in ``self.done``, preserving todo order."""
    completed = set(self.done)
    remaining = []
    for name in self.todo:
        if name not in completed:
            remaining.append(name)
    return remaining
Calculate difference between fs and db.
def _ssweek_of_month(date_value): "0-starting index which Sundaystarting-week in the month this date is" weekday_of_first = (date_value.replace(day=1).weekday() + 1) % 7 return (date_value.day + weekday_of_first - 1) // 7
0-based index of the Sunday-starting week within the month that this date falls in
def get_container_streaming_uri(self, container):
    """Return the URI for streaming content, or None if CDN is not enabled."""
    resp, resp_body = self.api.cdn_request("/%s" % utils.get_name(container),
                                           method="HEAD")
    return resp.headers.get("x-cdn-streaming-uri")
Returns the URI for streaming content, or None if CDN is not enabled.
def seektime(self, disk):
    """Measure seek latency on a disk — a very good indication of the `type`
    of the disk (SSD vs HDD).

    :param disk: disk path or name (/dev/sda, or sda)
    :return: a dict as follows
        {'device': '<device-path>', 'elapsed': <seek-time in us>,
         'type': '<SSD or HDD>'}
    """
    args = {
        'disk': disk,
    }
    self._seektime_chk.check(args)
    return self._client.json("disk.seektime", args)
Gives seek latency on disk which is a very good indication to the `type` of the disk. it's a very good way to verify if the underlying disk type is SSD or HDD :param disk: disk path or name (/dev/sda, or sda) :return: a dict as follows {'device': '<device-path>', 'elapsed': <seek-time in us', 'type': '<SSD or HDD>'}
def _process_marked_candidate_indexes(candidate, markers):
    """Run regexes against candidate's marked indexes to strip the signature
    candidate.

    >>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc')
    [15, 17]
    """
    # Markers are matched from the end of the message, hence the reversal.
    match = RE_SIGNATURE_CANDIDATE.match(markers[::-1])
    return candidate[-match.end('candidate'):] if match else []
Run regexes against candidate's marked indexes to strip signature candidate. >>> _process_marked_candidate_indexes([9, 12, 14, 15, 17], 'clddc') [15, 17]
def escape(self, text):
    """Replace characters with their character references.

    Replace characters by their named entity references.  Non-ASCII
    characters, if they do not have a named entity reference, are replaced
    by numerical character references.  The return value is guaranteed to
    be ASCII.
    """
    return self.__escapable.sub(self.__escape, compat.text_type(text)
                                ).encode('ascii')
Replace characters with their character references. Replace characters by their named entity references. Non-ASCII characters, if they do not have a named entity reference, are replaced by numerical character references. The return value is guaranteed to be ASCII.
def read_contents(self, schema, name, conn):
    """Read the columns of table ``schema.name`` into ``self.contents``."""
    # NOTE(review): the SQL text appears to have been lost from this source;
    # as written this binds the logger to `sql`, which cannot work with
    # cur.execute — restore the original column query before use.
    sql = log = get_logger()
    cur = conn.cursor()
    cur.execute(sql, [schema, name])
    columns = cur.fetchall()
    for column in columns:
        # Row layout: (name, type, comment).
        column_dict = {'name': column[0], 'type': column[1], 'comment': column[2]}
        log.debug('{} {}: {}'.format(column[0], column[1], column[2]))
        self.contents.append(copy.deepcopy(column_dict))
    cur.close()
Read table columns
def CheckGlobalStatic(filename, clean_lines, linenum, error):
    """Check for unsafe global or static objects.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Join the next line to catch declarations split across two lines.
    if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
        line += clean_lines.elided[linenum + 1].strip()
    match = Match(
        r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
        line)
    # Exclude pointers-to-string, operator declarations, and function
    # declarations/calls from the static-string warning.
    if (match and
            not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
            not Search(r'\boperator\W', line) and
            not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
        error(filename, linenum, 'runtime/string', 4,
              'For a static/global string constant, use a C style string instead: '
              '"%schar %s[]".' % (match.group(1), match.group(2)))
    if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
        error(filename, linenum, 'runtime/init', 4,
              'You seem to be initializing a member variable with itself.')
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def _clear_surface(self, surface, rect=None): clear_color = self._rgb_clear_color if self._clear_color is None else self._clear_color surface.fill(clear_color, rect)
Clear the buffer, taking in account colorkey or alpha :return:
def html(self, unicode=False):
    """Return HTML of element.

    ``unicode=True`` decodes the serialised bytes to text.  (The parameter
    name shadows the py2 builtin; kept for API compatibility.)
    """
    html = lxml.html.tostring(self.element, encoding=self.encoding)
    if unicode:
        html = html.decode(self.encoding)
    return html
Return HTML of element
def read_cyclic_can_msg(self, channel, count):
    """Reads back the list of CAN messages for automatically sending.

    :param int channel: CAN channel to be used
        (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`).
    :param int count: The number of cyclic CAN messages to be received.
    :return: List of received CAN messages (up to 16, see structure
        :class:`CanMsg`).
    :rtype: list(CanMsg)
    """
    c_channel = BYTE(channel)
    c_can_msg = (CanMsg * count)()
    c_count = DWORD(count)
    UcanReadCyclicCanMsg(self._handle, byref(c_channel), c_can_msg, c_count)
    # Slice to the count reported after the driver call.
    return c_can_msg[:c_count.value]
Reads back the list of CAN messages for automatically sending. :param int channel: CAN channel, to be used (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int count: The number of cyclic CAN messages to be received. :return: List of received CAN messages (up to 16, see structure :class:`CanMsg`). :rtype: list(CanMsg)
def protect_api(uuid=None, **kwargs):
    """Retrieve object and check permissions.

    Retrieve ObjectVersion of image being requested and check permission
    using the Invenio-Files-REST permission factory.
    """
    # `uuid` is a composite "<bucket>:<version_id>:<key>"; the key itself
    # may contain ':' so split at most twice.
    bucket, version_id, key = uuid.split(':', 2)
    g.obj = ObjectResource.get_object(bucket, key, version_id)
    return g.obj
Retrieve object and check permissions. Retrieve ObjectVersion of image being requested and check permission using the Invenio-Files-REST permission factory.
def clean(self, x):
    """Return ``x`` with every row containing a NaN or inf value removed.

    :param x: 2-D numeric array
    :return: array of the surviving rows
    """
    bad_entries = np.isnan(x) | np.isinf(x)
    keep_rows = ~np.any(bad_entries, axis=1)
    return x[keep_rows]
remove nan and inf rows from x
def load_tab_data(self):
    """Preload all data for the tabs that will be displayed.

    A failing tab gets its data set to False and the error is routed
    through the exception handler instead of aborting the whole view.
    """
    for tab in self._tabs.values():
        if tab.load and not tab.data_loaded:
            try:
                tab._data = tab.get_context_data(self.request)
            except Exception:
                tab._data = False
                exceptions.handle(self.request)
Preload all data that for the tabs that will be displayed.
def get(self, id):
    """Get a single instance by pk id.

    :param id: The UUID of the instance you want to retrieve.
    """
    with rconnect() as conn:
        if id is None:
            raise ValueError
        if isinstance(id, uuid.UUID):
            id = str(id)
        # NOTE(review): `unicode` is the Python 2 text type; presumably this
        # module targets py2 or shims the name — confirm.
        if type(id) != str and type(id) != unicode:
            raise ValueError
        try:
            query = self._base().get(id)
            log.debug(query)
            rv = query.run(conn)
        except ReqlOpFailedError as e:
            log.warn(e)
            raise
        except Exception as e:
            log.warn(e)
            raise
        if rv is not None:
            return self._model(rv)
        return None
Get a single instance by pk id. :param id: The UUID of the instance you want to retrieve.
def _build_search_query(self, from_date): sort = [{self._sort_on_field: {"order": "asc"}}] filters = [] if self._repo: filters.append({"term": {"origin": self._repo}}) if from_date: filters.append({"range": {self._sort_on_field: {"gte": from_date}}}) if filters: query = {"bool": {"filter": filters}} else: query = {"match_all": {}} search_query = { "query": query, "sort": sort } return search_query
Build an ElasticSearch search query to retrieve items for read methods. :param from_date: date to start retrieving items from. :return: JSON query in dict format
def get_modified_items(self, target, path, last_modified_cutoff):
    """Get all files and directories from a path on the device modified
    since a given time.

    :param target: The device(s) to be targeted with this request
    :param path: The path on the target to the directory to check for
        modified files.
    :param last_modified_cutoff: The time (as Unix epoch time) to get files
        modified since
    :type last_modified_cutoff: int
    :return: A dictionary where the key is a device id and the value is
        either an :class:`~.ErrorInfo` if there was a problem with the
        operation or a :class:`~.LsInfo` with the items modified since the
        specified date
    """
    file_list = self.list_files(target, path)
    out_dict = {}
    for device_id, device_data in six.iteritems(file_list):
        if isinstance(device_data, ErrorInfo):
            # Propagate per-device errors unchanged.
            out_dict[device_id] = device_data
        else:
            files = []
            dirs = []
            for cur_file in device_data.files:
                if cur_file.last_modified > last_modified_cutoff:
                    files.append(cur_file)
            for cur_dir in device_data.directories:
                if cur_dir.last_modified > last_modified_cutoff:
                    dirs.append(cur_dir)
            out_dict[device_id] = LsInfo(directories=dirs, files=files)
    return out_dict
Get all files and directories from a path on the device modified since a given time :param target: The device(s) to be targeted with this request :type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances :param path: The path on the target to the directory to check for modified files. :param last_modified_cutoff: The time (as Unix epoch time) to get files modified since :type last_modified_cutoff: int :return: A dictionary where the key is a device id and the value is either an :class:`~.ErrorInfo` if there was a problem with the operation or a :class:`~.LsInfo` with the items modified since the specified date
def _setup(self):
    """Load the context module pointed to by the environment variable.

    This is used the first time we need the context at all, if the user has
    not previously configured the context manually.
    """
    context_module = os.environ.get(ENVIRONMENT_CONTEXT_VARIABLE, 'context')
    # Guards against the variable being set but empty (the default above
    # only applies when it is unset).
    if not context_module:
        raise ImproperlyConfigured(
            'Requested context points to an empty variable. '
            'You must either define the environment variable {0} '
            'or call context.configure() before accessing the context.'
            .format(ENVIRONMENT_CONTEXT_VARIABLE))
    self._wrapped = Settings(context_module, default_settings=global_context)
Load the context module pointed to by the environment variable. This is used the first time we need the context at all, if the user has not previously configured the context manually.
def _get_snpeff_cmd(cmd_name, datadir, data, out_file):
    """Retrieve the snpEff base command line with JVM/memory settings."""
    resources = config_utils.get_resources("snpeff", data["config"])
    jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3g"])
    # Scale the JVM heap with available cores, capped at 30 GB.
    jvm_opts = config_utils.adjust_opts(
        jvm_opts, {"algorithm": {"memory_adjust": {"direction": "increase",
                                                   "maximum": "30000M",
                                                   "magnitude": max(2, dd.get_cores(data))}}})
    memory = " ".join(jvm_opts)
    snpeff = config_utils.get_program("snpEff", data["config"])
    # Keep JVM temp files next to the output instead of the system tmpdir.
    java_args = "-Djava.io.tmpdir=%s" % utils.safe_makedir(os.path.join(os.path.dirname(out_file), "tmp"))
    export = "unset JAVA_HOME && export PATH=%s:\"$PATH\" && " % (utils.get_java_binpath())
    cmd = "{export} {snpeff} {memory} {java_args} {cmd_name} -dataDir {datadir}"
    return cmd.format(**locals())
Retrieve snpEff base command line.
def validate(self):
    """Validate the headers and body with the message schema, if any.

    In addition to the user-provided schema, all messages are checked
    against the base schema which requires certain message headers and
    that the body be a JSON object.

    .. warning:: This method should not be overridden by sub-classes.

    Raises:
        jsonschema.ValidationError: If either the message headers or the
            message body are invalid.
        jsonschema.SchemaError: If either the message header schema or the
            message body schema are invalid.
    """
    for schema in (self.headers_schema, Message.headers_schema):
        _log.debug(
            'Validating message headers "%r" with schema "%r"',
            self._headers, schema,
        )
        jsonschema.validate(self._headers, schema)
    for schema in (self.body_schema, Message.body_schema):
        _log.debug(
            'Validating message body "%r" with schema "%r"', self.body, schema
        )
        jsonschema.validate(self.body, schema)
Validate the headers and body with the message schema, if any. In addition to the user-provided schema, all messages are checked against the base schema which requires certain message headers and the that body be a JSON object. .. warning:: This method should not be overridden by sub-classes. Raises: jsonschema.ValidationError: If either the message headers or the message body are invalid. jsonschema.SchemaError: If either the message header schema or the message body schema are invalid.
def enable(name):
    """Set the named container to be launched at boot.

    CLI Example:

    .. code-block:: bash

        salt myminion nspawn.enable <name>
    """
    cmd = 'systemctl enable systemd-nspawn@{0}'.format(name)
    if __salt__['cmd.retcode'](cmd, python_shell=False) != 0:
        __context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE
        return False
    return True
Set the named container to be launched at boot CLI Example: .. code-block:: bash salt myminion nspawn.enable <name>
def _merge_ovls(self, ovls):
    """Merge ovls pairwise and also set up the merged value and props."""
    ret = reduce(lambda x, y: x.merge(y), ovls)
    ret.value = self.value(ovls=ovls)
    ret.set_props(self.props)
    return ret
Merge ovls and also setup the value and props.
def to_vobject(self, filename=None, uid=None):
    """Return an iCal object of the Remind lines.

    If filename and UID are specified, the vObject only contains that
    event.  If only a filename is specified, the vObject contains all
    events in the file.  Otherwise the vObject contains all objects of all
    files associated with the Remind object.

    filename -- the remind file
    uid -- the UID of the Remind line
    """
    self._update()
    cal = iCalendar()
    if uid:
        self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
    elif filename:
        for event in self._reminders[filename].values():
            self._gen_vevent(event, cal.add('vevent'))
    else:
        for filename in self._reminders:
            for event in self._reminders[filename].values():
                self._gen_vevent(event, cal.add('vevent'))
    return cal
Return iCal object of Remind lines If filename and UID are specified, the vObject only contains that event. If only a filename is specified, the vObject contains all events in the file. Otherwise the vObject contains all all objects of all files associated with the Remind object. filename -- the remind file uid -- the UID of the Remind line
def links(
    self,
    page: 'WikipediaPage',
    **kwargs
) -> PagesDict:
    """Returns links to other pages with respect to parameters.

    API Calls for parameters:
    - https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks
    - https://www.mediawiki.org/wiki/API:Links

    :param page: :class:`WikipediaPage`
    :param kwargs: parameters used in API call
    :return: links to linked pages
    """
    params = {
        'action': 'query',
        'prop': 'links',
        'titles': page.title,
        'pllimit': 500,
    }
    used_params = kwargs
    used_params.update(params)
    raw = self._query(
        page,
        used_params
    )
    self._common_attributes(raw['query'], page)
    pages = raw['query']['pages']
    for k, v in pages.items():
        if k == '-1':
            # API uses key '-1' for a missing page.
            page._attributes['pageid'] = -1
            return {}
        else:
            # Follow API continuation to gather all link batches.
            while 'continue' in raw:
                params['plcontinue'] = raw['continue']['plcontinue']
                raw = self._query(
                    page,
                    params
                )
                v['links'] += raw['query']['pages'][k]['links']
            return self._build_links(v, page)
    return {}
Returns links to other pages with respect to parameters API Calls for parameters: - https://www.mediawiki.org/w/api.php?action=help&modules=query%2Blinks - https://www.mediawiki.org/wiki/API:Links :param page: :class:`WikipediaPage` :param kwargs: parameters used in API call :return: links to linked pages
def set_uuid(obj, uuid=None):
    """Set the uuid attribute of an HDF5 object.

    Use this method to ensure the attribute is stored with the correct
    dtype (a 36-byte ASCII string).  A fresh UUID is generated when none
    is supplied; 16-byte values are treated as raw UUID bytes, other
    bytes as a hex representation.
    """
    from uuid import uuid4, UUID
    if uuid is None:
        uuid = uuid4()
    elif isinstance(uuid, bytes):
        uuid = UUID(bytes=uuid) if len(uuid) == 16 else UUID(hex=uuid)
    # Replace any existing attribute so the dtype is always rewritten.
    if "uuid" in obj.attrs:
        del obj.attrs["uuid"]
    obj.attrs.create("uuid", str(uuid).encode('ascii'), dtype="|S36")
Set the uuid attribute of an HDF5 object. Use this method to ensure correct dtype
def get_rate_from_db(currency: str) -> Decimal:
    """Fetch the conversion rate for ``currency`` from the database.

    :raises ValueError: if no ConversionRate row exists for the currency.
    """
    from .models import ConversionRate
    try:
        rate = ConversionRate.objects.get_rate(currency)
    except ConversionRate.DoesNotExist:
        raise ValueError('No conversion rate for %s' % (currency, ))
    return rate.rate
Fetch currency conversion rate from the database
def resolve(self, from_email, resolution=None):
    """Resolve an incident using a valid email address.

    :raises MissingFromEmail: if ``from_email`` is absent or not a string.
    """
    if from_email is None or not isinstance(from_email, six.string_types):
        raise MissingFromEmail(from_email)
    endpoint = '/'.join((self.endpoint, self.id,))
    add_headers = {'from': from_email, }
    data = {
        'incident': {
            'type': 'incident',
            'status': 'resolved',
        }
    }
    if resolution is not None:
        data['resolution'] = resolution
    result = self.request('PUT', endpoint=endpoint,
                          add_headers=add_headers, data=data,)
    return result
Resolve an incident using a valid email address.
def render(self, content_state=None):
    """Starts the export process on a given piece of content state."""
    if content_state is None:
        content_state = {}
    blocks = content_state.get('blocks', [])
    wrapper_state = WrapperState(self.block_map, blocks)
    document = DOM.create_element()
    entity_map = content_state.get('entityMap', {})
    # NOTE(review): despite the name, min_depth tracks the *maximum* block
    # depth seen — confirm intent.
    min_depth = 0
    for block in blocks:
        depth = block['depth']
        elt = self.render_block(block, entity_map, wrapper_state)
        if depth > min_depth:
            min_depth = depth
        # Only top-level blocks attach directly to the document; deeper
        # blocks are managed via the wrapper stack.
        if depth == 0:
            DOM.append_child(document, elt)
    if min_depth > 0 and wrapper_state.stack.length() != 0:
        DOM.append_child(document, wrapper_state.stack.tail().elt)
    return DOM.render(document)
Starts the export process on a given piece of content state.
def expireat(self, key, timestamp):
    """Set expire timestamp on a key.

    A float timestamp is multiplied by 1000, coerced to int and delegated
    to `pexpireat`.  Otherwise raises TypeError if the timestamp argument
    is not an int.
    """
    if isinstance(timestamp, float):
        return self.pexpireat(key, int(timestamp * 1000))
    if not isinstance(timestamp, int):
        raise TypeError("timestamp argument must be int, not {!r}"
                        .format(timestamp))
    fut = self.execute(b'EXPIREAT', key, timestamp)
    return wait_convert(fut, bool)
Set expire timestamp on a key. if timeout is float it will be multiplied by 1000 coerced to int and passed to `pexpireat` method. Otherwise raises TypeError if timestamp argument is not int.
def context_chunk(self, context, j):
    """Retrieve the tokens in the ``j``th chunk of context ``context``.

    Parameters
    ----------
    context : str
        Context name.
    j : int
        Index of a context chunk.

    Returns
    -------
    chunk : list
        List of tokens in the selected chunk.
    """
    chunk_starts = self.contexts[context]
    start = chunk_starts[j]
    # The last chunk runs to the end of the token sequence.
    if j == len(chunk_starts) - 1:
        end = len(self)
    else:
        end = chunk_starts[j + 1]
    # Fix: the original used the Python 2-only `xrange`; `range` behaves
    # identically here and works on Python 3.
    return [self[i] for i in range(start, end)]
Retrieve the tokens in the ``j``th chunk of context ``context``. Parameters ---------- context : str Context name. j : int Index of a context chunk. Returns ------- chunk : list List of tokens in the selected chunk.
def create_widget(self, place, type, file=None, **kwargs):
    """Create a widget object based on the given arguments.

    If ``file`` is provided and any widget attribute is callable, the
    widget is resolved by calling those attributes with the file as
    first parameter.  All extra ``kwargs`` are passed to the widget
    constructor.

    :param place: place hint where the widget should be shown.
    :param type: widget type name, a key of :attr:`widget_types`
        (falls back to the 'base' widget type).
    :param file: optional file object for widget attribute resolving.
    :returns: widget instance.
    :raises WidgetParameterException: when constructor arguments do not
        match the widget class's fields.
    """
    widget_class = self.widget_types.get(type, self.widget_types['base'])
    kwargs.update(place=place, type=type)
    try:
        element = widget_class(**kwargs)
    except TypeError as e:
        message = e.args[0] if e.args else ''
        # Translate constructor-signature mismatches into a richer
        # error listing the widget's accepted fields.
        if (
            'unexpected keyword argument' in message or
            'required positional argument' in message
        ):
            raise WidgetParameterException(
                'type %s; %s; available: %r' % (type, message, widget_class._fields)
            )
        raise e
    if file and any(map(callable, element)):
        # Some attributes are callables: resolve them against the file.
        return self._resolve_widget(file, element)
    return element
Create a widget object based on given arguments. If file object is provided, callable arguments will be resolved: its return value will be used after calling them with file as first parameter. All extra `kwargs` parameters will be passed to widget constructor. :param place: place hint where widget should be shown. :type place: str :param type: widget type name as taken from :attr:`widget_types` dict keys. :type type: str :param file: optional file object for widget attribute resolving :type type: browsepy.files.Node or None :returns: widget instance :rtype: object
def has_zero_length_fragments(self, min_index=None, max_index=None):
    """Return ``True`` if at least one interval in
    ``[min_index, max_index)`` has zero length.

    When the bounds are omitted, every interval is examined.

    :param int min_index: lowest fragment index examined (included)
    :param int max_index: fragment index bound (excluded)
    :raises ValueError: if ``min_index`` is negative or ``max_index``
        exceeds the current number of fragments
    :rtype: bool
    """
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    zero_length_indices = [
        index for index in range(min_index, max_index)
        if self[index].has_zero_length
    ]
    self.log([u"Fragments with zero length: %s", zero_length_indices])
    return len(zero_length_indices) > 0
Return ``True`` if the list has at least one interval
with zero length within ``min_index`` and ``max_index``.
If the latter are not specified, check all intervals.

:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
                    is bigger than the current number of fragments
:rtype: bool
def list_occupied_adb_ports():
    """List all host TCP ports currently occupied by ``adb forward``.

    adb silently overrides an existing binding instead of raising an
    error, so callers should consult this list before binding a port.

    Returns:
        A list of integers representing occupied host ports.
    """
    out = AdbProxy().forward('--list')
    clean_lines = str(out, 'utf-8').strip().split('\n')
    used_ports = []
    for line in clean_lines:
        # Each entry looks like '<serial> tcp:<host_port> tcp:<device_port>';
        # splitting on ' tcp:' yields [serial, host_port, device_port].
        tokens = line.split(' tcp:')
        if len(tokens) != 3:
            continue
        used_ports.append(int(tokens[1]))
    return used_ports
Lists all the host ports occupied by adb forward. This is useful because adb will silently override the binding if an attempt to bind to a port already used by adb was made, instead of throwing binding error. So one should always check what ports adb is using before trying to bind to a port with adb. Returns: A list of integers representing occupied host ports.
def grad(self, params, epsilon=0.0001):
    """Estimate the cost-function gradient at ``params`` by central
    finite differences (used to sanity-check analytic gradients).

    :param params: parameter vector at which to evaluate the gradient
    :param epsilon: half-width of the finite-difference step
    :returns: numpy array of per-parameter slope estimates
    """
    gradient = []
    for index in range(len(params)):
        forward = np.copy(params)
        backward = np.copy(params)
        forward[index] += epsilon
        backward[index] -= epsilon
        slope = (self.__cost_function(forward)
                 - self.__cost_function(backward)) / (2 * epsilon)
        gradient.append(slope)
    return np.array(gradient)
Used to check gradient estimation through slope approximation.
def dict_from_hdf5(dict_like, h5group):
    """Copy every attribute of the HDF5 group *h5group* into *dict_like*.

    Only ``__setitem__`` is required of *dict_like*; existing entries
    with the same names are overwritten.
    """
    attributes = h5group.attrs
    for attribute_name in attributes:
        dict_like[attribute_name] = attributes[attribute_name]
Load a dictionary-like object from an HDF5 file group
def write(self, fptr):
    """Serialize this channel definition ('cdef') box to *fptr*.

    Layout: 4-byte box length, 4-byte type 'cdef', 2-byte component
    count, then (index, channel_type, association) as big-endian
    uint16 triples.
    """
    self._validate(writing=True)
    num_components = len(self.association)

    box_length = 8 + 2 + num_components * 6
    fptr.write(struct.pack('>I4s', box_length, b'cdef'))
    fptr.write(struct.pack('>H', num_components))

    for channel in range(num_components):
        fptr.write(struct.pack('>HHH',
                               self.index[channel],
                               self.channel_type[channel],
                               self.association[channel]))
Write a channel definition box to file.
def set_neighbor_out_filter(neigh_ip_address, filters):
    """Set the outgoing-route filters of the neighbor at *neigh_ip_address*.

    :param neigh_ip_address: IP address identifying the BGP peer.
    :param filters: filter set assigned to the peer's ``out_filters``.
    :returns: ``True`` (lookup failures propagate as exceptions).
    """
    core = CORE_MANAGER.get_core_service()
    peer = core.peer_manager.get_by_addr(neigh_ip_address)
    peer.out_filters = filters
    return True
Sets the out_filter of a neighbor.
def center(self):
    """Return the footprint center in world coordinates, as a GeoVector."""
    # Center in pixel space, then project into world coordinates.
    image_center = Point(self.width / 2, self.height / 2)
    return self.to_world(image_center)
Return footprint center in world coordinates, as GeoVector.
def replaceSelectedText(self, text: str):
    """Undo-safe wrapper for the native ``replaceSelectedText`` method.

    :param text: text to replace the current selection.
    :returns: None
    """
    # Route the replacement through the undo stack so it can be undone.
    undoObj = UndoReplaceSelectedText(self, text)
    self.qteUndoStack.push(undoObj)
Undo safe wrapper for the native ``replaceSelectedText`` method. |Args| * ``text`` (**str**): text to replace the current selection. |Returns| **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
async def start_pairing(self):
    """Start the SRP pairing procedure with the device.

    Sends the initial crypto-pairing request and stores the salt and
    public key returned by the device for later pairing steps.

    :raises Exception: if the device responds with a back-off interval.
    """
    self.srp.initialize()

    msg = messages.crypto_pairing({
        tlv8.TLV_METHOD: b'\x00',
        tlv8.TLV_SEQ_NO: b'\x01'})
    resp = await self.protocol.send_and_receive(
        msg, generate_identifier=False)

    pairing_data = _get_pairing_data(resp)

    if tlv8.TLV_BACK_OFF in pairing_data:
        # Device is rate-limiting pairing attempts; report the delay.
        time = int.from_bytes(
            pairing_data[tlv8.TLV_BACK_OFF], byteorder='big')
        raise Exception('back off {0}s'.format(time))

    self._atv_salt = pairing_data[tlv8.TLV_SALT]
    self._atv_pub_key = pairing_data[tlv8.TLV_PUBLIC_KEY]
Start pairing procedure.
def package_existent(name):
    """Check whether *name* is already registered on PyPI.

    :param str name: package name
    :raises Conflict: the name already exists on PyPI
    :raises BackendFailure: the PyPI service is unreachable or down
    """
    try:
        response = requests.get(PYPI_URL.format(name))
        if response.ok:
            # A 2xx answer means the project page exists.
            msg = ('[error] "{0}" is registered already in PyPI.\n'
                   '\tSpecify another package name.').format(name)
            raise Conflict(msg)
    except (socket.gaierror, Timeout, ConnectionError, HTTPError) as exc:
        raise BackendFailure(exc)
Search package. * :class:`bootstrap_py.exceptions.Conflict` exception occurs when user specified name has already existed. * :class:`bootstrap_py.exceptions.BackendFailure` exception occurs when PyPI service is down. :param str name: package name
def callback(status, message, job, result, exception, stacktrace):
    """Example callback asserting the invariants of each job status.

    :param status: "invalid" (job could not be deserialized/malformed),
        "failure", "timeout" or "success".
    :param message: Kafka message.
    :param job: Job object, or None if status was "invalid".
    :param result: job result, or None if an exception was raised.
    :param exception: exception raised by the job, or None.
    :param stacktrace: exception traceback, or None.
    """
    assert status in ['invalid', 'success', 'timeout', 'failure']
    assert isinstance(message, Message)

    if status == 'invalid':
        assert job is None
        assert result is None
        assert exception is None
        assert stacktrace is None

    # NOTE(review): separate ``if`` (not ``elif``) — 'success' is still
    # re-checked after the 'invalid' branch; behavior is unchanged
    # because the statuses are mutually exclusive.
    if status == 'success':
        assert isinstance(job, Job)
        assert exception is None
        assert stacktrace is None

    elif status == 'timeout':
        assert isinstance(job, Job)
        assert result is None
        assert exception is None
        assert stacktrace is None

    elif status == 'failure':
        assert isinstance(job, Job)
        assert result is None
        assert exception is not None
        assert stacktrace is not None
Example callback function. :param status: Job status. Possible values are "invalid" (job could not be deserialized or was malformed), "failure" (job raised an exception), "timeout" (job timed out), or "success" (job finished successfully and returned a result). :type status: str :param message: Kafka message. :type message: kq.Message :param job: Job object, or None if **status** was "invalid". :type job: kq.Job :param result: Job result, or None if an exception was raised. :type result: object | None :param exception: Exception raised by job, or None if there was none. :type exception: Exception | None :param stacktrace: Exception traceback, or None if there was none. :type stacktrace: str | None
def dict_pick(dictionary, allowed_keys):
    """Return a new dict with only the entries whose key is in *allowed_keys*.

    :param dictionary: source mapping (not modified).
    :param allowed_keys: container of keys to keep.
    :returns: new dict restricted to ``allowed_keys``.
    """
    # ``dict.items()`` replaces the py2/py3 ``viewitems`` compat shim;
    # the file already uses py3-only syntax elsewhere (``yield from``).
    return {key: value for key, value in dictionary.items()
            if key in allowed_keys}
Return a dictionary only with keys found in `allowed_keys`
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand):
    """Hook the calmjs artifact build step into setuptools' build command.

    :param dist: the setuptools ``Distribution``.
    :param key: sub-command name appended to ``build.sub_commands``.
    :param value: the hook is installed only when this is exactly ``True``.
    :param cmdclass: expected class of the distribution's 'build' command.
    """
    if value is not True:
        return

    build_cmd = dist.get_command_obj('build')
    if not isinstance(build_cmd, cmdclass):
        # Someone replaced the build command; log and abort quietly.
        logger.error(
            "'build' command in Distribution is not an instance of "
            "'%s:%s' (got %r instead)",
            cmdclass.__module__, cmdclass.__name__, build_cmd)
        return

    build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
Trigger the artifact build process through the setuptools.
def _connect_kubectl(spec):
    """Return ContextService arguments for a Kubernetes (kubectl) connection.

    :param spec: connection specification object providing per-host
        settings (pod address, python path, timeouts, kubectl options).
    :returns: dict in the ContextService 'method'/'kwargs' shape.
    """
    return {
        'method': 'kubectl',
        'kwargs': {
            'pod': spec.remote_addr(),
            'python_path': spec.python_path(),
            # Prefer the SSH timeout when set; fall back to the generic one.
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'kubectl_path': spec.mitogen_kubectl_path(),
            'kubectl_args': spec.extra_args(),
            'remote_name': get_remote_name(spec),
        }
    }
Return ContextService arguments for a Kubernetes connection.
def fmt_ces(c, title=None):
    """Format a cause-effect structure for display.

    :param c: iterable of concepts; a falsy value renders as ``'()'``.
    :param title: optional heading; defaults to 'Cause-effect structure'.
    :returns: formatted multi-line string.
    """
    if not c:
        return '()\n'
    if title is None:
        title = 'Cause-effect structure'
    concepts = '\n'.join(margin(x) for x in c) + '\n'
    # Singular/plural handling for 'concept(s)'.
    title = '{} ({} concept{})'.format(
        title, len(c), '' if len(c) == 1 else 's')
    return header(title, concepts, HEADER_BAR_1, HEADER_BAR_1)
Format a |CauseEffectStructure|.
def _system(self, *args, **kwargs):
    """Collect system information into a fixed structure.

    Essentially calls grains items and picks out only the necessary
    information (CPU, disks, mounts, memory, network, OS).

    :param args: unused (accepted for interface compatibility).
    :param kwargs: unused (accepted for interface compatibility).
    :return: dict keyed by subsystem name.
    """
    sysinfo = SysInfo(__grains__.get("kernel"))
    data = dict()
    data['cpu'] = sysinfo._get_cpu()
    data['disks'] = sysinfo._get_fs()
    data['mounts'] = sysinfo._get_mounts()
    data['memory'] = sysinfo._get_mem()
    data['network'] = sysinfo._get_network()
    data['os'] = sysinfo._get_os()
    return data
This basically calls grains items and picks out only necessary information in a certain structure. :param args: :param kwargs: :return:
def change_password(self, password):
    """Change the user's password; the change is committed immediately.

    :param str password: new password
    :return: None
    :raises ModificationFailed: if the update request is rejected.
    """
    self.make_request(
        ModificationFailed,
        method='update',
        resource='change_password',
        params={'password': password})
Change user password. Change is committed immediately. :param str password: new password :return: None
def format_raw_field(key):
    """Return the not-analyzed subfield name for an ElasticSearch field.

    ElasticSearch analyzes strings by breaking them into parts, so exact
    matching must target a non-analyzed subfield.  The subfield name is
    configurable ('keyword' since ES 5.0, previously 'raw') to ease
    migration between versions.

    :param key: camelCase field name; converted to snake_case.
    :returns: '<snake_case_key>.<subfield>' string.
    """
    subfield = django_settings.WALDUR_CORE.get('ELASTICSEARCH', {}).get('raw_subfield', 'keyword')
    return '%s.%s' % (camel_case_to_underscore(key), subfield)
When ElasticSearch analyzes string, it breaks it into parts. In order make query for not-analyzed exact string values, we should use subfield instead. The index template for Elasticsearch 5.0 has been changed. The subfield for string multi-fields has changed from .raw to .keyword Thus workaround for backward compatibility during migration is required. See also: https://github.com/elastic/logstash/blob/v5.4.1/docs/static/breaking-changes.asciidoc
def acosh(x, context=None):
    """Return the inverse hyperbolic cosine of x.

    :param x: value convertible to BigFloat.
    :param context: optional context overriding the current one.
    """
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_acosh,
        (BigFloat._implicit_convert(x),),
        context,
    )
Return the inverse hyperbolic cosine of x.
def add_sync_callback(self, callback_cycles, callback):
    """Register *callback* to fire every *callback_cycles* CPU cycles.

    Also keeps ``quickest_sync_callback_cycles`` equal to the smallest
    interval registered so far.
    """
    self.sync_callbacks_cyles[callback] = 0
    self.sync_callbacks.append([callback_cycles, callback])

    quickest = self.quickest_sync_callback_cycles
    if quickest is None or quickest > callback_cycles:
        self.quickest_sync_callback_cycles = callback_cycles
Add a CPU cycle triggered callback
def log_time(method):
    """Decorator that logs (at DEBUG level) how long *method* took.

    The wrapped function behaves exactly like *method*; its metadata
    (``__name__``, ``__doc__``, ``__module__``, ``__wrapped__``, ...)
    is preserved via ``functools.wraps`` instead of the previous manual
    copy of only ``__name__`` and ``__doc__``.
    """
    from functools import wraps

    @wraps(method)
    def timed(*args, **kwargs):
        tic = time_function()
        result = method(*args, **kwargs)
        log.debug('%s executed in %.4f seconds.',
                  method.__name__, time_function() - tic)
        return result
    return timed
A decorator for methods which will time the method and then emit a log.debug message with the method name and how long it took to execute.
def paths(self):
    """Return the traversal paths of every connected component.

    Each component's paths are produced by ``_single_tree_paths`` and
    concatenated into one flat list.

    Returns: [ [(x,y,z), (x,y,z), ...], path_2, path_3, ... ]
    """
    all_paths = []
    for component in self.components():
        all_paths.extend(self._single_tree_paths(component))
    return all_paths
Assuming the skeleton is structured as a single tree, return a list of all traversal paths across all components. For each component, start from the first vertex, find the most distant vertex by hops and set that as the root. Then use depth first traversal to produce paths. Returns: [ [(x,y,z), (x,y,z), ...], path_2, path_3, ... ]
def find_value_in_object(attr, obj):
    """Yield every value stored under key *attr* anywhere inside *obj*.

    Recurses into lists/iterators and nested mappings; list/iterator
    values found under *attr* are yielded element by element.

    :param attr: key to look for in any nested mapping.
    :param obj: arbitrarily nested structure of mappings and sequences.
    """
    # BUG FIX: ``collections.Iterator`` / ``collections.Mapping`` were
    # deprecated aliases removed in Python 3.10; use ``collections.abc``.
    import collections.abc
    if isinstance(obj, (collections.abc.Iterator, list)):
        for item in obj:
            yield from find_value_in_object(attr, item)
    elif isinstance(obj, collections.abc.Mapping):
        if attr in obj:
            if isinstance(obj[attr], (collections.abc.Iterator, list)):
                for item in obj[attr]:
                    yield item
            else:
                yield obj[attr]
        # Also search every (truthy) value for deeper occurrences.
        for item in obj.values():
            if item:
                yield from find_value_in_object(attr, item)
Return values for any key coincidence with attr in obj or any other nested dict.
async def fetching_data(self, *_):
    """Fetch and parse the latest weather data from met.no.

    On success the parsed XML document is stored in ``self.data``.

    :returns: True on success, False on any network or parse error.
    """
    try:
        with async_timeout.timeout(10):
            resp = await self._websession.get(self._api_url, params=self._urlparams)
        if resp.status != 200:
            _LOGGER.error('%s returned %s', self._api_url, resp.status)
            return False
        text = await resp.text()
    except (asyncio.TimeoutError, aiohttp.ClientError) as err:
        _LOGGER.error('%s returned %s', self._api_url, err)
        return False
    try:
        # Only the 'weatherdata' root element is kept.
        self.data = xmltodict.parse(text)['weatherdata']
    except (ExpatError, IndexError) as err:
        _LOGGER.error('%s returned %s', resp.url, err)
        return False
    return True
Get the latest data from met.no.
def restart(self):
    """Restart this Docker container (asyncio coroutine)."""
    yield from self.manager.query("POST", "containers/{}/restart".format(self._cid))
    log.info("Docker container '{name}' [{image}] restarted".format(
        name=self._name, image=self._image))
Restart this Docker container.
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
    """Queue the execution of *command_line* on the remote server.

    Called 'launch' for historical reasons; semantically this enqueues
    the job.

    **Parameters**

    command_line : str
        Command to execute.
    """
    # NOTE(review): the mutable defaults ([]) are only read, never
    # mutated, so they are safe here.
    launch_params = dict(command_line=command_line, job_id=self.job_id)
    submit_params_dict = submit_params(self.destination_params)
    if submit_params_dict:
        launch_params['params'] = json_dumps(submit_params_dict)
    if dependencies_description:
        launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict())
    if env:
        launch_params['env'] = json_dumps(env)
    if remote_staging:
        launch_params['remote_staging'] = json_dumps(remote_staging)
    if job_config and 'touch_outputs' in job_config:
        # Tell the server which output files to pre-create.
        launch_params['submit_extras'] = json_dumps({'touch_outputs': job_config['touch_outputs']})
    if job_config and self.setup_handler.local:
        # Setup was already performed locally; forward its parameters.
        setup_params = _setup_params_from_job_config(job_config)
        launch_params['setup_params'] = json_dumps(setup_params)
    return self._raw_execute("submit", launch_params)
Queue up the execution of the supplied `command_line` on the remote server. Called launch for historical reasons, should be renamed to enqueue or something like that. **Parameters** command_line : str Command to execute.
def matching_tokens(self, text, start=0):
    """Yield ``(token_class, match)`` for each token definition whose
    pattern matches *text* at position *start*.

    Args:
        text (str): the text to test
        start (int): position where matching starts (``re.match`` pos)
    """
    for token_class, pattern in self._tokens:
        found = pattern.match(text, pos=start)
        if found:
            yield token_class, found
Retrieve all token definitions matching the beginning of a text. Args: text (str): the text to test start (int): the position where matches should be searched in the string (see re.match(rx, txt, pos)) Yields: (token_class, re.Match): all token class whose regexp matches the text, and the related re.Match object.
def calculate_boundaries(fine_states, full_magnetic_states):
    r"""Calculate the boundary indices within a list of magnetic states.

    Returns two lists of ``(start, end)`` tuples (end exclusive): the
    boundaries of each fine state and of each hyperfine state within
    ``full_magnetic_states``.

    >>> g=State("Rb", 87, 5, 0, 1/Integer(2))
    >>> full_magnetic_states=make_list_of_states([g],"magnetic")
    >>> calculate_boundaries([g], full_magnetic_states)
    ([(0, 8)], [(0, 3), (3, 8)])
    """
    # NOTE(review): ``fine_states`` is unused in the body; boundaries
    # are derived purely from the magnetic states' quantum numbers.
    N_magnetic = len(full_magnetic_states)
    # The first 4 quantum numbers identify a fine state, the first 5 a
    # hyperfine state.
    fq = full_magnetic_states[0].quantum_numbers[:4]
    index_list_fine = []; start_fine = 0
    hq = full_magnetic_states[0].quantum_numbers[:5]
    index_list_hyperfine = []; start_hyperfine = 0
    for i in range(N_magnetic):
        magnetic = full_magnetic_states[i]
        if magnetic.quantum_numbers[:4] != fq:
            # Fine-state boundary crossed: close the previous run.
            index_list_fine += [(start_fine, i)]
            start_fine = i
            fq = magnetic.quantum_numbers[:4]
        if magnetic.quantum_numbers[:5] != hq:
            # Hyperfine-state boundary crossed: close the previous run.
            index_list_hyperfine += [(start_hyperfine, i)]
            start_hyperfine = i
            hq = magnetic.quantum_numbers[:5]
        if i == N_magnetic-1:
            # Close the trailing runs at the end of the list.
            index_list_fine += [(start_fine, i+1)]
            index_list_hyperfine += [(start_hyperfine, i+1)]
    return index_list_fine, index_list_hyperfine
r"""Calculate the boundary indices within a list of magnetic states. This function calculates the boundary indices of each fine state and each hyperfine state within a list of magnetic states. The output is a list of tuples (a,b) with a the starting index of a state and b it's ending index. >>> g=State("Rb", 87, 5, 0, 1/Integer(2)) >>> full_magnetic_states=make_list_of_states([g],"magnetic") >>> calculate_boundaries([g], full_magnetic_states) ([(0, 8)], [(0, 3), (3, 8)])
def merge(metric_kind, prior, latest):
    """Merge ``prior`` and ``latest`` metric values.

    Args:
        metric_kind (:class:`MetricKind`): kind of metrics being merged
        prior (:class:`MetricValue`): the earlier instance of the metric
        latest (:class:`MetricValue`): the latest instance of the metric

    Raises:
        ValueError: if the two values have incompatible or unknown types.
    """
    prior_type, _ = _detect_value(prior)
    latest_type, _ = _detect_value(latest)
    if prior_type != latest_type:
        _logger.warn(u'Metric values are not compatible: %s, %s',
                     prior, latest)
        raise ValueError(u'Incompatible delta metric values')
    if prior_type is None:
        _logger.warn(u'Bad metric values, types not known for : %s, %s',
                     prior, latest)
        raise ValueError(u'Unsupported delta metric types')

    if metric_kind == MetricKind.DELTA:
        return _merge_delta_metric(prior, latest)
    else:
        # CUMULATIVE and GAUGE kinds share the same merge strategy.
        return _merge_cumulative_or_gauge_metrics(prior, latest)
Merges `prior` and `latest` Args: metric_kind (:class:`MetricKind`): indicates the kind of metrics being merged prior (:class:`MetricValue`): an prior instance of the metric latest (:class:`MetricValue`: the latest instance of the metric
def handle_offchain_secretreveal(
        initiator_state: InitiatorTransferState,
        state_change: ReceiveSecretReveal,
        channel_state: NettingChannelState,
        pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorTransferState]:
    """Once the next hop proves it knows the secret, unlock the transfer.

    Validates the revealed secret and, when valid, sends a new balance
    proof to the next hop with the lock removed from the merkle tree and
    the transferred amount updated.
    """
    iteration: TransitionResult[InitiatorTransferState]
    valid_reveal = is_valid_secret_reveal(
        state_change=state_change,
        transfer_secrethash=initiator_state.transfer_description.secrethash,
        secret=state_change.secret,
    )
    sent_by_partner = state_change.sender == channel_state.partner_state.address
    is_channel_open = channel.get_status(channel_state) == CHANNEL_STATE_OPENED

    # All three must hold: the secret matches our transfer, the channel
    # is still open, and the reveal actually came from our partner.
    if valid_reveal and is_channel_open and sent_by_partner:
        events = events_for_unlock_lock(
            initiator_state=initiator_state,
            channel_state=channel_state,
            secret=state_change.secret,
            secrethash=state_change.secrethash,
            pseudo_random_generator=pseudo_random_generator,
        )
        # New state is None: the transfer is finished for the initiator.
        iteration = TransitionResult(None, events)
    else:
        events = list()
        iteration = TransitionResult(initiator_state, events)

    return iteration
Once the next hop proves it knows the secret, the initiator can unlock the mediated transfer. This will validate the secret, and if valid a new balance proof is sent to the next hop with the current lock removed from the merkle tree and the transferred amount updated.
def update(self, group: 'SentenceGroup', flags: Flags) -> None:
    """Overlay the flags-specific *group* onto this "global" group.

    Sentences are merged pairwise; a missing entry on either side is
    treated as an empty ``Sentence``, and any extra sentences coming
    from *group* are appended to this group's list.
    """
    to_append = []
    for old, new in zip_longest(self.sentences, group.sentences):
        if old is None:
            # *group* has more sentences than we do: grow our list.
            old = Sentence()
            to_append.append(old)
        if new is None:
            new = Sentence()
        old.update(new, flags)
    self.sentences.extend(to_append)
This object is considered to be a "global" sentence group while the other one is flags-specific. All data related to the specified flags will be overwritten by the content of the specified group.
def splot(axes="gca", smoothing=5000, degree=5, presmoothing=0, plot=True, spline_class=spline_single, interactive=True, show_derivative=1):
    """Pull the first line's data from *axes* and spline-fit it.

    :param axes: axes to read from; "gca" means the current axes.
    :param smoothing: spline_single smoothing parameter.
    :param degree: spline degree.
    :param presmoothing: nearest-neighbor data presmoothing factor.
    :param plot: whether to plot the non-interactive result.
    :param spline_class: which data class to use for the fit.
    :param interactive: fit interactively, or just build an instance.
    :returns: an interactive fitting session when ``interactive`` is
        true, otherwise a *spline_class* instance.
    """
    if axes=="gca":
        axes = _pylab.gca()

    xlabel = axes.xaxis.label.get_text()
    ylabel = axes.yaxis.label.get_text()

    # Only the FIRST line's data is used.
    xdata = axes.get_lines()[0].get_xdata()
    ydata = axes.get_lines()[0].get_ydata()

    # NOTE(review): ``show_derivative`` is accepted but never used in
    # this body — confirm whether it should be forwarded.
    if interactive:
        return splinteractive(xdata, ydata, smoothing, degree, presmoothing, spline_class, xlabel, ylabel)
    else:
        return spline_class(xdata, ydata, smoothing, degree, presmoothing, plot, xlabel, ylabel)
gets the data from the plot and feeds it into splint

returns an instance of spline_single

axes="gca"                  which axes to get the data from.
smoothing=5000              spline_single smoothing parameter
degree=5                    spline_single spline degree
presmoothing=0              spline_single data presmoothing factor (nearest neighbor)
plot=True                   should we plot the result?
spline_class=spline_single  which data class to use?
interactive=True            should we spline fit interactively or just make a spline_single?
def check_keys_split(self, decoded):
    """Raise ValueError if *decoded* has keys invalid for orient='split'.

    :param decoded: dict parsed from JSON; must contain only keys
        listed in ``self._split_keys``.
    """
    bad_keys = set(decoded.keys()).difference(set(self._split_keys))
    if bad_keys:
        bad_keys = ", ".join(bad_keys)
        raise ValueError("JSON data had unexpected key(s): {bad_keys}"
                         .format(bad_keys=pprint_thing(bad_keys)))
Checks that dict has only the appropriate keys for orient='split'.
def _language_exclusions(stem: LanguageStemRange,
                         exclusions: List[ShExDocParser.LanguageExclusionContext]) -> None:
    """Parse ``languageExclusion = '-' LANGTAG STEM_MARK?`` into *stem*.

    Each exclusion is appended to ``stem.exclusions`` as either a
    ``LanguageStem`` (when the stem mark is present) or a bare LANGTAG.
    """
    for excl in exclusions:
        # [1:] strips the leading '-' from the parsed token text.
        excl_langtag = LANGTAG(excl.LANGTAG().getText()[1:])
        stem.exclusions.append(LanguageStem(excl_langtag) if excl.STEM_MARK() else excl_langtag)
languageExclusion = '-' LANGTAG STEM_MARK?
def safe_send(self, connection, target, message, *args, **kwargs):
    """Send *message* to *target*, split into IRC-length-safe chunks.

    The message is formatted with ``*args``/``**kwargs`` first; each
    chunk fits within the 512-byte IRC line limit (510 + CRLF) after
    the PRIVMSG prefix is accounted for.
    """
    prefix = "PRIVMSG {0} :".format(target)
    max_len = 510 - len(prefix)
    for chunk in chunks(message.format(*args, **kwargs), max_len):
        connection.send_raw("{0}{1}".format(prefix, chunk))
Safely sends a message to the given target
def write_and_quit_all(editor):
    """Write the current buffer, then quit all windows (:wqa)."""
    eb = editor.window_arrangement.active_editor_buffer
    if eb.location is None:
        # An unnamed buffer cannot be written; report and abort the quit.
        editor.show_message(_NO_FILE_NAME)
    else:
        eb.write()
        quit(editor, all_=True, force=False)
Write current buffer and quit all.
def getcwd(cls):
    """Return the context-local current working directory.

    The value is computed once from ``os.getcwd()`` and cached on
    ``cls._tl`` for subsequent calls within the same context.
    """
    thread_local = cls._tl
    if not hasattr(thread_local, "cwd"):
        thread_local.cwd = os.getcwd()
    return thread_local.cwd
Provide a context dependent current working directory. This method will return the directory currently holding the lock.
def print_event(attributes=[]):
    """Build a Python callback that pretty-prints events.

    :param attributes: names of event attributes to include in the
        printed representation.
    :returns: a one-argument callable suitable for use as a callback.
    """
    def python_callback(event):
        cls_name = type(event).__name__
        rendered = ', '.join(
            '{attr}={val}'.format(attr=name, val=event.__dict__[name])
            for name in attributes
        )
        print('{cls_name}({attrs})'.format(cls_name=cls_name, attrs=rendered))
    return python_callback
Function that returns a Python callback to pretty print the events.
def spinn5_fpga_link(x, y, link, root_x=0, root_y=0):
    """Return ``(fpga_num, link_num)`` for the FPGA handling this chip link.

    Assumes SpiNN-5 boards whose FPGAs run the SpI/O 'spinnaker_fpgas'
    image.  Returns None when the supplied link does not pass through
    an FPGA.

    :param x, y: the chip whose link is of interest.
    :param link: the link of interest.
    :param root_x, root_y: coordinates of the root (boot) chip.
    """
    # Translate to board-local chip coordinates before the table lookup.
    x, y = spinn5_chip_coord(x, y, root_x, root_y)
    return SPINN5_FPGA_LINKS.get((x, y, link))
Get the identity of the FPGA link which corresponds with the supplied link. .. note:: This function assumes the system is constructed from SpiNN-5 boards whose FPGAs are loaded with the SpI/O 'spinnaker_fpgas' image. Parameters ---------- x, y : int The chip whose link is of interest. link : :py:class:`~rig.links.Link` The link of interest. root_x, root_y : int The coordinates of the root chip (i.e. the chip used to boot the machine), e.g. from :py:attr:`rig.machine_control.MachineController.root_chip`. Returns ------- (fpga_num, link_num) or None If not None, the link supplied passes through an FPGA link. The returned tuple indicates the FPGA responsible for the sending-side of the link. `fpga_num` is the number (0, 1 or 2) of the FPGA responsible for the link. `link_num` indicates which of the sixteen SpiNNaker links (0 to 15) into an FPGA is being used. Links 0-7 are typically handled by S-ATA link 0 and 8-15 are handled by S-ATA link 1. Returns None if the supplied link does not pass through an FPGA.
def get_book_ids_by_comment(self, comment_id):
    """Gets the list of ``Book`` ``Ids`` mapped to a ``Comment``.

    arg:    comment_id (osid.id.Id): ``Id`` of a ``Comment``
    return: (osid.id.IdList) - list of book ``Ids``
    raise:  NotFound - ``comment_id`` is not found
    raise:  NullArgument - ``comment_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    mgr = self._get_provider_manager('COMMENTING', local=True)
    lookup_session = mgr.get_comment_lookup_session(proxy=self._proxy)
    # Search across all books, not just the current one.
    lookup_session.use_federated_book_view()
    comment = lookup_session.get_comment(comment_id)
    id_list = []
    for idstr in comment._my_map['assignedBookIds']:
        id_list.append(Id(idstr))
    return IdList(id_list)
Gets the list of ``Book`` ``Ids`` mapped to a ``Comment``. arg: comment_id (osid.id.Id): ``Id`` of a ``Comment`` return: (osid.id.IdList) - list of book ``Ids`` raise: NotFound - ``comment_id`` is not found raise: NullArgument - ``comment_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def read_until(self, delimiter: bytes, max_bytes: int = None) -> Awaitable[bytes]:
    """Asynchronously read until the given delimiter has been found.

    The result includes all data read, including the delimiter.  If
    ``max_bytes`` is not None, the connection is closed once more than
    that many bytes have been read without finding the delimiter.
    """
    future = self._start_read()
    self._read_delimiter = delimiter
    self._read_max_bytes = max_bytes
    try:
        self._try_inline_read()
    except UnsatisfiableReadError as e:
        # The data already buffered exceeds max_bytes: give up and
        # close the connection.
        gen_log.info("Unsatisfiable read, closing connection: %s" % e)
        self.close(exc_info=e)
        return future
    except:
        # Bare except is deliberate: consume the future's exception so
        # it is not reported as unobserved, then re-raise (including
        # BaseException) to the caller.
        future.add_done_callback(lambda f: f.exception())
        raise
    return future
Asynchronously read until we have found the given delimiter. The result includes all the data read including the delimiter. If ``max_bytes`` is not None, the connection will be closed if more than ``max_bytes`` bytes have been read and the delimiter is not found. .. versionchanged:: 4.0 Added the ``max_bytes`` argument. The ``callback`` argument is now optional and a `.Future` will be returned if it is omitted. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead.
def nd_load_and_stats(filenames, base_path=BASEPATH):
    """Load multiple Ding0 network pickles and compute per-district stats.

    Files are looked up under ``<base_path>/grids/``; loaded networks
    are merged into one and key indicators are computed per MV grid
    district.

    Parameters
    ----------
    filenames : list of str
        Files to analyze.
    base_path : str
        Root directory of the Ding0 data structure ('~/.ding0' default).

    Returns
    -------
    stats : pandas.DataFrame
        Statistics of each MV grid district.
    """
    nds = []
    for filename in filenames:
        pickle_path = os.path.join(base_path, 'grids', filename)
        try:
            nd_load = results.load_nd_from_pickle(filename=pickle_path)
            nds.append(nd_load)
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; keep the best-effort behavior
        # but let non-Exception signals propagate.
        except Exception:
            print("File {mvgd} not found. It was maybe excluded by Ding0 or "
                  "just forgotten to generate by you...".format(mvgd=filename))
    # NOTE: raises IndexError when no file could be loaded (unchanged).
    nd = nds[0]
    for n in nds[1:]:
        nd.add_mv_grid_district(n._mv_grid_districts[0])
    stats = results.calculate_mvgd_stats(nd)
    return stats
Load multiple files from disk and generate stats Passes the list of files assuming the ding0 data structure as default in :code:`~/.ding0`. Data will be concatenated and key indicators for each grid district are returned in table and graphic format. Parameters ---------- filenames : list of str Provide list of files you want to analyze base_path : str Root directory of Ding0 data structure, i.e. '~/.ding0' (which is default). Returns ------- stats : pandas.DataFrame Statistics of each MV grid districts