code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def load_config(fname: str) -> ModelConfig:
    """Load a model configuration from disk.

    :param fname: Path to load the model configuration from.
    :return: The loaded model configuration.
    """
    loaded = ModelConfig.load(fname)
    logger.info('ModelConfig loaded from "%s"', fname)
    return cast(ModelConfig, loaded)
Loads model configuration. :param fname: Path to load model configuration from. :return: Model configuration.
def parse_connection_string(self, connection):
    """Parse string as returned by the ``connected_users_info`` or
    ``user_sessions_info`` API calls.

    >>> EjabberdBackendBase().parse_connection_string('c2s_tls')
    (0, True, False)
    >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls')
    (0, True, True)
    >>> EjabberdBackendBase().parse_connection_string('http_bind')
    (2, None, None)

    :param connection: The connection string as returned by the ejabberd APIs.
    :type connection: str
    :return: A tuple representing the connection type, if it is encrypted and
        if it uses XMPP stream compression.
    :rtype: tuple
    """
    known = {
        'c2s': (CONNECTION_XMPP, False, False),
        'c2s_tls': (CONNECTION_XMPP, True, False),
        'c2s_compressed_tls': (CONNECTION_XMPP, True, True),
        'http_bind': (CONNECTION_HTTP_BINDING, None, None),
    }
    try:
        return known[connection]
    except KeyError:
        # ``Logger.warn`` is a deprecated alias; ``warning`` is the
        # documented method.
        log.warning('Could not parse connection string "%s"', connection)
        return CONNECTION_UNKNOWN, True, True
Parse string as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. >>> EjabberdBackendBase().parse_connection_string('c2s_tls') (0, True, False) >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls') (0, True, True) >>> EjabberdBackendBase().parse_connection_string('http_bind') (2, None, None) :param connection: The connection string as returned by the ejabberd APIs. :type connection: str :return: A tuple representing the connection type, if it is encrypted and if it uses XMPP stream compression. :rtype: tuple
def _str(self, name, val):
    """Return a printable ``name = val`` string.

    example -- _str('foo', 'bar') # foo = bar

    name -- string -- the variable name that was passed into one of the
        public methods
    val -- mixed -- the variable at name's value
    return -- string
    """
    wrapped = Value(val)
    if not name:
        return wrapped.string_value()
    logger.debug("{} is type {}".format(name, wrapped.typename))
    try:
        # Include the element count when ``val`` supports len().
        return "{} ({}) = {}".format(name, len(val), wrapped.string_value())
    except (TypeError, KeyError, AttributeError) as e:
        logger.info(e, exc_info=True)
        return "{} = {}".format(name, wrapped.string_value())
return a string version of name = val that can be printed example -- _str('foo', 'bar') # foo = bar name -- string -- the variable name that was passed into one of the public methods val -- mixed -- the variable at name's value return -- string
def get_creation_date_tags(url, domain, as_dicts=False):
    """Put together all data sources in this module and return their output.

    Args:
        url (str): URL of the web, with relative paths and so on.
        domain (str): Just the domain of the web.
        as_dicts (bool, default False): Convert output to dictionaries
            compatible with :class:`.SourceString`?

    Returns:
        list: Sorted list of :class:`TimeResource` objects or dicts.
    """
    tags = mementoweb_api_tags(url) + get_whois_tags(domain)
    tags.sort(key=lambda tag: tag.date)
    if as_dicts:
        return [tag._as_dict() for tag in tags]
    return tags
Put together all data sources in this module and return its output. Args: url (str): URL of the web. With relative paths and so on. domain (str): Just the domain of the web. as_dicts (bool, default False): Convert output to dictionaries compatible with :class:`.SourceString`? Returns: list: Sorted list of :class:`TimeResource` objects or dicts.
def _write_migration(self, creator, name, table, create, path): file_ = os.path.basename(creator.create(name, path, table, create)) return file_
Write the migration file to disk.
def is_subfeature_of(parent_property, f):
    """Return True iff ``f`` is an ordinary subfeature of
    ``parent_property``'s feature, or a subfeature of that feature specific
    to ``parent_property``'s value.
    """
    if __debug__:
        from .property import Property
        assert isinstance(parent_property, Property)
        assert isinstance(f, Feature)
    if not f.subfeature:
        return False
    parent = f.parent
    if not parent:
        return False
    parent_feature, parent_value = parent[0], parent[1]
    if parent_feature != parent_property.feature:
        return False
    # A value-specific subfeature only matches its own value.
    return not (parent_value and parent_value != parent_property.value)
Return true iff f is an ordinary subfeature of the parent_property's feature, or if f is a subfeature of the parent_property's feature specific to the parent_property's value.
def prepare_allseries(self, ramflag: bool = True) -> None:
    """Call method |Element.prepare_allseries| of all handled |Element|
    objects, showing a progress bar while iterating."""
    for handled_element in printtools.progressbar(self):
        handled_element.prepare_allseries(ramflag)
Call method |Element.prepare_allseries| of all handled |Element| objects.
def format_image(path, options):
    """Format an image.

    Args:
        path (str): Path to the image file.
        options (dict): Options to apply to the image.

    Returns:
        (list) A list of PIL images. Always of length 1 unless resolutions
        for resizing are provided in the options.
    """
    source = Image.open(path)
    return __pipeline_image(source, options)
Formats an image. Args: path (str): Path to the image file. options (dict): Options to apply to the image. Returns: (list) A list of PIL images. The list will always be of length 1 unless resolutions for resizing are provided in the options.
def format_assistants_lines(cls, assistants):
    """Return formatted assistants from the given list in human readable
    form, including a usage hint built from a randomly chosen assistant."""
    lines = cls._format_files(assistants, 'assistants')
    if assistants:
        lines.append('')
        chosen = random.choice(assistants)
        assistant = strip_prefix(chosen, 'assistants').replace(os.path.sep, ' ').strip()
        if len(assistants) == 1:
            templates = ['After you install this DAP, you can find help about the Assistant',
                         'by running "da {a} -h" .']
        else:
            templates = ['After you install this DAP, you can find help, for example about the Assistant',
                         '"{a}", by running "da {a} -h".']
        lines.extend(template.format(a=assistant) for template in templates)
    return lines
Return formatted assistants from the given list in human readable form.
def _setup_params(self_,**params):
    """Initialize default and keyword parameter values.

    First, ensures that all Parameters with 'instantiate=True' (typically
    used for mutable Parameters) are copied directly into each object, to
    ensure that there is an independent copy (to avoid surprising aliasing
    errors).  Then sets each of the keyword arguments, warning when any of
    them are not defined as parameters.  Constant Parameters can be set
    during calls to this method.
    """
    self = self_.param.self
    # Collect every Parameter declared with instantiate=True anywhere in
    # the class hierarchy ('name' is always excluded), so each instance
    # gets its own independent copy.
    params_to_instantiate = {}
    for class_ in classlist(type(self)):
        if not issubclass(class_, Parameterized):
            continue
        for (k,v) in class_.__dict__.items():
            if isinstance(v,Parameter) and v.instantiate and k!="name":
                params_to_instantiate[k]=v
    for p in params_to_instantiate.values():
        self.param._instantiate_param(p)
    # Apply the keyword arguments, warning for any name that is not a
    # declared Parameter (it is still set as a plain attribute).
    for name,val in params.items():
        desc = self.__class__.get_param_descriptor(name)[0]
        if not desc:
            self.param.warning("Setting non-parameter attribute %s=%s using a mechanism intended only for parameters",name,val)
        # Constant Parameters can be set during this call.
        setattr(self,name,val)
Initialize default and keyword parameter values. First, ensures that all Parameters with 'instantiate=True' (typically used for mutable Parameters) are copied directly into each object, to ensure that there is an independent copy (to avoid surprising aliasing errors). Then sets each of the keyword arguments, warning when any of them are not defined as parameters. Constant Parameters can be set during calls to this method.
def parse_json(self, page):
    """Parse ``page`` as a JSON feed and return its results.

    :param page: raw response body; decoded first if it is not already text.
    :return: the list stored under ``self.result_name``, or None when it is
        empty (after surfacing any error via ``check_status``).
    """
    # NOTE(review): ``basestring`` exists only on Python 2 — this code
    # appears to target py2; confirm before running under py3.
    if not isinstance(page, basestring):
        page = util.decode_page(page)
    self.doc = json.loads(page)
    results = self.doc.get(self.result_name, [])
    if not results:
        # An empty result may signal an API error; let the status check
        # raise/report before returning None.
        self.check_status(self.doc.get('status'))
        return None
    return results
Returns json feed.
def identify_datafiles(root, extensions_to_ignore=None, directories_to_ignore=None, files_to_ignore=None):
    """Identify files that might contain data.

    See function IP_verified() for details about optional parameters.

    :param root: directory to walk recursively.
    :param extensions_to_ignore: filename suffixes to skip, or None.
    :param directories_to_ignore: directory names pruned from the walk, or None.
    :param files_to_ignore: exact filenames to skip, or None.
    :yields: ``(dirpath, filename)`` tuples for candidate data files.
    """
    # Treat omitted filters as empty collections; the original defaults of
    # None made every call without explicit filters raise TypeError.
    extensions_to_ignore = extensions_to_ignore or []
    directories_to_ignore = directories_to_ignore or []
    files_to_ignore = files_to_ignore or []
    for dirpath, dirnames, filenames in walk(root):
        # Prune ignored directories in place so walk() never descends into
        # them.
        for ignore in directories_to_ignore:
            if ignore in dirnames:
                dirnames.remove(ignore)
        for filename in filenames:
            # endswith accepts a tuple of suffixes; an empty tuple matches
            # nothing.
            if filename.endswith(tuple(extensions_to_ignore)):
                continue
            if filename in files_to_ignore:
                continue
            yield dirpath, filename
Identify files that might contain data See function IP_verified() for details about optional parameters
def transform(fields, function, *tables):
    """Return a new table based on other tables and a transformation
    function.  Rows for which ``function`` returns a falsy value are
    dropped."""
    result = Table(fields=fields)
    for source in tables:
        for transformed in map(lambda row: function(row, source), source):
            if transformed:
                result.append(transformed)
    return result
Return a new table based on other tables and a transformation function
def service_executions(self, name=None, pk=None, scope=None, service=None, **kwargs):
    """Retrieve Service Executions.

    Additional ``keyword=value`` arguments are added to the request
    parameters; see the KE-chain API documentation for supported query
    parameters.

    :param name: (optional) name to limit the search for
    :param pk: (optional) primary key or id (UUID) of the service to search for
    :param scope: (optional) id (UUID) of the scope to search in
    :param service: (optional) service UUID to filter on
    :param kwargs: (optional) additional search keyword arguments
    :return: list of :class:`models.ServiceExecution` objects
    :raises NotFoundError: when the executions could not be retrieved
    """
    params = {'name': name, 'id': pk, 'service': service, 'scope': scope}
    if kwargs:
        params.update(**kwargs)
    response = self._request('GET', self._build_url('service_executions'), params=params)
    if response.status_code != requests.codes.ok:
        raise NotFoundError("Could not retrieve service executions")
    return [ServiceExecution(result, client=self) for result in response.json()['results']]
Retrieve Service Executions. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: (optional) name to limit the search for :type name: basestring or None :param pk: (optional) primary key or id (UUID) of the service to search for :type pk: basestring or None :param scope: (optional) id (UUID) of the scope to search in :type scope: basestring or None :param service: (optional) service UUID to filter on :type service: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: a list of :class:`models.ServiceExecution` objects :raises NotFoundError: When no `ServiceExecution` object is found
def _parse_references(xml):
    """Parse the references found in ``xml`` into ``Reference`` instances."""
    finder = HTMLReferenceFinder(xml)
    return [Reference(elm, _discover_uri_type(elm.get(uri_attr)), uri_attr)
            for elm, uri_attr in finder]
Parse the references to ``Reference`` instances.
def unoptimize_scope(self, frame):
    """Disable Python optimizations for the frame.

    Emits a dead ``dummy(...)`` call referencing every declared name in the
    frame so the generated code keeps those locals alive.
    """
    if frame.identifiers.declared:
        self.writeline('%sdummy(%s)' % (
            # Guard with ``if 0:`` when dead code would otherwise be
            # stripped from the output.
            unoptimize_before_dead_code and 'if 0: ' or '',
            ', '.join('l_' + name for name in frame.identifiers.declared)
        ))
Disable Python optimizations for the frame.
def _is_autonomous(indep, exprs): if indep is None: return True for expr in exprs: try: in_there = indep in expr.free_symbols except: in_there = expr.has(indep) if in_there: return False return True
Whether the expressions for the dependent variables are autonomous. Note that the system may still behave as an autonomous system on the interface of :meth:`integrate` due to use of pre-/post-processors.
def _encode_params(kw):
    """Encode keyword parameters as a URL query string.

    :param kw: mapping of parameter names to values.
    :return: ``key=value`` pairs joined with ``&``, with values URL-quoted.
    """
    args = []
    for k, v in kw.items():
        # NOTE(review): ``unicode`` exists only on Python 2; under Python 3
        # this raises NameError, which the bare except silently swallows and
        # the raw value is quoted instead — confirm the intended runtime.
        try:
            qv = v.encode('utf-8') if isinstance(v, unicode) else str(v)
        except:
            qv = v
        args.append('%s=%s' % (k, urlquote(qv)))
    return '&'.join(args)
Encode parameters.
def buildingname(ddtt):
    """Return the building name from the IDF model behind ``ddtt``."""
    model = ddtt.theidf
    first_building = model.idfobjects['building'.upper()][0]
    return first_building.Name
return building name
def shorten_type(typ):
    """Shorten a type name, e.g. drop a leading 'System.' prefix.

    The longest matching prefix from SHORTEN_TYPE_PREFIXES is removed."""
    longest = 0
    for prefix in SHORTEN_TYPE_PREFIXES:
        if typ.startswith(prefix) and len(prefix) > longest:
            longest = len(prefix)
    return typ[longest:]
Shorten a type. E.g. drops 'System.'
def _fetch_all(self):
    """Completely override QuerySet._fetch_all, additionally stamping the
    query time onto every fetched model instance.

    :return: See django.db.models.query.QuerySet._fetch_all for return values
    """
    if self._result_cache is None:
        self._result_cache = list(self.iterator())
        # Only model instances can carry the query-time stamp;
        # values()/values_list() iterables are left untouched.
        if self._iterable_class == ModelIterable:
            for x in self._result_cache:
                self._set_item_querytime(x)
    if self._prefetch_related_lookups and not self._prefetch_done:
        self._prefetch_related_objects()
Completely overrides the QuerySet._fetch_all method by adding the timestamp to all objects :return: See django.db.models.query.QuerySet._fetch_all for return values
def _split_lines(self):
    """Create the ``parsed_lines`` dict which keeps all record data in
    document order, indexed by record type (unrecognized lines under key
    0)."""
    grouped = {record_type: [] for record_type in all_record_types}
    grouped[0] = []
    for line in self.lines:
        record_type = line[0:6]
        if record_type in all_record_types:
            grouped[record_type].append(line)
        else:
            grouped[0].append(line)
    self.parsed_lines = grouped
    self._update_structure_lines()
Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.
def _expand_sequence(self, seq: List[GridQubit]) -> List[GridQubit]:
    """Tries to expand given sequence with more qubits.

    Args:
        seq: Linear sequence of qubits.

    Returns:
        New continuous linear sequence which contains all the qubits from
        seq and possibly new qubits inserted in between.
    """
    i = 1
    while i < len(seq):
        insertion = self._find_path_between(seq[i - 1], seq[i], set(seq))
        if not insertion:
            # Nothing to insert here; move to the next adjacent pair.
            i += 1
        else:
            seq = seq[:i] + insertion + seq[i:]
    return seq
Tries to expand given sequence with more qubits. Args: seq: Linear sequence of qubits. Returns: New continuous linear sequence which contains all the qubits from seq and possibly new qubits inserted in between.
def bounds(self, thr=0):
    """Get the bounding box over all segments.

    Returns:
        (float, float, float, float): min latitude, min longitude,
        max latitude and max longitude
    """
    inf = float("inf")
    min_lat, min_lon, max_lat, max_lon = inf, inf, -inf, -inf
    for segment in self.segments:
        lat_lo, lon_lo, lat_hi, lon_hi = segment.bounds(thr=thr)
        min_lat = min(min_lat, lat_lo)
        min_lon = min(min_lon, lon_lo)
        max_lat = max(max_lat, lat_hi)
        max_lon = max(max_lon, lon_hi)
    return min_lat, min_lon, max_lat, max_lon
Gets the bounds of this segment Returns: (float, float, float, float): Bounds, with min latitude, min longitude, max latitude and max longitude
def load_configuration(self):
    """Load the spline-loc configuration template shipped with this
    package."""
    template_path = os.path.join(os.path.dirname(__file__),
                                 'templates/spline-loc.yml.j2')
    with open(template_path) as stream:
        return Adapter(safe_load(stream)).configuration
Loading configuration.
def events(self, year, simple=False, keys=False):
    """Get a list of events in a given year.

    :param year: Year to get events from.
    :param simple: Get only vital data.
    :param keys: Get only keys of the events rather than full data.
    :return: List of string event keys or Event objects.
    """
    if keys:
        return self._get('events/%s/keys' % year)
    suffix = '/simple' if simple else ''
    return [Event(raw) for raw in self._get('events/%s%s' % (year, suffix))]
Get a list of events in a given year. :param year: Year to get events from. :param keys: Get only keys of the events rather than full data. :param simple: Get only vital data. :return: List of string event keys or Event objects.
def cublasZherk(handle, uplo, trans, n, k, alpha, A, lda, beta, C, ldc):
    """Rank-k operation on a Hermitian matrix via cuBLAS ``Zherk``.

    ``alpha`` and ``beta`` are real scalars passed by reference as C
    doubles; ``A`` and ``C`` are device pointers passed as integers.
    """
    status = _libcublas.cublasZherk_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       n, k,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(A), lda,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(C), ldc)
    # Raise on any non-success cuBLAS status code.
    cublasCheckStatus(status)
Rank-k operation on Hermitian matrix.
def _prepare_tmp_directory(self, tmp_dir): if tmp_dir: if os.path.exists(tmp_dir): raise SquashError( "The '%s' directory already exists, please remove it before you proceed" % tmp_dir) os.makedirs(tmp_dir) else: tmp_dir = tempfile.mkdtemp(prefix="docker-squash-") self.log.debug("Using %s as the temporary directory" % tmp_dir) return tmp_dir
Creates temporary directory that is used to work on layers
def strip_spaces(value, sep=None, join=True):
    """Clean trailing whitespace and collapse repeated whitespace around the
    separator.

    :param value: string to clean.
    :param sep: separator to split on (None means any whitespace run).
    :param join: when True return a joined string, otherwise the token list.
    """
    tokens = [token.strip() for token in value.strip().split(sep)]
    if not join:
        return tokens
    return (sep or ' ').join(tokens)
Cleans trailing whitespaces and replaces also multiple whitespaces with a single space.
def parse_first_row(row, url_instance):
    """Parse a torrent's first table row via `Parser.FIRST_ROW_XPATH`,
    scraping the torrent's id, title, tracked-by status, category url and
    torrent url.

    :param lxml.HtmlElement row: row to parse
    :param urls.Url url_instance: Url used to combine base url's with
        scrapped links from tr
    :return: scrapped id, title, tracked by status, category url and
        torrent url
    :rtype: list
    """
    tags = row.xpath(Parser.FIRST_ROW_XPATH)
    category_url = url_instance.combine(tags[0].get('href'))
    title = unicode(tags[1].text)
    torrent_link = tags[1].get('href')
    str_id = torrent_link.split('details/')[1]
    if str_id.endswith('/'):
        str_id = str_id[:-1]
    torrent_url = url_instance.combine(torrent_link)
    if len(tags) == 3:
        # A third tag marks an externally tracked torrent.
        category_url += '&external=1'
        tracked_by = '(external)'
    else:
        tracked_by = 'Demonoid'
    return [str_id, title, tracked_by, category_url, torrent_url]
Static method that parses a given table row element by executing `Parser.FIRST_ROW_XPATH` and scrapping torrent's id, title, tracked by status, category url and torrent url. Used specifically with a torrent's first table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped id, title, tracked by status, category url and torrent url :rtype: list
def count(self, stats, value, sample_rate=1):
    """Update one or more stats counters by an arbitrary value.

    >>> client = StatsdClient()
    >>> client.count('example.counter', 17)

    :param stats: counter name(s) to update.
    :param value: amount to add to the counter(s).
    :param sample_rate: fraction of the time to sample (1 = always).
    """
    self.update_stats(stats, value, self.SC_COUNT, sample_rate)
Updates one or more stats counters by arbitrary value >>> client = StatsdClient() >>> client.count('example.counter', 17)
def remove_tar_files(file_list):
    """Remove temporary tar archive files from a local directory."""
    for path in file_list:
        if file_exists(path) and path.endswith('.tar'):
            os.remove(path)
Public function that removes temporary tar archive files in a local directory
def _from_dict(cls, _dict):
    """Initialize a AnalysisResultsMetadata object from a json dictionary."""
    args = {}
    if 'authors' in _dict:
        args['authors'] = [Author._from_dict(author)
                           for author in (_dict.get('authors'))]
    # Plain scalar fields are copied through unchanged.
    for simple_key in ('publication_date', 'title', 'image'):
        if simple_key in _dict:
            args[simple_key] = _dict.get(simple_key)
    if 'feeds' in _dict:
        args['feeds'] = [Feed._from_dict(feed) for feed in (_dict.get('feeds'))]
    return cls(**args)
Initialize a AnalysisResultsMetadata object from a json dictionary.
def handle(self, args):
    """Greet each person by name, in the requested language."""
    greetings = {
        'french': 'Bonjour',
        'spanish': 'Hola',
        'english': 'Hello',
    }
    greeting = greetings[args.lang.lower()]
    return "\n".join("{} {}!".format(greeting, person) for person in args.name)
Greet each person by name.
def _one_q_state_prep(oneq_state: _OneQState):
    """Prepare a one qubit state.

    Either SIC[0-3], X[0-1], Y[0-1], or Z[0-1].
    """
    label = oneq_state.label
    if label == 'SIC':
        return _one_q_sic_prep(oneq_state.index, oneq_state.qubit)
    if label in ('X', 'Y', 'Z'):
        return _one_q_pauli_prep(label, oneq_state.index, oneq_state.qubit)
    raise ValueError(f"Bad state label: {label}")
Prepare a one qubit state. Either SIC[0-3], X[0-1], Y[0-1], or Z[0-1].
def process_dataset(dataset, models, **kargs):
    """Convert ``dataset`` to processed data using ``models``.

    Each model in ``models`` processes ``dataset`` via its ``builddataset``
    method (coarse-grained when ``ncg > 1``), and the results are averaged
    with :func:`gvar.dataset.avg_data`, which receives ``kargs``.
    """
    processed = collections.OrderedDict()
    for model in MultiFitter.flatten_models(models):
        built = model.builddataset(dataset)
        if model.ncg > 1:
            built = MultiFitter.coarse_grain(built, ncg=model.ncg)
        processed[model.datatag] = built
    return gvar.dataset.avg_data(processed, **kargs)
Convert ``dataset`` to processed data using ``models``. :class:`gvar.dataset.Dataset` (or similar dictionary) object ``dataset`` is processed by each model in list ``models``, and the results collected into a new dictionary ``pdata`` for use in :meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`. Assumes that the models have defined method :meth:`MultiFitterModel.builddataset`. Keyword arguments ``kargs`` are passed on to :func:`gvar.dataset.avg_data` when averaging the data.
def _get_version():
    """Get the xbps version as a list of version components, or False on
    failure."""
    version_string = __salt__['cmd.run'](
        [_check_xbps(), '--version'], output_loglevel='trace')
    if version_string is None:
        return False
    match = re.search(r'(?:XBPS:[\s]+)([\d.]+)(?:[\s]+.*)', version_string)
    if not match:
        return False
    return match.group(1).split('.')
Get the xbps version
def compliance_report(filepath=None, string=None, renderer='jinja|yaml', **kwargs):
    """Return the NAPALM compliance report.

    The validation document is rendered through Salt's multi-engine
    template pipeline before being handed to NAPALM.

    :param filepath: absolute path to the validation file.
    :param string: the compliance report sent as an inline string.
    :param renderer: renderer pipe to send the file through (overridden by a
        she-bang at the top of the file).
    :param kwargs: keyword args passed to Salt's compile_template() function.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.compliance_report salt://path/to/validator.sls
    """
    rendered_source = __salt__['slsutil.renderer'](path=filepath,
                                                   string=string,
                                                   default_renderer=renderer,
                                                   **kwargs)
    return salt.utils.napalm.call(
        napalm_device,
        'compliance_report',
        validation_source=rendered_source
    )
Return the compliance report. filepath The absolute path to the validation file. .. versionchanged:: 2019.2.0 Beginning with release codename ``2019.2.0``, this function has been enhanced, to be able to leverage the multi-engine template rendering of Salt, besides the possibility to retrieve the file source from remote systems, the URL schemes supported being: - ``salt://`` - ``http://`` and ``https://`` - ``ftp://`` - ``s3://`` - ``swift:/`` Or on the local file system (on the Minion). .. note:: The rendering result does not necessarily need to be YAML, instead it can be any format interpreted by Salt's rendering pipeline (including pure Python). string .. versionadded:: 2019.2.0 The compliance report send as inline string, to be used as the file to send through the renderer system. Note, not all renderer modules can work with strings; the 'py' renderer requires a file, for example. renderer: ``jinja|yaml`` .. versionadded:: 2019.2.0 The renderer pipe to send the file through; this is overridden by a "she-bang" at the top of the file. kwargs .. versionchanged:: 2019.2.0 Keyword args to pass to Salt's compile_template() function. CLI Example: .. code-block:: bash salt '*' napalm.compliance_report ~/validate.yml salt '*' napalm.compliance_report salt://path/to/validator.sls Validation File Example (pure YAML): .. code-block:: yaml - get_facts: os_version: 4.17 - get_interfaces_ip: Management1: ipv4: 10.0.2.14: prefix_length: 24 _mode: strict Validation File Example (as Jinja + YAML): .. code-block:: yaml - get_facts: os_version: {{ grains.version }} - get_interfaces_ip: Loopback0: ipv4: {{ grains.lo0.ipv4 }}: prefix_length: 24 _mode: strict - get_bgp_neighbors: {{ pillar.bgp.neighbors }} Output Example: .. 
code-block:: yaml device1: ---------- comment: out: ---------- complies: False get_facts: ---------- complies: False extra: missing: present: ---------- os_version: ---------- actual_value: 15.1F6-S1.4 complies: False nested: False get_interfaces_ip: ---------- complies: False extra: missing: - Management1 present: ---------- skipped: result: True
def op_at_on(operation: ops.Operation, time: Timestamp, device: Device):
    """Create a scheduled operation whose duration is determined by the
    device."""
    duration = device.duration_of(operation)
    return ScheduledOperation(time, duration, operation)
Creates a scheduled operation with a device-determined duration.
def add_coreference(self, coreference):
    """Add a coreference to the coreference layer, creating the layer on
    first use.

    @type coreference: L{Ccoreference}
    @param coreference: the coreference object
    """
    if self.coreference_layer is None:
        # Lazily create the layer and attach it to the document root.
        self.coreference_layer = Ccoreferences(type=self.type)
        self.root.append(self.coreference_layer.get_node())
    self.coreference_layer.add_coreference(coreference)
Adds an coreference to the coreference layer @type coreference: L{Ccoreference} @param coreference: the coreference object
def number(self):
    """Return the number of batches the batched sequence contains.

    :rtype: integer.
    """
    batch_count = math.ceil(self.total_size / float(self.size))
    return int(batch_count)
Returns the number of batches the batched sequence contains. :rtype: integer.
def get_single_keywords(skw_db, fulltext):
    """Find single keywords in the fulltext.

    :param skw_db: list of KeywordToken objects
    :param fulltext: string, which will be searched
    :return : dictionary of matches in a format {
        <keyword object>, [[position, position...], ], ..
    }
    """
    # NOTE(review): time.clock() was removed in Python 3.8 — confirm the
    # intended runtime.
    timer_start = time.clock()
    records = []
    for single_keyword in skw_db.values():
        for regex in single_keyword.regex:
            for match in regex.finditer(fulltext):
                # Inclusive (start, end) span of this match.
                span = (match.span()[0], match.span()[1] - 1)
                # Drop previously recorded matches fully contained inside
                # this one — the wider match wins.
                records = [record for record in records
                           if not _contains_span(span, record[0])]
                add = True
                for previous_record in records:
                    if ((span, single_keyword) == previous_record
                            or _contains_span(previous_record[0], span)):
                        # Exact duplicate, or contained in an earlier
                        # (wider) match: skip it.
                        add = False
                        break
                if add:
                    records.append((span, single_keyword))
    # Group the surviving spans by keyword object.
    single_keywords = {}
    for span, single_keyword in records:
        single_keywords.setdefault(single_keyword, [[]])
        single_keywords[single_keyword][0].append(span)
    current_app.logger.info(
        "Matching single keywords... %d keywords found "
        "in %.1f sec." % (len(single_keywords), time.clock() - timer_start),
    )
    return single_keywords
Find single keywords in the fulltext. :param skw_db: list of KeywordToken objects :param fulltext: string, which will be searched :return : dictionary of matches in a format { <keyword object>, [[position, position...], ], .. }
def create(dataset, target, features=None, validation_set='auto', verbose=True):
    """Automatically create a suitable regression model based on the
    provided training data.

    Parameters
    ----------
    dataset : SFrame
        Dataset for training the model.
    target : str
        Name of the column that is the prediction target; must be numeric.
    features : list[string], optional
        Names of the feature columns; None uses all columns except the target.
    validation_set : SFrame, optional
        Dataset for monitoring generalization performance; 'auto' samples
        one automatically, None disables the extra metrics.
    verbose : boolean, optional
        If True, print progress information during training.

    Returns
    -------
    out : A trained regression model.
    """
    dataset, validation_set = _validate_data(dataset, target, features,
                                             validation_set)
    if validation_set is None:
        validation_set = _turicreate.SFrame()
    proxy = _turicreate.extensions.create_automatic_regression_model(
        dataset, target, validation_set, {})
    return _sl.wrap_model_proxy(proxy)
Automatically create a suitable regression model based on the provided training data. To use specific options of a desired model, use the ``create`` function of the corresponding model. Parameters ---------- dataset : SFrame Dataset for training the model. target : str The name of the column in ``dataset`` that is the prediction target. This column must have a numeric type (int/float). features : list[string], optional Names of the columns containing features. 'None' (the default) indicates that all columns except the target variable should be used as features. The features are columns in the input SFrame that can be of the following types: - *Numeric*: values of numeric type integer or float. - *Categorical*: values of type string. - *Array*: list of numeric (integer or float) values. Each list element is treated as a separate feature in the model. - *Dictionary*: key-value pairs with numeric (integer or float) values Each key of a dictionary is treated as a separate feature and the value in the dictionary corresponds to the value of the feature. Dictionaries are ideal for representing sparse data. Columns of type *list* are not supported. Convert such feature columns to type array if all entries in the list are of numeric types. If the lists contain data of mixed types, separate them out into different columns. validation_set : SFrame, optional A dataset for monitoring the model's generalization performance. For each row of the progress table, the chosen metrics are computed for both the provided training dataset and the validation_set. The format of this SFrame must be the same as the training set. By default this argument is set to 'auto' and a validation set is automatically sampled and used for progress printing. If validation_set is set to None, then no additional metrics are computed. The default value is 'auto'. verbose : boolean, optional If True, print progress information during training. Returns ------- out : A trained regression model. 
See Also -------- turicreate.linear_regression.LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression Examples -------- .. sourcecode:: python # Setup the data >>> import turicreate as tc >>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv') # Selects the best model based on your data. >>> model = tc.regression.create(data, target='price', ... features=['bath', 'bedroom', 'size']) # Make predictions and evaluate results. >>> predictions = model.predict(data) >>> results = model.evaluate(data) # Setup the data >>> import turicreate as tc >>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv') # Selects the best model based on your data. >>> model = tc.regression.create(data, target='price', ... features=['bath', 'bedroom', 'size']) # Make predictions and evaluate results. >>> predictions = model.predict(data) >>> results = model.evaluate(data)
def snappy_encode(payload, xerial_compatible=False, xerial_blocksize=32 * 1024):
    """Encode the given data with snappy.

    If ``xerial_compatible`` is set, the stream is encoded in a fashion
    compatible with the xerial snappy library: a 16-byte header followed by
    (BE int32 block length, snappy bytes) pairs.  ``xerial_blocksize`` is
    the amount of *uncompressed* data presented to snappy per block (32k is
    the xerial default), so each encoded block length is <= the blocksize.
    """
    if not has_snappy():
        raise NotImplementedError("Snappy codec is not available")
    if xerial_compatible:
        # NOTE(review): ``xrange`` exists only on Python 2 — this code
        # appears to target py2.
        def _chunker():
            for i in xrange(0, len(payload), xerial_blocksize):
                yield payload[i:i + xerial_blocksize]
        out = BytesIO()
        # Fixed xerial stream header, packed big-endian field by field.
        header = b''.join([struct.pack('!' + fmt, dat)
                           for fmt, dat in zip(_XERIAL_V1_FORMAT, _XERIAL_V1_HEADER)])
        out.write(header)
        for chunk in _chunker():
            block = snappy.compress(chunk)
            block_size = len(block)
            # Each block is preceded by its compressed length (BE int32).
            out.write(struct.pack('!i', block_size))
            out.write(block)
        out.seek(0)
        return out.read()
    else:
        return snappy.compress(payload)
Encodes the given data with snappy if xerial_compatible is set then the stream is encoded in a fashion compatible with the xerial snappy library The block size (xerial_blocksize) controls how frequent the blocking occurs 32k is the default in the xerial library. The format winds up being +-------------+------------+--------------+------------+--------------+ | Header | Block1 len | Block1 data | Blockn len | Blockn data | |-------------+------------+--------------+------------+--------------| | 16 bytes | BE int32 | snappy bytes | BE int32 | snappy bytes | +-------------+------------+--------------+------------+--------------+ It is important to note that the blocksize is the amount of uncompressed data presented to snappy at each block, whereas the blocklen is the number of bytes that will be present in the stream, that is the length will always be <= blocksize.
def _get_pdb_id(self, elem, **kwargs): id = elem.attrib['ID'] if self.restrict_to_transmembrane_proteins: tmp = elem.attrib['TMP'] assert(tmp == 'no' or tmp == 'yes' or tmp == 'not') if tmp == 'yes': self.ids[id] = PDBTM._get_tm_type(elem) else: self.ids[id] = self.ids.get(id, 0) + 1
If self.restrict_to_transmembrane_proteins is False then this adds all ids to self.ids. Otherwise, only transmembrane protein ids are added.
def create_redis_client(self):
    """Create a redis client.

    Connects using this node's redis address and the configured password.
    """
    return ray.services.create_redis_client(
        self._redis_address, self._ray_params.redis_password)
Create a redis client.
def do_authenticate_account(self, authc_token):
    """Authenticate ``authc_token`` against the configured realms.

    Returns an account object only when the current token authenticates AND
    the authentication process is complete, raising otherwise.

    :returns: Account
    :raises AdditionalAuthenticationRequired: when additional tokens are
        required (e.g. TOTP for MFA), passing the account id
    """
    try:
        realms = self.token_realm_resolver[authc_token.__class__]
    except KeyError:
        raise KeyError('Unsupported Token Type Provided: ', authc_token.__class__.__name__)
    # NOTE(review): the guard checks ``self.realms`` but indexes the
    # resolved ``realms`` list — confirm these are intended to be the same
    # collection.
    if (len(self.realms) == 1):
        account = self.authenticate_single_realm_account(realms[0], authc_token)
    else:
        account = self.authenticate_multi_realm_account(self.realms, authc_token)
    # Lock-out check based on prior failed attempts for this credential type.
    cred_type = authc_token.token_info['cred_type']
    attempts = account['authc_info'][cred_type].get('failed_attempts', [])
    self.validate_locked(authc_token, attempts)
    # More credential tiers exist than the token satisfies: trigger MFA
    # (dispatching a TOTP token when a dispatcher is configured) and raise.
    if len(account['authc_info']) > authc_token.token_info['tier']:
        if self.mfa_dispatcher:
            realm = self.token_realm_resolver[TOTPToken][0]
            totp_token = realm.generate_totp_token(account)
            mfa_info = account['authc_info']['totp_key']['2fa_info']
            self.mfa_dispatcher.dispatch(authc_token.identifier, mfa_info, totp_token)
        raise AdditionalAuthenticationRequired(account['account_id'])
    return account
Returns an account object only when the current token authenticates AND the authentication process is complete, raising otherwise :returns: Account :raises AdditionalAuthenticationRequired: when additional tokens are required, passing the account object
def connect_reftrack_scenenode(self, refobj, scenenode):
    """Connect the given reftrack node with the given scene node.

    :param refobj: the reftrack node to connect
    :type refobj: str
    :param scenenode: the jb_sceneNode to connect
    :type scenenode: str
    :returns: None
    :rtype: None
    :raises: None
    """
    connections = (
        ("%s.scenenode" % refobj, "%s.reftrack" % scenenode),
        ("%s.taskfile_id" % scenenode, "%s.taskfile_id" % refobj),
    )
    for source, destination in connections:
        if not cmds.isConnected(source, destination):
            cmds.connectAttr(source, destination, force=True)
Connect the given reftrack node with the given scene node :param refobj: the reftrack node to connect :type refobj: str :param scenenode: the jb_sceneNode to connect :type scenenode: str :returns: None :rtype: None :raises: None
def download_and_extract_to_mkdtemp(bucket, key, session=None):
    """Download a zip archive from S3 and extract it to a new temporary
    directory.

    :param bucket: S3 bucket name.
    :param key: key of the zip object to download.
    :param session: optional boto3 session; a default client is used
        otherwise.
    :return: path of the directory the archive was extracted into.
    """
    s3_client = session.client('s3') if session else boto3.client('s3')
    transfer = S3Transfer(s3_client)
    filedes, temp_file = tempfile.mkstemp()
    os.close(filedes)
    try:
        transfer.download_file(bucket, key, temp_file)
        output_dir = tempfile.mkdtemp()
        # Context manager guarantees the archive handle is closed even when
        # extraction raises (the original leaked it on error).
        with zipfile.ZipFile(temp_file, 'r') as zip_ref:
            zip_ref.extractall(output_dir)
    finally:
        # Always remove the downloaded archive, even on failure.
        os.remove(temp_file)
    return output_dir
Download zip archive and extract it to temporary directory.
def _speak_as_spell_out_inherit(self, element):
    """Speak one letter at a time for each word for elements and
    descendants.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    # Clear any conflicting speak-as markup before re-applying spell-out.
    self._reverse_speak_as(element, 'spell-out')
    self._isolate_text_node(element)
    self._visit(element, self._speak_as_spell_out)
Speak one letter at a time for each word for elements and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
    """Generate a 3d figure of the soma.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        color(str or None): Color of plotted values, None corresponds to
            default choice
        alpha(float): Transparency of plotted values
    """
    soma_color = _get_color(color, tree_type=NeuriteType.soma)
    if not isinstance(soma, SomaCylinders):
        # Point somas are drawn as a single sphere.
        common.plot_sphere(ax, center=soma.center[COLS.XYZ],
                           radius=soma.radius,
                           color=soma_color, alpha=alpha)
    else:
        for begin, finish in zip(soma.points, soma.points[1:]):
            common.plot_cylinder(ax,
                                 start=begin[COLS.XYZ],
                                 end=finish[COLS.XYZ],
                                 start_radius=begin[COLS.R],
                                 end_radius=finish[COLS.R],
                                 color=soma_color, alpha=alpha)
    _update_3d_datalim(ax, soma)
Generates a 3d figure of the soma. Args: ax(matplotlib axes): on what to plot soma(neurom.core.Soma): plotted soma color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
def convert_value(value, source_currency, target_currency):
    """Convert a value from one currency to another using exchange rates.

    :param value: the value to convert
    :param source_currency: source ISO-4217 currency code
    :param target_currency: target ISO-4217 currency code
    :returns: the converted value
    """
    if source_currency == target_currency:
        return value
    return value * get_rate(source_currency, target_currency)
Converts a value from one currency to another using exchange rates :param value: the value to convert :type value: decimal :param source_currency: source ISO-4217 currency code :type source_currency: str :param target_currency: target ISO-4217 currency code :type target_currency: str :returns: the converted value
def gsr_path(self):
    """Absolute path of the GSR file.

    The path is cached in ``self._gsr_path`` once found.
    NOTE(review): when the file is absent this falls through and returns
    None, not the empty string — confirm what callers expect.
    """
    try:
        # Fast path: cached value from a previous lookup.
        return self._gsr_path
    except AttributeError:
        path = self.outdir.has_abiext("GSR")
        if path:
            self._gsr_path = path
            return path
Absolute path of the GSR file. Empty string if file is not present.
def bind_download_buttons(cls):
    """Bind the download buttons to click callbacks (browser-side code).

    On click, the textarea content of the enclosing form is copied into the
    form's input and the form action is pointed at the API download path.
    """
    def on_click(ev):
        button_el = ev.target
        # Walk up from the button to its enclosing form element.
        form_el = button_el.parent.parent.parent
        content = form_el.get(selector="textarea")[0].text
        # The form's name doubles as the downloaded file's suffix.
        suffix = form_el.name
        download_path = "as_file/%s.%s" % (cls.filename, suffix)
        form_el.action = join(settings.API_PATH, download_path)
        input_el = form_el.get(selector="input")[0]
        input_el.value = content
    for el in document.get(selector="button.output_download_button"):
        el.bind("click", on_click)
Bind buttons to callbacks.
def namedtuple_storable(namedtuple, *args, **kwargs):
    """Storable factory for named tuples.

    Delegates to ``default_storable``, exposing the named tuple's
    ``_fields`` as the stored attributes.
    """
    return default_storable(namedtuple, namedtuple._fields, *args, **kwargs)
Storable factory for named tuples.
def get_total_time(self):
    """Return the total period between when the first and last steps were
    logged.

    This usually corresponds to the total training time if there were no
    gaps in the training.
    """
    start = self.history[self.steps[0]]["__timestamp__"]
    end = self.history[self.steps[-1]]["__timestamp__"]
    return datetime.timedelta(seconds=end - start)
Returns the total period between when the first and last steps were logged. This usually corresponds to the total training time if there were no gaps in the training.
def _py3_crc16(value):
    """Calculate the CRC for the value in Python 3.

    :param bytes value: The value to return for the CRC Checksum
    :rtype: int
    """
    crc = 0
    for octet in value:
        index = ((crc >> 8) ^ octet) & 0xff
        crc = ((crc << 8) & 0xffff) ^ _CRC16_LOOKUP[index]
    return crc
Calculate the CRC for the value in Python 3 :param bytes value: The value to return for the CRC Checksum :rtype: int
def rename_keys(record: Mapping, key_map: Mapping) -> dict:
    """Return a new dict with the same values, renaming any key found in
    ``key_map``."""
    return {key_map.get(key, key): value for key, value in record.items()}
New record with same keys or renamed keys if key found in key_map.
def blob(self, sha):
    """Get the blob indicated by ``sha``.

    :param str sha: (required), sha of the blob
    :returns: :class:`Blob <github3.git.Blob>` if successful, otherwise None
    """
    blob_url = self._build_url('git', 'blobs', sha, base_url=self._api)
    payload = self._json(self._get(blob_url), 200)
    if not payload:
        return None
    return Blob(payload)
Get the blob indicated by ``sha``. :param str sha: (required), sha of the blob :returns: :class:`Blob <github3.git.Blob>` if successful, otherwise None
def merge_chunk_data(output_file="merged_idx_contig_hit_size_cov.txt", *files):
    """Merge chunk data from different networks.

    Similarly to merge_network, this merges any number of chunk data files,
    summing the ``hit`` and ``cov`` columns per chunk id.

    Parameters
    ----------
    output_file : file, str, or pathlib.Path, optional
        The output file to write the merged chunk data files into.
        Default is merged_idx_contig_hit_size_cov.txt
    `*files` : file, str or pathlib.Path
        The chunk data files to merge.
    """
    chunks = dict()
    for chunk_file in files:
        with open(chunk_file) as chunk_handle:
            for line in chunk_handle:
                # Strip the trailing newline so 'cov' parses cleanly.
                chunk_id, chunk_name, hit, size, cov = (
                    line.rstrip("\n").split("\t"))
                # Bug fix: hit/cov were accumulated as strings, which
                # concatenated instead of summing. Convert first.
                hit = int(hit)
                cov = float(cov)
                try:
                    chunks[chunk_id]["hit"] += hit
                    chunks[chunk_id]["cov"] += cov
                except KeyError:
                    chunks[chunk_id] = {
                        "name": chunk_name,
                        "hit": hit,
                        "size": size,
                        "cov": cov,
                    }
    with open(output_file, "w") as output_handle:
        for chunk_id in sorted(chunks):
            my_chunk = chunks[chunk_id]
            # Bug fix: records were previously written without a newline.
            output_handle.write("{}\t{}\t{}\t{}\t{}\n".format(
                chunk_id, my_chunk["name"], my_chunk["hit"],
                my_chunk["size"], my_chunk["cov"]))
Merge chunk data from different networks Similarly to merge_network, this merges any number of chunk data files. Parameters --------- output_file : file, str, or pathlib.Path, optional The output file to write the merged chunk data files into. Default is merged_idx_contig_hit_size_cov.txt `*files` : file, str or pathlib.Path The chunk data files to merge.
def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
    """Detach the given subnet interfaces from an OpenStack router.

    Returns True when every interface was removed, False on any failure.
    """
    try:
        for subnet_id in subnet_lst:
            intf = self.neutronclient.remove_interface_router(
                rout_id, body={'subnet_id': subnet_id})
            intf.get('id')
        return True
    except Exception as exc:
        LOG.error("Failed to delete router interface %(name)s, "
                  " Exc %(exc)s",
                  {'name': name, 'exc': str(exc)})
        return False
Delete the openstack router and remove the interfaces attached.
def convert_ascii_field(string):
    """Convert an ASCII field into the corresponding list of Unicode strings.

    The (input) ASCII field is a Unicode string containing one or more ASCII
    codepoints, separated by the data-file separator. "Not available" markers
    and empty tokens are skipped; codepoint-prefixed tokens are decoded via
    ``hex_to_unichr``, everything else is kept verbatim.

    :param str string: the (input) ASCII field
    :rtype: list of Unicode strings
    """
    values = []
    for codepoint in string.split(DATA_FILE_CODEPOINT_SEPARATOR):
        if (codepoint == DATA_FILE_VALUE_NOT_AVAILABLE) or (len(codepoint) == 0):
            continue
        # str.startswith accepts a tuple of prefixes.
        if codepoint.startswith((DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START,
                                 DATA_FILE_ASCII_UNICODE_CODEPOINT_START)):
            values.append(hex_to_unichr(codepoint))
        else:
            values.append(codepoint)
    return values
Convert an ASCII field into the corresponding list of Unicode strings. The (input) ASCII field is a Unicode string containing one or more ASCII codepoints (``00xx`` or ``U+00xx`` or an ASCII string not starting with ``00`` or ``U+``), separated by a space. :param str string: the (input) ASCII field :rtype: list of Unicode strings
def get_service_types(self):
    """Get all service types supported by this cluster.

    @return: A list of service types (strings)
    """
    response = self._get_resource_root().get(self._path() + '/serviceTypes')
    return response[ApiList.LIST_KEY]
Get all service types supported by this cluster. @return: A list of service types (strings)
def body_kwargs(self):
    """Return the request body parsed into a dict.

    JSON bodies are decoded with ``json.loads``; otherwise the body is
    parsed either from the streaming ``body_input`` or from the raw body
    string as a query string. Returns an empty dict when there is no
    content-type header or no body. Use ``self.body`` for the raw string.
    """
    content_type = self.get_header("content-type")
    if not content_type:
        return {}

    if content_type.lower().rfind("json") >= 0:
        raw = self.body
        return json.loads(raw) if raw else {}

    if self.body_input:
        parsed = RequestBody(
            fp=self.body_input,
            headers=self.headers,
            environ=self.environ
        )
        return dict(parsed)

    raw = self.body
    return self._parse_query_str(raw) if raw else {}
the request body, if this is a POST request this tries to do the right thing with the body, so if you have set the body and the content type is json, then it will return the body json decoded, if you need the original string body, use body example -- self.body = '{"foo":{"name":"bar"}}' b = self.body_kwargs # dict with: {"foo": { "name": "bar"}} print self.body # string with: '{"foo":{"name":"bar"}}'
def set_cursor_pos_callback(window, cbfun):
    """Sets the cursor position callback.

    Wrapper for:
        GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun cbfun);
    """
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    previous_callback = _cursor_pos_callback_repository.get(window_addr)
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWcursorposfun(cbfun)
    # Keep references so the ctypes wrapper is not garbage collected while
    # GLFW still holds the raw pointer.
    _cursor_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
    _glfw.glfwSetCursorPosCallback(window, c_cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
Sets the cursor position callback. Wrapper for: GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun cbfun);
def substitute_values(self, vect):
    """Internal method to substitute integers into the vector, and
    construct metadata to convert back to the original vector.

    np.nan is always given -1; all other objects are given integers in
    order of appearance.

    Parameters
    ----------
    vect : np.array
        the vector in which to substitute values
    """
    try:
        unique = np.unique(vect)
    except Exception:
        # np.unique can fail on mixed/unorderable contents; fall back to a
        # plain set (narrowed from a bare except so SystemExit et al. pass).
        unique = set(vect)

    # Drop NaNs here; they are mapped to the special -1 id below.
    unique = [
        x for x in unique
        if not isinstance(x, float) or not isnan(x)
    ]

    arr = np.copy(vect)
    for new_id, value in enumerate(unique):
        np.place(arr, arr == value, new_id)
        self.metadata[new_id] = value

    # Bug fix: np.float was removed from NumPy (1.24+); the builtin float
    # is the documented equivalent.
    arr = arr.astype(float)
    np.place(arr, np.isnan(arr), -1)

    self.arr = arr
    if -1 in arr:
        self.metadata[-1] = self._missing_id
Internal method to substitute integers into the vector, and construct metadata to convert back to the original vector. np.nan is always given -1, all other objects are given integers in order of appearance. Parameters ---------- vect : np.array the vector in which to substitute values
def reconstitute_path(drive, folders):
    """Reverts a tuple from `get_path_components` into a path.

    :param drive: A drive (eg 'c:'). Only applicable for NT systems
    :param folders: A list of folder names
    :return: A path comprising the drive and list of folder names. The path
        terminates with a `os.path.sep` *only* if it is a root directory
    """
    return os.path.join(drive, os.path.sep, *folders)
Reverts a tuple from `get_path_components` into a path. :param drive: A drive (eg 'c:'). Only applicable for NT systems :param folders: A list of folder names :return: A path comprising the drive and list of folder names. The path terminates with a `os.path.sep` *only* if it is a root directory
def select(self, limit=0):
    """Match all tags under the targeted tag.

    Yields at most ``limit`` matches; a ``limit`` below 1 means unlimited.
    """
    remaining = limit if limit >= 1 else None
    for descendant in self.get_descendants(self.tag):
        if not self.match(descendant):
            continue
        yield descendant
        if remaining is not None:
            remaining -= 1
            if remaining < 1:
                break
Match all tags under the targeted tag.
def crypto_sign(msg, sk):
    """Return signature+message given message and secret key.

    The signature is the first SIGNATUREBYTES bytes of the return value.
    A copy of msg is in the remainder.
    """
    if len(sk) != SECRETKEYBYTES:
        raise ValueError("Bad signing key length %d" % len(sk))
    verify_key = sk[PUBLICKEYBYTES:]
    signing_key = sk[:PUBLICKEYBYTES]
    return djbec.signature(msg, signing_key, verify_key) + msg
Return signature+message given message and secret key. The signature is the first SIGNATUREBYTES bytes of the return value. A copy of msg is in the remainder.
def format(self, *args, **kwargs):
    """Return a formatted version, using substitutions from args and kwargs.

    The substitutions are identified by braces ('{' and '}'); the result is
    wrapped back into this class with tags preserved.
    """
    formatted = super(ColorStr, self).format(*args, **kwargs)
    return self.__class__(formatted, keep_tags=True)
Return a formatted version, using substitutions from args and kwargs. The substitutions are identified by braces ('{' and '}').
def parse_rule(name, rule_text, do_raise=False):
    """Parses the given rule text.

    :param name: The name of the rule. Used when emitting log messages
        regarding a failure to parse the rule.
    :param rule_text: The text of the rule to parse.
    :param do_raise: If ``False`` and the rule fails to parse, a log
        message is emitted to the "policies" logger at level WARN, and a
        rule that always evaluates to ``False`` will be returned. If
        ``True``, a ``pyparsing.ParseException`` will be raised.

    :returns: An instance of ``policies.instructions.Instructions``,
        containing the instructions necessary to evaluate the
        authorization rule.
    """
    try:
        return rule.parseString(rule_text, parseAll=True)[0]
    except pyparsing.ParseException as exc:
        if do_raise:
            raise

        # Logger.warn() is a deprecated alias and was removed in
        # Python 3.13; use warning() instead.
        log = logging.getLogger('policies')
        log.warning("Failed to parse rule %r: %s" % (name, exc))
        log.warning("Rule line: %s" % exc.line)
        log.warning("Location : %s^" % (" " * (exc.col - 1)))

        # Fail closed: the fallback rule always denies.
        return Instructions([Constant(False), set_authz])
Parses the given rule text. :param name: The name of the rule. Used when emitting log messages regarding a failure to parse the rule. :param rule_text: The text of the rule to parse. :param do_raise: If ``False`` and the rule fails to parse, a log message is emitted to the "policies" logger at level WARN, and a rule that always evaluates to ``False`` will be returned. If ``True``, a ``pyparsing.ParseException`` will be raised. :returns: An instance of ``policies.instructions.Instructions``, containing the instructions necessary to evaluate the authorization rule.
def get_relationship(self, attribute):
    """Returns the domain relationship object for the given resource
    attribute, creating it lazily on first access.
    """
    attr_name = attribute.entity_attr
    relationship = self.__relationships.get(attr_name)
    if relationship is None:
        relationship = LazyDomainRelationship(
            self, attribute, direction=self.relationship_direction)
        self.__relationships[attr_name] = relationship
    return relationship
Returns the domain relationship object for the given resource attribute.
def wrap(start: str, string: str, end: str = "") -> str:
    """Wrap string inside other strings at start and end.

    If the string is not None or empty, then wrap with start and end,
    otherwise return an empty string.
    """
    if not string:
        return ""
    return f"{start}{string}{end}"
Wrap string inside other strings at start and end. If the string is not None or empty, then wrap with start and end, otherwise return an empty string.
def send_msg(app, msg, reply_cls=None, reply_multi=False):
    """Send an OpenFlow message and wait for reply messages.

    :param app: Client RyuApp instance
    :param msg: An OpenFlow controller-to-switch message to send
    :param reply_cls: OpenFlow message class for expected replies.
        None means no replies are expected. The default is None.
    :param reply_multi: True if multipart replies are expected.
        The default is False.

    Returns None when no replies, a single switch-to-controller message
    when reply_multi=False, or a list of them when reply_multi=True.
    Raises an exception on error.
    """
    request = event.SendMsgRequest(
        msg=msg, reply_cls=reply_cls, reply_multi=reply_multi)
    return app.send_request(request)()
Send an OpenFlow message and wait for reply messages. :param app: Client RyuApp instance :param msg: An OpenFlow controller-to-switch message to send :param reply_cls: OpenFlow message class for expected replies. None means no replies are expected. The default is None. :param reply_multi: True if multipart replies are expected. The default is False. If no replies, returns None. If reply_multi=False, returns OpenFlow switch-to-controller message. If reply_multi=True, returns a list of OpenFlow switch-to-controller messages. Raise an exception on error. Example:: # ...(snip)... import ryu.app.ofctl.api as ofctl_api class MyApp(app_manager.RyuApp): def _my_handler(self, ev): # ...(snip)... msg = parser.OFPPortDescStatsRequest(datapath=datapath) result = ofctl_api.send_msg( self, msg, reply_cls=parser.OFPPortDescStatsReply, reply_multi=True)
def is_castable(src, dst):
    """Check if src type can be cast to dst type."""
    # Integer-like sources (int, bool, user enums) cast to int/bool.
    if ((src in [int, bool]) or rdltypes.is_user_enum(src)) and (dst in [int, bool]):
        return True
    if (src == rdltypes.ArrayPlaceholder) and (dst == rdltypes.ArrayPlaceholder):
        # An array with no element type casts to any array; otherwise the
        # element types must agree.
        return (src.element_type is None) or (src.element_type == dst.element_type)
    if rdltypes.is_user_struct(dst):
        return issubclass(src, dst)
    if dst == rdltypes.PropertyReference:
        return issubclass(src, rdltypes.PropertyReference)
    return src == dst
Check if src type can be cast to dst type
async def finish_authentication(self, username, password):
    """Finish authentication process.

    A username (generated by new_credentials) and the PIN code shown on
    screen must be provided.
    """
    self.srp.step1(username, password)

    step1_data = await self._send_plist(
        'step1', method='pin', user=username)
    step1_resp = plistlib.loads(step1_data)

    pub_key, key_proof = self.srp.step2(step1_resp['pk'], step1_resp['salt'])
    await self._send_plist(
        'step2',
        pk=binascii.unhexlify(pub_key),
        proof=binascii.unhexlify(key_proof))

    epk, tag = self.srp.step3()
    await self._send_plist('step3', epk=epk, authTag=tag)
    return True
Finish authentication process. A username (generated by new_credentials) and the PIN code shown on screen must be provided.
def _get_section(self, name, create=True):
    """Retrieve a section by name. Create it on first access.

    Raises KeyError when the section is missing and ``create`` is False.
    """
    if name in self.sections:
        return self.sections[name]
    if not create:
        raise KeyError(name)
    section = Section(name)
    self.sections[name] = section
    return section
Retrieve a section by name. Create it on first access.
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than
            file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise
            if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    data = cls.countriesdata(use_live=use_live)
    iso2 = data['iso2iso3'].get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
Get ISO2 from ISO3 code Args: iso3 (str): ISO3 code for which to get ISO2 code use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True. exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None. Returns: Optional[str]: ISO2 code
def get_service_id_list() -> List[str]:
    """Return list of Service ids.

    Scans the ``states*`` keys in the DB and returns, for each four-part
    key, the colon-joined remainder after the leading ``states`` segment.

    Note: the return annotation was ``List[tuple]`` but the code builds
    strings; corrected to ``List[str]``.
    """
    keys = DB.get_keys('states*')
    services = []
    for key in keys:
        values = key.split(':')
        # Only four-part keys identify a service.
        if len(values) == 4:
            services.append(':'.join(values[1:]))
    return services
Return list of Services.
def get_plural_name(cls):
    """Get the serializer's plural name.

    The plural name may be defined on the Meta class. If the plural name
    is not defined, the pluralized form of the name will be computed,
    cached on Meta, and returned.
    """
    if not hasattr(cls.Meta, 'plural_name'):
        cls.Meta.plural_name = inflection.pluralize(cls.get_name())
    return cls.Meta.plural_name
Get the serializer's plural name. The plural name may be defined on the Meta class. If the plural name is not defined, the pluralized form of the name will be returned.
def get_all_published_ships_basic(db_connection):
    """Gets a list of all published ships and their basic information.

    Results are cached on the function object after the first call.

    :return: Each result has a tuple of (typeID, typeName, groupID,
        groupName, categoryID, and categoryName).
    :rtype: list
    """
    try:
        return get_all_published_ships_basic._results
    except AttributeError:
        results = execute_sql(
            'CALL get_all_published_ships_basic();', db_connection)
        get_all_published_ships_basic._results = results
        return results
Gets a list of all published ships and their basic information. :return: Each result has a tuple of (typeID, typeName, groupID, groupName, categoryID, and categoryName). :rtype: list
def _resample_residuals(self, stars, epsf):
    """Compute normalized residual images for all the input stars.

    Parameters
    ----------
    stars : `EPSFStars` object
        The stars used to build the ePSF.
    epsf : `EPSFModel` object
        The ePSF model.

    Returns
    -------
    star_imgs : 3D `~numpy.ndarray`
        A 3D cube containing the resampled residual images.
    """
    n_rows, n_cols = epsf.shape
    star_imgs = np.zeros((stars.n_good_stars, n_rows, n_cols))
    for idx, star in enumerate(stars.all_good_stars):
        star_imgs[idx] = self._resample_residual(star, epsf)
    return star_imgs
Compute normalized residual images for all the input stars. Parameters ---------- stars : `EPSFStars` object The stars used to build the ePSF. epsf : `EPSFModel` object The ePSF model. Returns ------- star_imgs : 3D `~numpy.ndarray` A 3D cube containing the resampled residual images.
def resource_create_ticket(self, token, id, scopes, **kwargs):
    """Create a ticket for permission to a resource.

    https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi

    :param str token: user access token
    :param str id: resource id
    :param list scopes: scopes to which access is wanted
    :param dict claims: (optional)
    :rtype: dict
    """
    ticket = dict(resource_id=id, resource_scopes=scopes, **kwargs)
    return self._realm.client.post(
        self.well_known['permission_endpoint'],
        data=self._dumps([ticket]),
        headers=self.get_headers(token),
    )
Create a ticket for permission to a resource. https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_protection_permission_api_papi :param str token: user access token :param str id: resource id :param list scopes: scopes to which access is wanted :param dict claims: (optional) :rtype: dict
def prebuild_arch(self, arch):
    """Make the build and target directories."""
    build_dir = self.get_build_dir(arch.arch)
    if exists(build_dir):
        return
    info("creating {}".format(build_dir))
    shprint(sh.mkdir, '-p', build_dir)
Make the build and target directories
def do_propset(self, subcmd, opts, *args):
    """Echo the parsed options and arguments for the 'propset' subcommand."""
    # Bug fix: the bare Python 2 print statement is a syntax error on
    # Python 3. Single-argument print(...) behaves identically on both
    # (on py2 it is a parenthesized print statement).
    print("'svn %s' opts: %s" % (subcmd, opts))
    print("'svn %s' args: %s" % (subcmd, args))
Set PROPNAME to PROPVAL on files, dirs, or revisions. usage: 1. propset PROPNAME [PROPVAL | -F VALFILE] PATH... 2. propset PROPNAME --revprop -r REV [PROPVAL | -F VALFILE] [URL] 1. Creates a versioned, local propchange in working copy. 2. Creates an unversioned, remote propchange on repos revision. Note: svn recognizes the following special versioned properties but will store any arbitrary properties set: svn:ignore - A newline separated list of file patterns to ignore. svn:keywords - Keywords to be expanded. Valid keywords are: URL, HeadURL - The URL for the head version of the object. Author, LastChangedBy - The last person to modify the file. Date, LastChangedDate - The date/time the object was last modified. Rev, Revision, - The last revision the object changed. LastChangedRevision Id - A compressed summary of the previous 4 keywords. svn:executable - If present, make the file executable. This property cannot be set on a directory. A non-recursive attempt will fail, and a recursive attempt will set the property only on the file children of the directory. svn:eol-style - One of 'native', 'LF', 'CR', 'CRLF'. svn:mime-type - The mimetype of the file. Used to determine whether to merge the file, and how to serve it from Apache. A mimetype beginning with 'text/' (or an absent mimetype) is treated as text. Anything else is treated as binary. svn:externals - A newline separated list of module specifiers, each of which consists of a relative directory path, optional revision flags, and an URL. For example foo http://example.com/repos/zig foo/bar -r 1234 http://example.com/repos/zag ${cmd_option_list}
def delete(self, story, params=None, **options):
    """Deletes a story.

    A user can only delete stories they have created. Returns an empty
    data record.

    Parameters
    ----------
    story : {Id} Globally unique identifier for the story.
    params : dict, optional
        Extra request parameters. Defaults to an empty dict.
    """
    # Bug fix: avoid a shared mutable default argument ({}) that the
    # client call could mutate across invocations.
    if params is None:
        params = {}
    path = "/stories/%s" % (story)
    return self.client.delete(path, params, **options)
Deletes a story. A user can only delete stories they have created. Returns an empty data record. Parameters ---------- story : {Id} Globally unique identifier for the story.
def _createMagConversionDict():
    """Load data/magnitude_conversion.dat into a dict keyed by star class.

    The file is Table A of 1995ApJS..101..117K (presumably — verify
    against the packaged data file).
    """
    stream = resource_stream(__name__, 'data/magnitude_conversion.dat')
    raw_table = np.loadtxt(stream, '|S5')

    magDict = {}
    for row in raw_table:
        if sys.hexversion >= 0x03000000:
            # Python 3: numpy yields bytes, decode to str.
            starClass = row[1].decode("utf-8")
            tableData = [x.decode("utf-8") for x in row[3:]]
        else:
            starClass = row[1]
            tableData = row[3:]
        magDict[starClass] = tableData
    return magDict
Loads magnitude_conversion.dat, which is Table A of 1995ApJS..101..117K.
def poll_events(self):
    """Continuously polls all types of events from sl4a.

    Events are sorted by name and stored in separate queues. If there are
    registered handlers, the handlers will be called with the corresponding
    event immediately upon event discovery, and the event won't be stored.
    If exceptions occur, stop the dispatcher and return.
    """
    while self.started:
        event_obj = None
        event_name = None
        try:
            # Long-poll sl4a for the next event.
            event_obj = self._sl4a.eventWait(50000)
        except Exception:
            # Narrowed from a bare except (which also trapped SystemExit/
            # KeyboardInterrupt). Re-raise only while running.
            if self.started:
                print("Exception happened during polling.")
                print(traceback.format_exc())
                raise
        if not event_obj:
            continue
        elif 'name' not in event_obj:
            print("Received Malformed event {}".format(event_obj))
            continue
        else:
            event_name = event_obj['name']
        if event_name in self.handlers:
            # Handle subscribed events in the subscribing thread.
            self.handle_subscribed_event(event_obj, event_name)
            if event_name == "EventDispatcherShutdown":
                self._sl4a.closeSl4aSession()
                break
        else:
            # Bug fix: the previous acquire()/release() pair leaked the
            # lock if a queue operation raised; `with` always releases.
            with self.lock:
                if event_name in self.event_dict:
                    self.event_dict[event_name].put(event_obj)
                else:
                    q = queue.Queue()
                    q.put(event_obj)
                    self.event_dict[event_name] = q
Continuously polls all types of events from sl4a. Events are sorted by name and stored in separate queues. If there are registered handlers, the handlers will be called with the corresponding event immediately upon event discovery, and the event won't be stored. If exceptions occur, stop the dispatcher and return.
def pop(self):
    """Return a new ImmutableVector with the last item removed."""
    if self._length == 0:
        raise IndexError()
    new_length = self._length - 1
    shorter = ImmutableVector()
    shorter.tree = self.tree.remove(new_length)
    shorter._length = new_length
    return shorter
Return a new ImmutableVector with the last item removed.
def dpotri(A, lower=1):
    """Wrapper for lapack dpotri function.

    DPOTRI computes the inverse of a real symmetric positive definite
    matrix A using the Cholesky factorization A = U**T*U or A = L*L**T
    computed by DPOTRF.

    :param A: Matrix A
    :param lower: is matrix lower (true) or upper (false)
    :returns: A inverse
    """
    A_f = force_F_ordered(A)
    R, info = lapack.dpotri(A_f, lower=lower)
    # dpotri only fills one triangle; mirror it into the full matrix.
    symmetrify(R)
    return R, info
Wrapper for lapack dpotri function DPOTRI - compute the inverse of a real symmetric positive definite matrix A using the Cholesky factorization A = U**T*U or A = L*L**T computed by DPOTRF :param A: Matrix A :param lower: is matrix lower (true) or upper (false) :returns: A inverse
def required(cls):
    """Return a list of all columns required by the database to create the
    resource.

    :param cls: The Model class to gather attributes from
    :rtype: list
    """
    def _is_required(column):
        # Auto-incrementing integer primary keys are generated by the
        # database and thus never required from the caller.
        is_autoincrement = ('int' in str(column.type).lower()
                            and column.autoincrement)
        if column.primary_key:
            return not is_autoincrement
        return not column.nullable

    return [col.name for col in cls.__table__.columns if _is_required(col)]
Return a list of all columns required by the database to create the resource. :param cls: The Model class to gather attributes from :rtype: list
def GetDataStream(self, name, case_sensitive=True):
    """Retrieves a data stream by name.

    Args:
        name (str): name of the data stream.
        case_sensitive (Optional[bool]): True if the name is case sensitive.

    Returns:
        DataStream: a data stream or None if not available.

    Raises:
        ValueError: if the name is not string.
    """
    if not isinstance(name, py2to3.STRING_TYPES):
        raise ValueError('Name is not a string.')

    wanted_lower = name.lower()
    fallback = None
    for data_stream in self._GetDataStreams():
        # An exact match always wins, even over an earlier
        # case-insensitive match.
        if data_stream.name == name:
            return data_stream
        if (not case_sensitive and fallback is None
                and data_stream.name.lower() == wanted_lower):
            fallback = data_stream
    return fallback
Retrieves a data stream by name. Args: name (str): name of the data stream. case_sensitive (Optional[bool]): True if the name is case sensitive. Returns: DataStream: a data stream or None if not available. Raises: ValueError: if the name is not string.
async def scan(self):
    """Scan for Apple TVs on the network and print what was found."""
    found = await pyatv.scan_for_apple_tvs(
        self.loop, timeout=self.args.scan_timeout, only_usable=False)
    _print_found_apple_tvs(found)
    return 0
Scan for Apple TVs on the network.
def getIRThreshold(self):
    """Returns the IR temperature threshold in degrees Celsius, or 0 if no
    threshold is set.
    """
    response = self.sendCommand('$GO')
    # An 'NK' reply indicates no threshold is set.
    if response[0] == 'NK':
        return 0
    # The device reports the threshold in tenths of a degree.
    return float(response[2]) / 10
Returns the IR temperature threshold in degrees Celsius, or 0 if no threshold is set
def removeReliableListener(self, listener):
    """Remove a previously added listener and its pending error records."""
    reliable = self.store.query(
        _ReliableListener,
        attributes.AND(_ReliableListener.processor == self,
                       _ReliableListener.listener == listener))
    reliable.deleteFromStore()
    errors = self.store.query(
        BatchProcessingError,
        attributes.AND(BatchProcessingError.processor == self,
                       BatchProcessingError.listener == listener))
    errors.deleteFromStore()
Remove a previously added listener.
def exp2(x, context=None):
    """Return two raised to the power x."""
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_exp2, (operand,), context)
Return two raised to the power x.
def __edge_weight(edge_id, dfs_data):
    """Calculates the edge weight used to sort edges."""
    graph = dfs_data['graph']
    edge_lookup = dfs_data['edge_lookup']

    u, v = graph.get_edge(edge_id)['vertices']
    d_u = D(u, dfs_data)
    d_v = D(v, dfs_data)
    d_lp_1 = D(L1(v, dfs_data), dfs_data)

    if edge_lookup[edge_id] == 'backedge' and d_v < d_u:
        return 2 * d_v
    if is_type_I_branch(u, v, dfs_data):
        return 2 * d_lp_1
    if is_type_II_branch(u, v, dfs_data):
        return 2 * d_lp_1 + 1
    return 2 * graph.num_nodes() + 1
Calculates the edge weight used to sort edges.
def make_summaries(self):
    """Make and save summary csv files, each containing values from all
    cells.
    """
    summary_df = save_summaries(
        self.frames, self.keys, self.selected_summaries,
        self.batch_dir, self.name)
    self.summary_df = summary_df
    logger.debug("made and saved summaries")
Make and save summary csv files, each containing values from all cells
async def fetch_api_description(
        url: typing.Union[str, ParseResult, SplitResult],
        insecure: bool = False):
    """Fetch the API description from the remote MAAS instance.

    Raises RemoteError on a non-OK status or a non-JSON response.
    """
    describe_url = urljoin(_ensure_url_string(url), "describe/")
    connector = aiohttp.TCPConnector(verify_ssl=(not insecure))
    session = aiohttp.ClientSession(connector=connector)
    async with session, session.get(describe_url) as response:
        if response.status != HTTPStatus.OK:
            raise RemoteError(
                "{0} -> {1.status} {1.reason}".format(
                    url, response))
        if response.content_type != "application/json":
            raise RemoteError(
                "Expected application/json, got: %s"
                % response.content_type)
        return await response.json()
Fetch the API description from the remote MAAS instance.
def _connect(self, key, spec, via=None):
    """Actual connect implementation: build a Mitogen context for *spec*
    and run the forked task parent setup in the remote.

    :param key: Deduplication key representing the connection configuration.
    :param spec: Connection specification dict; reads 'method', 'kwargs',
        and optionally 'enable_lru'.
    :param via: Optional intermediary context for indirect connections.
    :returns: Dict with 'context', 'via', 'init_child_result', and
        'msg' (always None here; a caller-facing error slot) keys.
    :raises Error: if spec['method'] names no router method.
    """
    try:
        method = getattr(self.router, spec['method'])
    except AttributeError:
        raise Error('unsupported method: %(transport)s' % spec)
    # Build the new context through the router.
    context = method(via=via, unidirectional=True, **spec['kwargs'])
    if via and spec.get('enable_lru'):
        self._update_lru(context, spec, via)
    # Clean up our bookkeeping when the connection drops.
    mitogen.core.listen(context, 'disconnect', lambda: self._on_context_disconnect(context))
    self._send_module_forwards(context)
    # Initialize the remote side (logging level, temp dir candidates).
    init_child_result = context.call(
        ansible_mitogen.target.init_child,
        log_level=LOG.getEffectiveLevel(),
        candidate_temp_dirs=self._get_candidate_temp_dirs(),
    )
    # Debug aid: dump remote thread stacks when the env var is set.
    if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'):
        from mitogen import debug
        context.call(debug.dump_to_logger)
    # Register the context for ref-counted reuse keyed by configuration.
    self._key_by_context[context] = key
    self._refs_by_context[context] = 0
    return {
        'context': context,
        'via': via,
        'init_child_result': init_child_result,
        'msg': None,
    }
Actual connect implementation. Arranges for the Mitogen connection to be created and enqueues an asynchronous call to start the forked task parent in the remote context. :param key: Deduplication key representing the connection configuration. :param spec: Connection specification. :returns: Dict like:: { 'context': mitogen.core.Context or None, 'via': mitogen.core.Context or None, 'init_child_result': { 'fork_context': mitogen.core.Context, 'home_dir': str or None, }, 'msg': str or None } Where `context` is a reference to the newly constructed context, `init_child_result` is the result of executing :func:`ansible_mitogen.target.init_child` in that context, `msg` is an error message and the remaining fields are :data:`None`, or `msg` is :data:`None` and the remaining fields are set.