code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def activate_network_interface(iface):
    """Bring up the given network interface (Linux only).

    :param iface: interface name, e.g. ``"eth0"``
    :raise OSError: if the interface does not exist or permissions are missing
    """
    iface = iface.encode()
    # Linux ioctl request numbers for getting/setting interface flags.
    SIOCGIFFLAGS = 0x8913
    SIOCSIFFLAGS = 0x8914
    IFF_UP = 0x1
    # struct ifreq layout: 16-byte name, unsigned short flags/family, 14 bytes padding.
    # (The original defined two identical layout constants; merged into one.)
    STRUCT_IFREQ_LAYOUT = b"16sH14s"
    # Any AF_INET datagram socket works as an ioctl target.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)
    try:
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT, iface, socket.AF_INET, b'0' * 14)
        ifreq = fcntl.ioctl(sock, SIOCGIFFLAGS, ifreq)
        if_flags = struct.unpack(STRUCT_IFREQ_LAYOUT, ifreq)[1]
        # Set the IFF_UP bit while preserving all other flags.
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT, iface, if_flags | IFF_UP, b'0' * 14)
        fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
    finally:
        sock.close()
Bring up the given network interface. @raise OSError: if the interface does not exist or permissions are missing
def fit_transform(self, X, **kwargs):
    """Compute the diffusion operator and embed the cells.

    ``X`` is the input data; keyword arguments are forwarded to
    :func:`PHATE.transform`. Returns the low-dimensional embedding.
    """
    tasklogger.log_start('PHATE')
    self.fit(X)
    result = self.transform(**kwargs)
    tasklogger.log_complete('PHATE')
    return result
Computes the diffusion operator and the position of the cells in the embedding space Parameters ---------- X : array, shape=[n_samples, n_features] input data with `n_samples` samples and `n_dimensions` dimensions. Accepted data types: `numpy.ndarray`, `scipy.sparse.spmatrix`, `pd.DataFrame`, `anndata.AnnData` If `knn_dist` is 'precomputed', `data` should be a n_samples x n_samples distance or affinity matrix kwargs : further arguments for `PHATE.transform()` Keyword arguments as specified in :func:`~phate.PHATE.transform` Returns ------- embedding : array, shape=[n_samples, n_dimensions] The cells embedded in a lower dimensional space using PHATE
def str_extract(arr, pat, flags=0, expand=True):
    r"""Extract capture groups in the regex `pat` as columns in a DataFrame.

    For each subject string, groups are extracted from the first match of
    ``pat``.

    Parameters
    ----------
    pat : str
        Regular expression pattern with capturing groups.
    flags : int, default 0
        Flags from the :mod:`re` module, e.g. ``re.IGNORECASE``.
    expand : bool, default True
        If True, return a DataFrame with one column per capture group; if
        False, return a Series/Index for a single group.
    """
    # BUG FIX: the original body began with a stray bare expression ``r``
    # (remnant of a stripped raw docstring prefix) which raised NameError
    # on every call; removed and the raw docstring restored.
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        return _str_extract_frame(arr._orig, pat, flags=flags)
    result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
    return arr._wrap_result(result, name=name, expand=expand)
r""" Extract capture groups in the regex `pat` as columns in a DataFrame. For each subject string in the Series, extract groups from the first match of regular expression `pat`. Parameters ---------- pat : str Regular expression pattern with capturing groups. flags : int, default 0 (no flags) Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that modify regular expression matching for things like case, spaces, etc. For more details, see :mod:`re`. expand : bool, default True If True, return DataFrame with one column per capture group. If False, return a Series/Index if there is one capture group or DataFrame if there are multiple capture groups. .. versionadded:: 0.18.0 Returns ------- DataFrame or Series or Index A DataFrame with one row for each subject string, and one column for each group. Any capture group names in regular expression pat will be used for column names; otherwise capture group numbers will be used. The dtype of each result column is always object, even when no match is found. If ``expand=False`` and pat has only one capture group, then return a Series (if subject is a Series) or Index (if subject is an Index). See Also -------- extractall : Returns all matches (not just the first match). Examples -------- A pattern with two groups will return a DataFrame with two columns. Non-matches will be NaN. >>> s = pd.Series(['a1', 'b2', 'c3']) >>> s.str.extract(r'([ab])(\d)') 0 1 0 a 1 1 b 2 2 NaN NaN A pattern may contain optional groups. >>> s.str.extract(r'([ab])?(\d)') 0 1 0 a 1 1 b 2 2 NaN 3 Named groups will become column names in the result. >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)') letter digit 0 a 1 1 b 2 2 NaN NaN A pattern with one group will return a DataFrame with one column if expand=True. >>> s.str.extract(r'[ab](\d)', expand=True) 0 0 1 1 2 2 NaN A pattern with one group will return a Series if expand=False. >>> s.str.extract(r'[ab](\d)', expand=False) 0 1 1 2 2 NaN dtype: object
def start(self):
    """Skip tags until the first data entry, then begin a new entry."""
    while True:
        if self.is_start(self.current_tag):
            break
        self.next()
    self.new_entry()
Find the first data entry and prepare to parse.
def check_status(self, delay=0):
    """Poll the API endpoint in a loop.

    :param delay: seconds to wait between API calls (the Connection's
        'requests_delay' also applies).
    :return: yields ``(status, completion_percentage)`` tuples
    :rtype: tuple(str, float)
    """
    if self.item_id:
        # Already have an item id: report the current state once.
        yield self.status, self.completion_percentage
        return
    while not self._request_status():
        yield self.status, self.completion_percentage
        if self.item_id is None:
            sleep(delay)
Checks the api endpoint in a loop :param delay: number of seconds to wait between api calls. Note Connection 'requests_delay' also apply. :return: tuple of status and percentage complete :rtype: tuple(str, float)
def level(self):
    """Extract the integer level number from the compliance profile URI.

    :raise IIIFInfoError: if the URI does not match the expected pattern.
    """
    pattern = self.compliance_prefix + r'(\d)' + self.compliance_suffix + r'$'
    match = re.match(pattern, self.compliance)
    if match is None:
        raise IIIFInfoError(
            "Bad compliance profile URI, failed to extract level number")
    return int(match.group(1))
Extract level number from compliance profile URI. Returns integer level number or raises IIIFInfoError
def bitwise_dot_product(bs0: str, bs1: str) -> str:
    """Bitwise dot product (mod 2) of two equal-length bit strings.

    :param bs0: string of 0's and 1's representing a number in binary
    :param bs1: string of 0's and 1's representing a number in binary
    :return: "0" or "1", the dot-product value as a string
    :raise ValueError: if the strings differ in length
    """
    if len(bs0) != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    total = sum(int(a) * int(b) for a, b in zip(bs0, bs1))
    return str(total % 2)
A helper to calculate the bitwise dot-product between two strings representing bit-vectors :param bs0: String of 0's and 1's representing a number in binary representation :param bs1: String of 0's and 1's representing a number in binary representation :return: 0 or 1 as a string corresponding to the dot-product value
def data_file(file_fmt, info=None, **kwargs):
    """Build a data file name from an f-string format.

    Args:
        file_fmt: file format in terms of f-strings
        info: dict; hashed and passed to the f-string as 'hash_key',
            and its entries are also passed as keyword values
        **kwargs: additional arguments for the f-string

    Returns:
        str: data file name
    """
    if isinstance(info, dict):
        # NOTE(review): json.dumps is called without sort_keys, so the hash
        # depends on dict insertion order — confirm this is intended.
        digest = hashlib.sha256(json.dumps(info).encode('utf-8')).hexdigest()
        kwargs['hash_key'] = digest
        kwargs.update(info)
    return utils.fstr(fmt=file_fmt, **kwargs)
Data file name for given information Args: file_fmt: file format in terms of f-strings info: dict, to be hashed and then passed to f-string using 'hash_key' this info will also be passed to f-strings **kwargs: arguments for f-strings Returns: str: data file name
def row_is_filtered(self, row_subs, filters):
    """Return True if the row should NOT be included according to ``filters``.

    Unknown filter keywords are reported on stdout and ignored.
    """
    for name, wanted in filters.items():
        if name not in row_subs:
            print("fw: Unknown filter keyword (%s)" % (name,))
            continue
        # String comparison, matching the original's tolerant semantics.
        if str(row_subs[name]) != str(wanted):
            return True
    return False
returns True if row should NOT be included according to filters
def repeat(self, count):
    """Concatenate this sequence with itself ``count`` times.

    Returns a new sequence; ``count == 0`` yields an empty ``HSeq``.
    """
    result = HSeq()
    for _ in range(count):
        result = result.concatenate(self)
    return result
repeat sequence given number of times to produce a new sequence
def getfirstline(file, default):
    """Return the first line of ``file`` (stripped of its newline).

    Note: the line is returned only when the file contains exactly one
    line; otherwise ``default`` is returned.
    """
    with open(file, 'rb') as fh:
        lines = fh.readlines()
    if len(lines) != 1:
        return default
    return lines[0].decode('utf-8').strip('\n')
Returns the first line of a file.
def _handle_archive(archive, command, verbosity=0, interactive=True,
                    program=None, format=None, compression=None):
    """Run a non-mutating archive command ('list' or 'test') on ``archive``.

    :raise util.PatoolError: for any other command.
    """
    if format is None:
        format, compression = get_archive_format(archive)
    check_archive_format(format, compression)
    if command not in ('list', 'test'):
        raise util.PatoolError("invalid archive command `%s'" % command)
    program = find_archive_program(format, command, program=program)
    check_program_compression(archive, command, program, compression)
    # Build the concrete command line for the chosen helper program.
    cmdlist_func = get_archive_cmdlist_func(program, command, format)
    cmdlist = cmdlist_func(archive, compression, program, verbosity, interactive)
    if cmdlist:
        # An empty cmdlist means the Python function already did the work.
        run_archive_cmdlist(cmdlist, verbosity=verbosity)
Test and list archives.
def finish_assessment(self, assessment_taken_id):
    """Mark the entire assessment as complete.

    arg: assessment_taken_id (osid.id.Id): Id of the AssessmentTaken
    raise: errors.IllegalState: if not started or already ended
    """
    taken = self._get_assessment_taken(assessment_taken_id)
    # Guard first: only an in-progress assessment can be finished.
    if not (taken.has_started() and not taken.has_ended()):
        raise errors.IllegalState()
    taken_map = taken._my_map
    taken_map['completionTime'] = DateTime.utcnow()
    taken_map['ended'] = True
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    collection.save(taken_map)
Indicates the entire assessment is complete. arg: assessment_taken_id (osid.id.Id): ``Id`` of the ``AssessmentTaken`` raise: IllegalState - ``has_begun()`` is ``false`` or ``is_over()`` is ``true`` raise: NotFound - ``assessment_taken_id`` is not found raise: NullArgument - ``assessment_taken_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def connected_edges(G, nodes):
    """Return the edges connected (via connected components) to any of ``nodes``.

    Nodes absent from ``G`` are ignored.
    """
    component_nodes = collections.deque()
    for node in nodes:
        if G.has_node(node):
            # Collect the whole connected component containing this node.
            component_nodes.extend(nx.node_connected_component(G, node))
    return G.subgraph(component_nodes).edges()
Given graph G and list of nodes, return the list of edges that are connected to nodes
def add_event(request, template='swingtime/add_event.html',
              event_form_class=forms.EventForm,
              recurrence_form_class=forms.MultipleOccurrenceForm):
    """Add a new ``Event`` instance plus one or more ``Occurrence``s.

    Context: ``dtstart`` (parsed from GET if present, else None/now),
    ``event_form`` and ``recurrence_form``.
    """
    dtstart = None
    if request.method == 'POST':
        event_form = event_form_class(request.POST)
        recurrence_form = recurrence_form_class(request.POST)
        if event_form.is_valid() and recurrence_form.is_valid():
            event = event_form.save()
            recurrence_form.save(event)
            return http.HttpResponseRedirect(event.get_absolute_url())
        # Invalid forms fall through and are re-rendered with errors.
    else:
        if 'dtstart' in request.GET:
            try:
                dtstart = parser.parse(request.GET['dtstart'])
            except (TypeError, ValueError) as exc:
                logging.warning(exc)
        dtstart = dtstart or datetime.now()
        event_form = event_form_class()
        recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
    context = {'dtstart': dtstart,
               'event_form': event_form,
               'recurrence_form': recurrence_form}
    return render(request, template, context)
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s. Context parameters: ``dtstart`` a datetime.datetime object representing the GET request value if present, otherwise None ``event_form`` a form object for updating the event ``recurrence_form`` a form object for adding occurrences
def rollback_app(self, app_id, version, force=False):
    """Roll an app back to a previous version.

    :param str app_id: application ID
    :param str version: application version
    :param bool force: apply even if a deployment is in progress
    :returns: dict with the deployment id and version
    :rtype: dict
    """
    payload = json.dumps({'version': version})
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=payload)
    return response.json()
Roll an app back to a previous version. :param str app_id: application ID :param str version: application version :param bool force: apply even if a deployment is in progress :returns: a dict containing the deployment id and version :rtype: dict
def delete_unused(self, keyword_ids=None):
    """Delete keywords with no assignments.

    Limits the candidate set to ``keyword_ids`` when given.
    """
    candidates = (self.all() if keyword_ids is None
                  else self.filter(id__in=keyword_ids))
    candidates.filter(assignments__isnull=True).delete()
Removes all instances that are not assigned to any object. Limits processing to ``keyword_ids`` if given.
def _flush(self): self.classes_names = None self.__cache_methods = None self.__cached_methods_idx = None self.__cache_fields = None self.__cache_all_methods = None self.__cache_all_fields = None
Flush all caches Might be used after classes, methods or fields are added.
def add(a, b):
    """Add two values, treating None as an identity element.

    Returns None only when both operands are None.
    """
    if a is None:
        return b
    if b is None:
        return a
    return a + b
Add two values, ignoring None
def get_instance(self, payload):
    """Build a TranscriptionInstance from an API payload dict.

    :param dict payload: payload response from the API
    :rtype: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance
    """
    solution = self._solution
    return TranscriptionInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        recording_sid=solution['recording_sid'],
    )
Build an instance of TranscriptionInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance :rtype: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance
def _retrieve(self, *criterion):
    """Retrieve exactly one model matching ``criterion``.

    :raises ModelNotFoundError: if no row matches.
    """
    try:
        return self._query(*criterion).one()
    except NoResultFound as error:
        message = "{} not found".format(self.model_class.__name__)
        raise ModelNotFoundError(message, error)
Retrieve a model by some criteria. :raises `ModelNotFoundError` if the row cannot be retrieved.
def callable_name(callable_obj): try: if (isinstance(callable_obj, type) and issubclass(callable_obj, param.ParameterizedFunction)): return callable_obj.__name__ elif (isinstance(callable_obj, param.Parameterized) and 'operation' in callable_obj.params()): return callable_obj.operation.__name__ elif isinstance(callable_obj, partial): return str(callable_obj) elif inspect.isfunction(callable_obj): return callable_obj.__name__ elif inspect.ismethod(callable_obj): meth = callable_obj if sys.version_info < (3,0): owner = meth.im_class if meth.im_self is None else meth.im_self else: owner = meth.__self__ if meth.__name__ == '__call__': return type(owner).__name__ return '.'.join([owner.__name__, meth.__name__]) elif isinstance(callable_obj, types.GeneratorType): return callable_obj.__name__ else: return type(callable_obj).__name__ except: return str(callable_obj)
Attempt to return a meaningful name identifying a callable or generator
def truncate_table(self, tablename):
    """Emulate TRUNCATE by removing all rows (SQLite has no direct TRUNCATE)."""
    table = self.get(tablename)
    table.remove()
    self.db.commit()
SQLite3 doesn't support direct truncate, so we just use delete here
def _ref_bib(key, ref): s = '' s += '@{}{{{},\n'.format(ref['type'], key) entry_lines = [] for k, v in ref.items(): if k == 'type': continue if k == 'authors': entry_lines.append(' author = {{{}}}'.format(' and '.join(v))) elif k == 'editors': entry_lines.append(' editor = {{{}}}'.format(' and '.join(v))) else: entry_lines.append(' {} = {{{}}}'.format(k, v)) s += ',\n'.join(entry_lines) s += '\n}' return s
Convert a single reference to bibtex format
def attach(self):
    """Attach this strategy to its sensor and push an initial reading."""
    sensor = self._sensor
    self.update(sensor, sensor.read())
    sensor.attach(self)
Attach strategy to its sensor and send initial update.
def add_injectable(name, value, autocall=True, cache=False,
                   cache_scope=_CS_FOREVER, memoize=False):
    """Register ``value`` for injection into other functions.

    If ``value`` is callable and ``autocall`` is True it is wrapped so the
    function is evaluated (with argument matching) and its result injected;
    with ``autocall`` False and ``memoize`` True the raw function is memoized
    per ``cache_scope``. Otherwise ``value`` is stored untouched.
    """
    if isinstance(value, Callable):
        if autocall:
            value = _InjectableFuncWrapper(
                name, value, cache=cache, cache_scope=cache_scope)
            # Start with an empty cache for the wrapper.
            value.clear_cached()
        elif memoize:
            value = _memoize_function(value, name, cache_scope=cache_scope)
    logger.debug('registering injectable {!r}'.format(name))
    _INJECTABLES[name] = value
Add a value that will be injected into other functions. Parameters ---------- name : str value If a callable and `autocall` is True then the function's argument names and keyword argument values will be matched to registered variables when the function needs to be evaluated by Orca. The return value will be passed to any functions using this injectable. In all other cases, `value` will be passed through untouched. autocall : bool, optional Set to True to have injectable functions automatically called (with argument matching) and the result injected instead of the function itself. cache : bool, optional Whether to cache the return value of an injectable function. Only applies when `value` is a callable and `autocall` is True. cache_scope : {'step', 'iteration', 'forever'}, optional Scope for which to cache data. Default is to cache forever (or until manually cleared). 'iteration' caches data for each complete iteration of the pipeline, 'step' caches data for a single step of the pipeline. memoize : bool, optional If autocall is False it is still possible to cache function results by setting this flag to True. Cached values are stored in a dictionary keyed by argument values, so the argument values must be hashable. Memoized functions have their caches cleared according to the same rules as universal caching.
def logs(ctx, services, num, follow):
    """Show logs of daemonized services by tailing their stdout log files.

    One tailer thread is started per service whose log file exists; the
    command then blocks on the first tailer while it is alive.
    """
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
                 extra={"command": ctx.command.name, "params": ctx.params})
    home = ctx.obj["HOME"]
    services_path = os.path.join(home, SERVICES)
    tail_threads = []
    for service in services:
        logpath = os.path.join(services_path, service, LOGS_DIR, STDOUTLOG)
        if os.path.exists(logpath):
            logger.debug("tailing %s", logpath)
            t = threading.Thread(target=Tailer,
                                 kwargs={"name": service,
                                         "nlines": num,
                                         "filepath": logpath,
                                         "follow": follow})
            t.daemon = True
            t.start()
            tail_threads.append(t)
    if tail_threads:
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while tail_threads[0].is_alive():
            tail_threads[0].join(0.1)
Show logs of daemonized service.
def delete(self, key):
    """Delete `key` from the keystore (and its index entry); no-op if absent."""
    if key not in self.data:
        return
    self.delete_from_index(key)
    del self.data[key]
Delete a `key` from the keystore.
def data(self) -> Sequence[Tuple[int, float]]:
    """Pairs of (number of Cliffords, average ground-state probability)."""
    return list(zip(self._num_cfds_seq, self._gnd_state_probs))
Returns a sequence of tuple pairs with the first item being a number of Cliffords and the second item being the corresponding average ground state probability.
def add_chain(self, group_name, component_map):
    """Record the analysis component chain as attributes on ``group_name``.

    :param group_name: group to add chaining data to, e.g. ``Test_000``.
    :param component_map: components mapped to group names or group paths.
        Bare group names are normalized (in place) to full ``Analyses/...``
        paths before being stored.
    """
    self.assert_writeable()
    for component, path in component_map.items():
        if not path.startswith('Analyses/'):
            # Expand bare group names to full group paths (mutates the map,
            # as the original did).
            component_map[component] = 'Analyses/{}'.format(path)
    self.add_analysis_attributes(group_name, component_map)
Adds the component chain to ``group_name`` in the fast5. These are added as attributes to the group. :param group_name: The group name you wish to add chaining data to, e.g. ``Test_000`` :param component_map: The set of components and corresponding group names or group paths that contribute data to the analysis. If group names are provided, these will be converted into group paths. If ``Test_000`` uses data from the results of ``first_component`` stored at ``Analyses/First_000/`` the component_map could be ``{'first_component': 'First_000'}`` or ``{'first_component': 'Analyses/First_000'}``.
def pickle_to_param(obj, name):
    """Return a param element wrapping the pickled serialization of ``obj``.

    NOTE: this is Python 2 code (``unicode``/``u""`` literals) — preserved.
    """
    serialized = unicode(pickle.dumps(obj))
    return from_pyvalue(u"pickle:%s" % name, serialized)
Return the top-level element of a document sub-tree containing the pickled serialization of a Python object.
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
    """Create a NonlinearFlow for second-order susceptibility calculations.

    Args:
        workdir: working directory of the flow.
        scf_input: AbinitInput with the parameters for the GS-SCF run.
        manager: TaskManager; read from ``manager.yml`` if None.
        allocate: allocate the flow before returning.
    """
    flow = cls(workdir, manager=manager)
    flow.register_scf_task(scf_input)
    # Build the DTE work on top of the just-registered SCF task.
    nl_work = DteWork.from_scf_task(flow[0][0])
    flow.register_work(nl_work)
    if allocate:
        flow.allocate()
    return flow
Create a `NonlinearFlow` for second order susceptibility calculations from an `AbinitInput` defining a ground-state run. Args: workdir: Working directory of the flow. scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run. manager: :class:`TaskManager` object. Read from `manager.yml` if None. allocate: True if the flow should be allocated before returning. Return: :class:`NonlinearFlow` object.
def is_np_compat():
    """Return whether MXNet NumPy-compatibility mode is currently on.

    It is off by default in the backend.
    """
    flag = ctypes.c_bool()
    check_call(_LIB.MXIsNumpyCompatible(ctypes.byref(flag)))
    return flag.value
Checks whether the NumPy compatibility is currently turned on. NumPy-compatibility is turned off by default in backend. Returns ------- A bool value indicating whether the NumPy compatibility is currently on.
def parse(self, input_text, syncmap):
    """Abstract: parse ``input_text`` and append fragments to ``syncmap``.

    Concrete subclasses must override; calling this raises via log_exc.
    """
    message = u"%s is abstract and cannot be called directly" % (self.TAG)
    self.log_exc(message, None, True, NotImplementedError)
Parse the given ``input_text`` and append the extracted fragments to ``syncmap``. :param input_text: the input text as a Unicode string (read from file) :type input_text: string :param syncmap: the syncmap to append to :type syncmap: :class:`~aeneas.syncmap.SyncMap`
def _get_memory_contents(self):
    """Run the scheduler once and memoize per-operation memory contents.

    Returns a list of frozensets of tensor names, one per operation in the
    computed schedule.
    """
    if self._memory_contents is None:
        schedule = scheduler.minimize_peak_memory(
            self._graph, self._scheduler_alg)
        self._memory_contents = (
            self._graph.compute_memory_contents_under_schedule(schedule))
    return self._memory_contents
Runs the scheduler to determine memory contents at every point in time. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into GetAllOperationNames()).
def rivermap_update(self, river, water_flow, rivermap, precipitations):
    """Accumulate rainfall along ``river`` into ``rivermap``.

    The first (seed) cell takes its raw water flow; each following cell adds
    its precipitation to the accumulated value of the previous river cell.
    """
    prev = None
    for x, y in river:
        if prev is None:
            # Seed cell: start from the raw water flow.
            rivermap[y, x] = water_flow[y, x]
        else:
            px, py = prev
            rivermap[y, x] = precipitations[y, x] + rivermap[py, px]
        prev = (x, y)
Update the rivermap with the rainfall that is to become the waterflow
def load_config(self):
    """Load shell settings from the awsshellrc config file (or template)."""
    self.config_obj = Config().load('awsshellrc')
    section = self.config_obj['aws-shell']
    self.config_section = section
    self.model_completer.match_fuzzy = section.as_bool('match_fuzzy')
    self.enable_vi_bindings = section.as_bool('enable_vi_bindings')
    self.show_completion_columns = section.as_bool('show_completion_columns')
    self.show_help = section.as_bool('show_help')
    self.theme = section['theme']
Load the config from the config file or template.
def remove_ipv4addr(self, ipv4addr):
    """Remove the first matching IPv4 address from the host.

    Entries may be plain dicts (keyed by 'ipv4addr') or HostIPv4 objects.

    :param str ipv4addr: the IP address to remove
    """
    for addr in self.ipv4addrs:
        if isinstance(addr, dict):
            matched = addr['ipv4addr'] == ipv4addr
        else:
            matched = isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr
        if matched:
            self.ipv4addrs.remove(addr)
            break
Remove an IPv4 address from the host. :param str ipv4addr: The IP address to remove
def get_bitcoind_client():
    """Create an RPC proxy connected to the configured bitcoind node."""
    opts = get_bitcoin_opts()
    return create_bitcoind_service_proxy(
        opts['bitcoind_user'],
        opts['bitcoind_passwd'],
        server=opts['bitcoind_server'],
        port=opts['bitcoind_port'])
Connect to the bitcoind node
def datetime(self):
    """Timezone-naive ``datetime.datetime`` for this Date, offset-corrected.

    The result (``_datetime`` minus ``_offset`` seconds) is cached under
    'dt_scale' after the first computation.
    """
    if 'dt_scale' not in self._cache:
        self._cache['dt_scale'] = (
            self._datetime - timedelta(seconds=self._offset))
    return self._cache['dt_scale']
Conversion of the Date object into a ``datetime.datetime`` The resulting object is a timezone-naive instance with the same scale as the originating Date object.
def com_daltonmaag_check_required_fields(ufo_font):
    """Check that the fontinfo fields required by ufo2ft are present.

    Required: unitsPerEm, ascender, descender, xHeight, capHeight, familyName.
    """
    required = ("unitsPerEm", "ascender", "descender",
                "xHeight", "capHeight", "familyName")
    # The info object stores values behind underscore-prefixed attributes.
    missing = [field for field in required
               if ufo_font.info.__dict__.get("_" + field) is None]
    if missing:
        yield FAIL, f"Required field(s) missing: {missing}"
    else:
        yield PASS, "Required fields present."
Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
def remove_polygons(self, test):
    """Remove polygons for which ``test(points, layer, datatype)`` is truthy.

    Elements left with no polygons are dropped from the cell entirely.

    Returns
    -------
    out : ``Cell``
        This cell (for chaining).
    """
    emptied = []
    for element in self.elements:
        if not isinstance(element, PolygonSet):
            continue
        idx = 0
        while idx < len(element.polygons):
            if test(element.polygons[idx], element.layers[idx],
                    element.datatypes[idx]):
                # Keep the three parallel lists in sync when removing.
                element.polygons.pop(idx)
                element.layers.pop(idx)
                element.datatypes.pop(idx)
            else:
                idx += 1
        if not element.polygons:
            emptied.append(element)
    for element in emptied:
        self.elements.remove(element)
    return self
Remove polygons from this cell. The function or callable ``test`` is called for each polygon in the cell. If its return value evaluates to ``True``, the corresponding polygon is removed from the cell. Parameters ---------- test : callable Test function to query whether a polygon should be removed. The function is called with arguments: ``(points, layer, datatype)`` Returns ------- out : ``Cell`` This cell. Examples -------- Remove polygons in layer 1: >>> cell.remove_polygons(lambda pts, layer, datatype: ... layer == 1) Remove polygons with negative x coordinates: >>> cell.remove_polygons(lambda pts, layer, datatype: ... any(pts[:, 0] < 0))
def as_list_data(self):
    """Return an Element for list representations.

    The element's tag is ``list_type`` with ``id`` and ``name`` subelements.
    """
    element = ElementTree.Element(self.list_type)
    ElementTree.SubElement(element, "id").text = self.id
    ElementTree.SubElement(element, "name").text = self.name
    return element
Return an Element to be used in a list. Most lists want an element with tag of list_type, and subelements of id and name. Returns: Element: list representation of object.
def pull_cfg_from_parameters_out_file(parameters_out_file,
                                      namelist_to_read="nml_allcfgs"):
    """Pull a single config namelist out of a MAGICC ``PARAMETERS.OUT`` file.

    Parameters
    ----------
    parameters_out_file : str
        The ``PARAMETERS.OUT`` file to read.
    namelist_to_read : str
        The namelist to extract.
    """
    contents = read_cfg_file(parameters_out_file)
    return pull_cfg_from_parameters_out(
        contents, namelist_to_read=namelist_to_read)
Pull out a single config set from a MAGICC ``PARAMETERS.OUT`` file. This function reads in the ``PARAMETERS.OUT`` file and returns a single file with the config that needs to be passed to MAGICC in order to do the same run as is represented by the values in ``PARAMETERS.OUT``. Parameters ---------- parameters_out_file : str The ``PARAMETERS.OUT`` file to read namelist_to_read : str The namelist to read from the file. Returns ------- :obj:`f90nml.Namelist` An f90nml object with the cleaned, read out config. Examples -------- >>> cfg = pull_cfg_from_parameters_out_file("PARAMETERS.OUT") >>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
    """Return the params as a (recursively) ordered dict.

    Ordering follows the partial ``preference_orders``: earlier orders take
    precedence; keys not found in any order sort last, alphabetically.
    """
    params_dict = self.as_dict(quiet=True)
    if not preference_orders:
        # Default partial orders used when none are supplied.
        preference_orders = [
            ["dataset_reader", "iterator", "model",
             "train_data_path", "validation_data_path", "test_data_path",
             "trainer", "vocabulary"],
            ["type"],
        ]

    def rank(key):
        # Position in each partial order (or past-the-end), then the key
        # itself for an alphabetical tiebreak.
        positions = [order.index(key) if key in order else len(order)
                     for order in preference_orders]
        return positions + [key]

    def sort_nested(dictionary):
        out = OrderedDict()
        for k, v in sorted(dictionary.items(), key=lambda item: rank(item[0])):
            out[k] = sort_nested(v) if isinstance(v, dict) else v
        return out

    return sort_nested(params_dict)
Returns Ordered Dict of Params from list of partial order preferences. Parameters ---------- preference_orders: List[List[str]], optional ``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means "A" > "B" > "C". For multiple preference_orders first will be considered first. Keys not found, will have last but alphabetical preference. Default Preferences: ``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"], ["type"]]``
def unserialize(cls, string):
    """Inverse of :meth:`serialize`: parse ``"<hex_id>_<hex_timestamp>"``.

    :param string: a string created by :meth:`serialize`.
    """
    id_hex, created_hex = string.split('_')
    created = datetime.utcfromtimestamp(int(created_hex, 16))
    return cls(int(id_hex, 16), created)
Unserializes from a string. :param string: A string created by :meth:`serialize`.
def _read_sequences(self, graph):
    """Read Sequence nodes from the RDF graph and register them on the document."""
    for element in self._get_elements(graph, SBOL.Sequence):
        identity = element[0]
        props = self._get_rdf_identified(graph, identity)
        props['elements'] = self._get_triplet_value(graph, identity, SBOL.elements)
        props['encoding'] = self._get_triplet_value(graph, identity, SBOL.encoding)
        seq = Sequence(**props)
        key = identity.toPython()
        # Index under both stores so it can be looked up either way.
        self._sequences[key] = seq
        self._collection_store[key] = seq
Read graph and add sequences to document
def catch_exceptions(orig_func):
    """Decorator: turn uncaught exceptions into ``(body, status)`` HTTP errors."""
    @functools.wraps(orig_func)
    def wrapper(self, *args, **kwargs):
        try:
            return orig_func(self, *args, **kwargs)
        except arvados.errors.ApiError as e:
            logging.exception("Failure")
            return ({"msg": e._get_reason(), "status_code": e.resp.status},
                    int(e.resp.status))
        except subprocess.CalledProcessError as e:
            return {"msg": str(e), "status_code": 500}, 500
        except MissingAuthorization:
            return ({"msg": "'Authorization' header is missing or empty, expecting Arvados API token",
                     "status_code": 401}, 401)
        except ValueError as e:
            return {"msg": str(e), "status_code": 400}, 400
        except Exception as e:
            return {"msg": str(e), "status_code": 500}, 500
    return wrapper
Catch uncaught exceptions and turn them into http errors
def infer_declared(ms, namespace=None):
    """Return declared information from the macaroons as a key-value map.

    First-party caveats (no location) are collected and resolved via
    ``infer_declared_from_conditions``; conflicting declarations of the same
    key are omitted, which makes later checks fail as intended.
    """
    conditions = [
        cav.caveat_id_bytes.decode('utf-8')
        for m in ms
        for cav in m.caveats
        if cav.location is None or cav.location == ''
    ]
    return infer_declared_from_conditions(conditions, namespace)
Retrieves any declared information from the given macaroons and returns it as a key-value map. Information is declared with a first party caveat as created by declared_caveat. If there are two caveats that declare the same key with different values, the information is omitted from the map. When the caveats are later checked, this will cause the check to fail. namespace is the Namespace used to retrieve the prefix associated to the uri, if None it will use the STD_NAMESPACE only.
def to_datetime(value):
    """Convert a value to a datetime.

    None passes through; integers go through the permissive parser; anything
    else is parsed as an ISO-8601 string.
    NOTE(review): ``parser.parse`` on an integer looks suspicious — confirm
    the intended input types.
    """
    if value is None:
        return None
    if isinstance(value, six.integer_types):
        return parser.parse(value)
    return parser.isoparse(value)
Converts a string to a datetime.
def send_message(self, msg):
    """Send a protobuf message over the Clementine remote protocol socket.

    Best-effort: send failures are silently ignored (the remote may have
    disconnected). No-op when no socket is connected.
    """
    if self.socket is None:
        return
    msg.version = self.PROTOCOL_VERSION
    payload = msg.SerializeToString()
    # Frame format: 4-byte big-endian length prefix followed by the payload.
    frame = struct.pack(">I", len(payload)) + payload
    try:
        self.socket.send(frame)
    except Exception:
        # Deliberate best-effort behavior preserved from the original.
        pass
Internal method used to send messages through Clementine remote network protocol.
def reftrack_version_data(rt, role):
    """Return the version data for the file loaded by the reftrack.

    :param rt: the Reftrack holding the data
    :param role: item data role (QtCore.Qt.ItemDataRole)
    :returns: data for the version, or None if nothing is loaded
    """
    tfi = rt.get_taskfileinfo()
    if not tfi:
        return None
    return filesysitemdata.taskfileinfo_version_data(tfi, role)
Return the data for the version that is loaded by the reftrack :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data :type rt: :class:`jukeboxcore.reftrack.Reftrack` :param role: item data role :type role: QtCore.Qt.ItemDataRole :returns: data for the version :rtype: depending on role :raises: None
def get_results(self, job_id):
    """GET the results of a job by ID (Auth0 Jobs API).

    Args:
        job_id (str): the ID of the job.
    """
    return self.client.get(self._url('%s/results' % job_id))
Get results of a job Args: job_id (str): The ID of the job. See: https://auth0.com/docs/api/management/v2#!/Jobs/get_results
def s3path(self, rel_path):
    """Return ``rel_path``'s public URL rewritten as an ``s3://`` URL.

    The scheme is replaced with ``s3`` and the host with this bucket's name.
    """
    # BUG FIX: bare ``import urlparse`` is Python 2 only; fall back to the
    # Python 3 location so the method works on both.
    try:
        import urlparse
    except ImportError:
        import urllib.parse as urlparse
    parts = list(urlparse.urlparse(self.path(rel_path, public_url=True)))
    parts[0] = 's3'               # scheme
    parts[1] = self.bucket_name   # netloc
    return urlparse.urlunparse(parts)
Return the path as an S3 schema
def postActivate_(self):
    """Create the temp file used for image dumps and instantiate the analyzer."""
    self.tmpfile = os.path.join(constant.tmpdir, "valkka-" + str(os.getpid()))
    self.analyzer = ExternalDetector(
        executable=self.executable,
        image_dimensions=self.image_dimensions,
        tmpfile=self.tmpfile,
    )
Create temporary file for image dumps and the analyzer itself
def group_id(self, resource_id):
    """Append the group ID to the request URI for specific-group retrieval.

    Args:
        resource_id (string): the group id.

    NOTE(review): the URI is only updated when ``self._name != 'group'`` —
    presumably the generic 'group' resource does not support retrieval by
    ID; confirm against the API.
    """
    if self._name == 'group':
        return
    self._request_uri = '{}/{}'.format(self._api_uri, resource_id)
Update the request URI to include the Group ID for specific group retrieval. Args: resource_id (string): The group id.
def _setID(self, id): if type(id) in (types.ListType, types.TupleType): try: for key in self._sqlPrimary: value = id[0] self.__dict__[key] = value id = id[1:] except IndexError: raise 'Not enough id fields, required: %s' % len(self._sqlPrimary) elif len(self._sqlPrimary) <= 1: key = self._sqlPrimary[0] self.__dict__[key] = id else: raise 'Not enough id fields, required: %s' % len(self._sqlPrimary) self._new = False
Set the ID, ie. the values for primary keys. id can be either a list, following the _sqlPrimary, or some other type, that will be set as the singleton ID (requires 1-length sqlPrimary).
def non_interactions(graph, t=None):
    """Yield node pairs that have no edge between them.

    ``t`` (snapshot id) is accepted for API compatibility; the pairs are
    identified on the graph as given (flattened).
    """
    remaining = set(graph)
    while remaining:
        node = remaining.pop()
        neighbours = set(graph[node])
        # Every not-yet-visited node that is not adjacent forms a non-edge.
        for other in remaining - neighbours:
            yield (node, other)
Returns the non-existent edges in the graph at time t. Parameters ---------- graph : NetworkX graph. Graph to find non-existent edges. t : snapshot id (default=None) If None the non-existent edges are identified on the flattened graph. Returns ------- non_edges : iterator Iterator of edges that are not in the graph.
def group_list(**kwargs):
    """Show available routing groups."""
    ctx = Context(**kwargs)
    storage = ctx.repo.create_secure_service('storage')
    ctx.execute_action('group:list', **{'storage': storage})
Show available routing groups.
def get_hostinfo(self, callb=None):
    """Request the device info from the device.

    ``callb`` is executed when a response is received.

    :param callb: callable run on response, or None
    :returns: None
    """
    self.req_with_resp(GetInfo, StateInfo, callb=callb)
    return None
Convenience method to request the device info from the device This will request the information from the device and request that callb be executed when a response is received. There is no default callback :param callb: Callable to be used when the response is received. If not set, self.resp_set_label will be used. :type callb: callable :returns: None :rtype: None
def user_add_link(self):
    """Create a link on behalf of the current user."""
    if not self.check_post_role()['ADD']:
        return False
    post_data = self.get_post_data()
    post_data['user_name'] = self.get_current_user()
    # Draw random uids until one is free.
    uid = tools.get_uudd(2)
    while MLink.get_by_uid(uid):
        uid = tools.get_uudd(2)
    MLink.create_link(uid, post_data)
    self.redirect('/link/list')
Create link by user.
def generate_command(self):
    """Print a sample command line built from ``self.spec``.

    Options are emitted in sorted key order; list examples are
    space-joined and dict examples are JSON-encoded.
    """
    parts = [f"{sys.argv[0]}"]
    for key in sorted(self.spec):
        entry = self.spec[key]
        example = entry.get('example', '')
        if entry['type'] == list:
            value = " ".join(example)
        elif entry['type'] == dict:
            value = f"\'{json.dumps(example)}\'"
        else:
            value = example
        parts.append(f" --{key.lower()} {value}")
    print(" \\\n".join(parts))
Generate a sample command
def vfolders(access_key):
    """List and manage virtual folders."""
    fields = [
        ('Name', 'name'),
        ('Created At', 'created_at'),
        ('Last Used', 'last_used'),
        ('Max Files', 'max_files'),
        ('Max Size', 'max_size'),
    ]
    field_list = ' '.join(spec for _, spec in fields)
    if access_key is None:
        query = 'query { vfolders { $fields } }'
    else:
        query = 'query($ak:String) { vfolders(access_key:$ak) { $fields } }'
    query = query.replace('$fields', field_list)
    variables = {'ak': access_key}
    with Session() as session:
        try:
            resp = session.Admin.query(query, variables)
        except Exception as e:
            print_error(e)
            sys.exit(1)
        print(tabulate((item.values() for item in resp['vfolders']),
                       headers=(header for header, _ in fields)))
List and manage virtual folders.
def send(self, message):
    """Serialize ``message`` as JSON and send it over the websocket.

    :param message: Message to be send, usually a Python dictionary.
    :raises SelenolWebSocketClosedException: if the connection is closed.
    """
    payload = json.dumps(message)
    try:
        self.ws.send(payload)
    except websocket._exceptions.WebSocketConnectionClosedException:
        raise SelenolWebSocketClosedException()
Send a the defined message to the backend. :param message: Message to be send, usually a Python dictionary.
def load_private_key(self, priv_key):
    """Register the SSH private key found at path ``priv_key``."""
    with open(priv_key) as key_file:
        self._private_key = paramiko.RSAKey.from_private_key(key_file)
Register the SSH private key.
def _fragment_one_level(self, mol_graphs):
    """Perform one fragmentation pass over ``mol_graphs``.

    For each graph, try to break every edge into two disconnected
    subgraphs; fragments not already present (by isomorphism) in
    ``self.unique_fragments`` are recorded. Edges that cannot be split
    belong to rings; if ``self.open_rings`` is set, the ring is opened
    instead and the resulting fragment handled the same way.

    :param mol_graphs: iterable of molecule graphs to fragment.
    :return: list of fragments first seen during this pass.
    """
    unique_fragments_on_this_level = []
    for mol_graph in mol_graphs:
        for edge in mol_graph.graph.edges:
            bond = [(edge[0],edge[1])]
            try:
                # Splitting succeeds only for non-ring bonds.
                fragments = mol_graph.split_molecule_subgraphs(bond, allow_reverse=True)
                for fragment in fragments:
                    # Linear scan for an isomorphic duplicate.
                    found = False
                    for unique_fragment in self.unique_fragments:
                        if unique_fragment.isomorphic_to(fragment):
                            found = True
                            break
                    if not found:
                        self.unique_fragments.append(fragment)
                        unique_fragments_on_this_level.append(fragment)
            except MolGraphSplitError:
                # The bond is part of a ring; optionally open the ring.
                if self.open_rings:
                    fragment = open_ring(mol_graph, bond, self.opt_steps)
                    found = False
                    for unique_fragment in self.unique_fragments:
                        if unique_fragment.isomorphic_to(fragment):
                            found = True
                            break
                    if not found:
                        self.unique_fragments.append(fragment)
                        # Ring-opened fragments are additionally tracked separately.
                        self.unique_fragments_from_ring_openings.append(fragment)
                        unique_fragments_on_this_level.append(fragment)
    return unique_fragments_on_this_level
Perform one step of iterative fragmentation on a list of molecule graphs. Loop through the graphs, then loop through each graph's edges and attempt to remove that edge in order to obtain two disconnected subgraphs, aka two new fragments. If successful, check to see if the new fragments are already present in self.unique_fragments, and append them if not. If unsuccessful, we know that edge belongs to a ring. If we are opening rings, do so with that bond, and then again check if the resulting fragment is present in self.unique_fragments and add it if it is not.
def _make_transitions(self) -> List[Transition]:
    """Load and return the transitions declared in the configured module."""
    transitions_module = importlib.import_module(settings.TRANSITIONS_MODULE)
    return transitions_module.transitions
Load the transitions file.
def last(self):
    """Returns the last batch for the batched sequence.

    :rtype: :class:`Batch` instance.
    """
    last_index = max(self.number - 1, 0)
    return Batch(last_index * self.size, self.size, self.total_size)
Returns the last batch for the batched sequence. :rtype: :class:`Batch` instance.
def worldview(self) -> str:
    """Get a random worldview.

    :return: Worldview, e.g. ``'Pantheism'``.
    """
    return self.random.choice(self._data['worldview'])
Get a random worldview. :return: Worldview. :Example: Pantheism.
def get_obs_function(self):
    """Return the observation function parsed from the PomdpX network.

    Each entry is a dict with 'Var', 'Parent', 'Type' and 'Parameter'
    keys describing one conditional probability; a missing 'type'
    attribute on the Parameter element defaults to 'TBL'.
    """
    obs_function = []
    for obs_elem in self.network.findall('ObsFunction'):
        for cond_prob_elem in obs_elem.findall('CondProb'):
            entry = defaultdict(list)
            entry['Var'] = cond_prob_elem.find('Var').text
            entry['Parent'] = cond_prob_elem.find('Parent').text.split()
            param_type = cond_prob_elem.find('Parameter').get('type')
            entry['Type'] = param_type if param_type else 'TBL'
            entry['Parameter'] = self.get_parameter(cond_prob_elem)
            obs_function.append(entry)
    return obs_function
Returns the observation function as nested dict in the case of table- type parameter and a nested structure in case of decision diagram parameter Example -------- >>> reader = PomdpXReader('Test_PomdpX.xml') >>> reader.get_obs_function() [{'Var': 'obs_sensor', 'Parent': ['action_rover', 'rover_1', 'rock_1'], 'Type': 'TBL', 'Parameter': [{'Instance': ['amw', '*', '*', '-'], 'ProbTable': ['1.0', '0.0']}, ... ] }]
def _get_template_dirs(): return filter(lambda x: os.path.exists(x), [ os.path.join(os.path.expanduser('~'), '.py2pack', 'templates'), os.path.join('/', 'usr', 'share', 'py2pack', 'templates'), os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'), ])
existing directories where to search for jinja2 templates. The order is important. The first found template from the first found dir wins!
def in_unit_of(self, unit, as_quantity=False):
    """Return the current value converted to the given units.

    :param unit: an astropy.Unit instance, or a string convertible to one,
        like "1 / (erg cm**2 s)".
    :param as_quantity: if True return an astropy.Quantity, otherwise the
        bare floating point value. Default is False.
    :return: a float or an astropy.Quantity depending on ``as_quantity``.
    """
    converted = self.as_quantity.to(u.Unit(unit))
    if as_quantity:
        return converted
    return converted.value
Return the current value transformed to the new units :param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit instance, like "1 / (erg cm**2 s)" :param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number. Default is False :return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
def pdf_row_limiter(rows, limits=None, **kwargs):
    """Slice ``rows`` according to ``[start, stop]`` limits.

    A missing or None entry leaves the corresponding bound open.
    """
    if not limits:
        limits = [None, None]
    start = limits[0]
    stop = limits[1] if len(limits) > 1 else None
    return rows[start:stop]
Limit rows by slicing with the given values. We do not attempt a best-effort algorithm here, because the possibilities are infinite when working with text data extracted from a PDF.
def login(self, username, password):
    """Login to the TS3 Server.

    @param username: Username
    @type username: str
    @param password: Password
    @type password: str
    @return: True on success, False otherwise.
    """
    result = self.send_command(
        'login',
        keys={'client_login_name': username,
              'client_login_password': password})
    if result != 0:
        return False
    self._log.info('Login Successful')
    return True
Login to the TS3 Server @param username: Username @type username: str @param password: Password @type password: str
def start(self):
    """Start the pinging coroutine on the configured event loop.

    Always behaves as if :meth:`stop` was called right before it.
    """
    self.stop()
    coro = self._pinger()
    self._task = asyncio.ensure_future(coro, loop=self._loop)
Start the pinging coroutine using the client and event loop which was passed to the constructor. :meth:`start` always behaves as if :meth:`stop` was called right before it.
def callback(self, cats):
    """Enable/disable widgets that depend on a catalog being selected,
    then invoke ``done_callback`` if one is registered."""
    enable = bool(cats)
    if not enable:
        self.search.visible = False
    for widget in (self.search_widget, self.remove_widget):
        enable_widget(widget, enable)
    if self.done_callback:
        self.done_callback(cats)
When a catalog is selected, enable widgets that depend on that condition and do done_callback
def secgroup_create(self, name, description):
    """Create a security group and return its name and description."""
    self.compute_conn.security_groups.create(name, description)
    return {'name': name, 'description': description}
Create a security group
def add_section(self, section_name):
    """Add an empty section named ``section_name``.

    Raises if the name is the reserved 'DEFAULT' or already present.
    """
    if section_name == "DEFAULT":
        raise Exception("'DEFAULT' is reserved section name.")
    if section_name in self._sections:
        raise Exception(
            "Error! %s is already one of the sections" % section_name)
    self._sections[section_name] = Section(section_name)
Add an empty section.
def _filter_response(self, response_dict): filtered_dict = {} for key, value in response_dict.items(): if key == "_jsns": continue if key == "xmlns": continue if type(value) == list and len(value) == 1: filtered_dict[key] = value[0] elif type(value) == dict and len(value.keys()) == 1 and "_content" \ in value.keys(): filtered_dict[key] = value["_content"] elif type(value) == dict: tmp_dict = self._filter_response(value) filtered_dict[key] = tmp_dict else: filtered_dict[key] = value return filtered_dict
Add additional filters to the response dictionary Currently the response dictionary is filtered like this: * If a list only has one item, the list is replaced by that item * Namespace-Keys (_jsns and xmlns) are removed :param response_dict: the pregenerated, but unfiltered response dict :type response_dict: dict :return: The filtered dictionary :rtype: dict
def _call_config(self, method, *args, **kwargs): return getattr(self._config, method)(self._section_name, *args, **kwargs)
Call the configuration at the given method which must take a section name as first argument
def _select_options(pat):
    """Return registered option keys matching `pat`.

    An exact key match returns just that key; ``'all'`` returns every
    registered key; otherwise a case-insensitive regex search is applied.
    """
    if pat in _registered_options:
        return [pat]
    keys = sorted(_registered_options)
    if pat == 'all':
        return keys
    return [key for key in keys if re.search(pat, key, re.I)]
returns a list of keys matching `pat` if pat=="all", returns all registered options
def findCampaignsByName(target):
    """Resolve ``target`` to sky coordinates and return covering campaigns.

    :return: (campaigns, ra_deg, dec_deg) for the resolved position (J2000).
    :raises ValueError: if the name cannot be resolved to coordinates.

    Exits with an error message if AstroPy is not installed.
    """
    try:
        from astropy.coordinates import SkyCoord
        from astropy.coordinates.name_resolve import NameResolveError
        from astropy.utils.data import conf
        conf.remote_timeout = 90
    except ImportError:
        print('Error: AstroPy needs to be installed for this feature.')
        sys.exit(1)
    try:
        crd = SkyCoord.from_name(target)
    except NameResolveError:
        raise ValueError('Could not find coordinates '
                         'for target "{0}".'.format(target))
    campaigns = findCampaigns(crd.ra.deg, crd.dec.deg)
    return campaigns, crd.ra.deg, crd.dec.deg
Returns a list of the campaigns that cover a given target. Parameters ---------- target : str Name of the celestial object. Returns ------- campaigns : list of int A list of the campaigns that cover the given target name. ra, dec : float, float Resolved coordinates in decimal degrees (J2000). Exceptions ---------- Raises an ImportError if AstroPy is not installed. Raises a ValueError if `name` cannot be resolved to coordinates.
def delete(self, file_path, branch, commit_message, **kwargs):
    """Delete a file on the server.

    Args:
        file_path (str): Path of the file to remove
        branch (str): Branch from which the file will be removed
        commit_message (str): Commit message for the deletion
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabDeleteError: If the server cannot perform the request
    """
    encoded_path = file_path.replace('/', '%2F')
    self.gitlab.http_delete(
        '%s/%s' % (self.path, encoded_path),
        query_data={'branch': branch, 'commit_message': commit_message},
        **kwargs)
Delete a file on the server. Args: file_path (str): Path of the file to remove branch (str): Branch from which the file will be removed commit_message (str): Commit message for the deletion **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server cannot perform the request
def getDataPath(_system=thisSystem, _FilePath=FilePath):
    """Return (creating it if needed) the per-user local data directory,
    e.g. for TLS credentials."""
    if _system == "Windows":
        base = "~/Crypto101/"
    else:
        base = "~/.crypto101/"
    path = _FilePath(expanduser(base))
    if not path.exists():
        path.makedirs()
    return path
Gets an appropriate path for storing some local data, such as TLS credentials. If the path doesn't exist, it is created.
def p_startswith(self, st, ignorecase=False):
    """If the input starts with ``st`` at the current position, consume it
    and return the matched text (in its original case); otherwise return
    False without advancing."""
    length = len(st)
    result = self.input[self.pos:self.pos + length]
    candidate = result.lower() if ignorecase else result
    target = st.lower() if ignorecase else st
    if candidate == target:
        self.pos += length
        return result
    return False
Return True if the input starts with `st` at current position
def add_text(self, end, next=None):
    """Append ``self.format[str_begin:end]`` to the format output.

    :param end: The ending index of the string.
    :param next: The next string begin index. If None, the string index
        will not be updated.
    """
    if end != self.str_begin:
        self.fmt.append_text(self.format[self.str_begin:end])
    if next is not None:
        self.str_begin = next
Adds the text from string beginning to the specified ending index to the format. :param end: The ending index of the string. :param next: The next string begin index. If None, the string index will not be updated.
def hash(*cols):
    """Calculates the hash code of given columns, and returns the result
    as an int column.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
    [Row(hash=-757602832)]
    """
    sc = SparkContext._active_spark_context
    java_cols = _to_seq(sc, cols, _to_java_column)
    return Column(sc._jvm.functions.hash(java_cols))
Calculates the hash code of given columns, and returns the result as an int column. >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect() [Row(hash=-757602832)]
def delete(self):
    """Detach this statement from its ProcmailRC structure.

    :return: the id of the (former) parent.
    :raises RuntimeError: if the statement is not inside a structure.
    """
    if self.parent is None:
        raise RuntimeError(
            "Current statement has no parent, so it cannot " +
            "be deleted form a procmailrc structure"
        )
    if self.id is None:
        raise RuntimeError("id not set but have a parent, this should not be happening")
    parent_id = self.parent.id
    self.parent.pop(int(self.id.split('.')[-1]))
    self.parent = None
    self.id = None
    return parent_id
Remove the statement from a ProcmailRC structure. Raise a RuntimeError if the statement is not inside a ProcmailRC structure; otherwise return the parent id.
def on_exception(func):
    """Decorator factory: when a handler raises, run ``func(exception)``
    and return its result to AWS."""
    class OnExceptionDecorator(LambdaDecorator):
        def on_exception(self, exception):
            # Delegate the exception to the user-supplied handler.
            return func(exception)
    return OnExceptionDecorator
Run a function when a handler thows an exception. It's return value is returned to AWS. Usage:: >>> # to create a reusable decorator >>> @on_exception ... def handle_errors(exception): ... print(exception) ... return {'statusCode': 500, 'body': 'uh oh'} >>> @handle_errors ... def handler(event, context): ... raise Exception('it broke!') >>> handler({}, object()) it broke! {'statusCode': 500, 'body': 'uh oh'} >>> # or a one off >>> @on_exception(lambda e: {'statusCode': 500}) ... def handler(body, context): ... raise Exception >>> handler({}, object()) {'statusCode': 500}
def reflected_light_intensity(self):
    """A measurement of the reflected light intensity, as a percentage."""
    self._ensure_mode(self.MODE_REFLECT)
    raw = self.value(0)
    return raw * self._scale('REFLECT')
A measurement of the reflected light intensity, as a percentage.
def call_requests(
    requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
    """Takes a request or list of Requests and calls them.

    Args:
        requests: Request object, or a collection of them.
        methods: The list of methods that can be called.
        debug: Include more information in error responses.
    """
    # collections.Iterable was removed in Python 3.10; the abstract base
    # classes live in collections.abc.
    if isinstance(requests, collections.abc.Iterable):
        return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
    return safe_call(requests, methods, debug=debug)
Takes a request or list of Requests and calls them. Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. debug: Include more information in error responses.
def update_terms_translations(self, project_id, file_path=None, language_code=None,
                              overwrite=False, sync_terms=False, tags=None,
                              fuzzy_trigger=None):
    """Updates terms translations.

    overwrite: set True to overwrite existing translations
    sync_terms: set True to sync terms (terms not found in the uploaded
        file are deleted from the project and new ones added); ignored
        when updating translations
    tags: tag imported terms; keys: "all", "new", "obsolete",
        "overwritten_translations"
    fuzzy_trigger: set True to mark corresponding translations from the
        other languages as fuzzy for the updated values
    """
    upload_kwargs = dict(
        project_id=project_id,
        updating=self.UPDATING_TERMS_TRANSLATIONS,
        file_path=file_path,
        language_code=language_code,
        overwrite=overwrite,
        sync_terms=sync_terms,
        tags=tags,
        fuzzy_trigger=fuzzy_trigger,
    )
    return self._upload(**upload_kwargs)
Updates terms translations overwrite: set it to True if you want to overwrite translations sync_terms: set it to True if you want to sync your terms (terms that are not found in the uploaded file will be deleted from project and the new ones added). Ignored if updating = translations tags: Add tags to the project terms; available when updating terms or terms_translations; you can use the following keys: "all" - for the all the imported terms, "new" - for the terms which aren't already in the project, "obsolete" - for the terms which are in the project but not in the imported file and "overwritten_translations" - for the terms for which translations change fuzzy_trigger: set it to True to mark corresponding translations from the other languages as fuzzy for the updated values
def init_controller(url):
    """Initialize (or reuse) the single global Vera controller.

    Provides one shared controller for applications that can't manage
    their own.

    :return: [controller, created] where ``created`` says whether this
        call instantiated a new controller.
    """
    global _VERA_CONTROLLER
    created = False
    if _VERA_CONTROLLER is None:
        _VERA_CONTROLLER = VeraController(url)
        created = True
        _VERA_CONTROLLER.start()
    return [_VERA_CONTROLLER, created]
Initialize a controller. Provides a single global controller for applications that can't do this themselves
def as_scipy_functional(func, return_gradient=False):
    """Wrap ``func`` as a plain function on flat arrays for scipy solvers.

    :param func: the `Functional` to wrap.
    :param return_gradient: if True, also return the wrapped gradient.
    :return: the wrapped functional, and the wrapped gradient when
        ``return_gradient`` is true.
    """
    def wrapped(arr):
        # Reshape the flat scipy array back onto the functional's domain.
        return func(np.asarray(arr).reshape(func.domain.shape))

    if not return_gradient:
        return wrapped

    def wrapped_gradient(arr):
        return np.asarray(
            func.gradient(np.asarray(arr).reshape(func.domain.shape)))

    return wrapped, wrapped_gradient
Wrap ``op`` as a function operating on linear arrays. This is intended to be used with the `scipy solvers <https://docs.scipy.org/doc/scipy/reference/optimize.html>`_. Parameters ---------- func : `Functional`. A functional that should be wrapped return_gradient : bool, optional ``True`` if the gradient of the functional should also be returned, ``False`` otherwise. Returns ------- function : ``callable`` The wrapped functional. gradient : ``callable``, optional The wrapped gradient. Only returned if ``return_gradient`` is true. Examples -------- Wrap functional and solve simple problem (here toy problem ``min_x ||x||^2``): >>> func = odl.solvers.L2NormSquared(odl.rn(3)) >>> scipy_func = odl.as_scipy_functional(func) >>> from scipy.optimize import minimize >>> result = minimize(scipy_func, x0=[0, 1, 0]) >>> np.allclose(result.x, [0, 0, 0]) True The gradient (jacobian) can also be provided: >>> func = odl.solvers.L2NormSquared(odl.rn(3)) >>> scipy_func, scipy_grad = odl.as_scipy_functional(func, True) >>> from scipy.optimize import minimize >>> result = minimize(scipy_func, x0=[0, 1, 0], jac=scipy_grad) >>> np.allclose(result.x, [0, 0, 0]) True Notes ----- If the data representation of ``op``'s domain is of type `NumpyTensorSpace`, this incurs no significant overhead. If the space type is ``CudaFn`` or some other nonlocal type, the overhead is significant.
def webhook_handler(request):
    """Receive the webhook from mbed cloud services and pass the raw
    HTTP body straight to the mbed SDK."""
    raw = request.stream.read()
    body = raw.decode('utf8')
    print('webhook handler saw:', body)
    api.notify_webhook_received(payload=body)
    print('key store contains:', api._db.keys())
Receives the webhook from mbed cloud services Passes the raw http body directly to mbed sdk, to notify that a webhook was received
def is_superset(reference_set):
    """'Is superset' validation_function generator.

    :param reference_set: the reference set
    :return: a validation function checking that x is a superset of
        ``reference_set``, raising ``NotSuperset`` otherwise.
    """
    def is_superset_of(x):
        missing = reference_set - x
        if missing:
            raise NotSuperset(wrong_value=x, reference_set=reference_set,
                              missing=missing)
        return True
    is_superset_of.__name__ = 'is_superset_of_{}'.format(reference_set)
    return is_superset_of
'Is superset' validation_function generator. Returns a validation_function to check that x is a superset of reference_set :param reference_set: the reference set :return:
def select_subscription(subs_code, subscriptions):
    """Return the uwnetid.subscription object with the given
    subscription_code, or None when absent."""
    if not subs_code or not subscriptions:
        return None
    for subscription in subscriptions:
        if subscription.subscription_code == subs_code:
            return subscription
    return None
Return the uwnetid.subscription object with the subs_code.
def pickle_dict(items):
    """Return a copy of ``items`` where non-string values are pickled.

    A new key '_pickled' holds a comma-separated list of the keys whose
    values were pickled.
    """
    result = {}
    pickled_keys = []
    for key, val in items.items():
        # `basestring` exists only in Python 2; on Python 3 (this file
        # uses f-strings elsewhere) test against str instead.
        if isinstance(val, str):
            result[key] = val
        else:
            pickled_keys.append(key)
            result[key] = pickle.dumps(val)
    if pickled_keys:
        result['_pickled'] = ','.join(pickled_keys)
    return result
Returns a new dictionary where values which aren't instances of basestring are pickled. Also, a new key '_pickled' contains a comma separated list of keys corresponding to the pickled values.
def GET(self, url):
    """Fetch ``url`` and return the text content of the HTTP GET response.

    Side effects: remembers the response encoding in ``self.encoding``;
    when ``self.verbose`` is set, dumps status/encoding/headers to stdout.
    """
    response = requests.get(url)
    if self.verbose:
        sys.stdout.write("%s %s\n" % (response.status_code, response.encoding))
        sys.stdout.write(str(response.headers) + "\n")
    self.encoding = response.encoding
    return response.text
returns text content of HTTP GET response.
def _case_stmt(self, stmt: Statement, sctx: SchemaContext) -> None:
    """Handle case statement."""
    case_node = CaseNode()
    self._handle_child(case_node, stmt, sctx)
Handle case statement.