code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def load_data(cr, module_name, filename, idref=None, mode='init'):
    """Load an xml, csv or yml data file from your post script.

    The usual case for this is the occurrence of newly added essential or
    useful data in the module that is marked with "noupdate='1'" and without
    "forcecreate='1'" so that it will not be loaded by the usual upgrade
    mechanism. Leaving the 'mode' argument to its default 'init' will load
    the data from your migration script.

    Theoretically, you could simply load a stock file from the module, but
    be careful not to reinitialize any data that could have been customized.
    Preferably, select only the newly added items. Copy these to a file in
    your migrations directory and load that file. Leave it to the user to
    actually delete existing resources that are marked with 'noupdate'
    (other named items will be deleted automatically).

    :param module_name: the name of the module
    :param filename: the path to the filename, relative to the module \
    directory.
    :param idref: optional hash with ?id mapping cache?
    :param mode:
        one of 'init', 'update', 'demo', 'init_no_create'.
        Always use 'init' for adding new items from files that are marked
        with 'noupdate'. Defaults to 'init'.

        'init_no_create' is a hack to load data for records which have
        forcecreate=False set. As those records won't be recreated during
        the update, standard Odoo would recreate the record if it was
        deleted, but this will fail in cases where there are required
        fields to be filled which are not contained in the data file.
    """
    if idref is None:
        idref = {}
    logger.info('%s: loading %s' % (module_name, filename))
    _, ext = os.path.splitext(filename)
    pathname = os.path.join(module_name, filename)
    fp = tools.file_open(pathname)
    try:
        if ext == '.csv':
            # CSV files are always loaded with noupdate set, matching the
            # "newly added noupdate data" use case described above.
            noupdate = True
            tools.convert_csv_import(
                cr, module_name, pathname, fp.read(), idref, mode, noupdate)
        elif ext == '.yml':
            yaml_import(cr, module_name, fp, None, idref=idref, mode=mode)
        elif mode == 'init_no_create':
            # Only feed records that already exist in the database to the
            # XML importer, so forcecreate=False records are not recreated.
            for fp2 in _get_existing_records(cr, fp, module_name):
                tools.convert_xml_import(
                    cr, module_name, fp2, idref, mode='init',
                )
        else:
            tools.convert_xml_import(cr, module_name, fp, idref, mode=mode)
    finally:
        fp.close()
Load an xml, csv or yml data file from your post script. The usual case for this is the occurrence of newly added essential or useful data in the module that is marked with "noupdate='1'" and without "forcecreate='1'" so that it will not be loaded by the usual upgrade mechanism. Leaving the 'mode' argument to its default 'init' will load the data from your migration script. Theoretically, you could simply load a stock file from the module, but be careful not to reinitialize any data that could have been customized. Preferably, select only the newly added items. Copy these to a file in your migrations directory and load that file. Leave it to the user to actually delete existing resources that are marked with 'noupdate' (other named items will be deleted automatically). :param module_name: the name of the module :param filename: the path to the filename, relative to the module \ directory. :param idref: optional hash with ?id mapping cache? :param mode: one of 'init', 'update', 'demo', 'init_no_create'. Always use 'init' for adding new items from files that are marked with 'noupdate'. Defaults to 'init'. 'init_no_create' is a hack to load data for records which have forcecreate=False set. As those records won't be recreated during the update, standard Odoo would recreate the record if it was deleted, but this will fail in cases where there are required fields to be filled which are not contained in the data file.
def set_beeper_mode(self, state): """ :param state: a boolean of ture (on) or false ('off') :return: nothing """ values = {"desired_state": {"beeper_enabled": state}} response = self.api_interface.set_device_state(self, values) self._update_state_from_response(response)
:param state: a boolean of true ('on') or false ('off') :return: nothing
def validate_bands(bands): """Validate bands parameter.""" if not isinstance(bands, list): raise TypeError('Parameter bands must be a "list"') valid_bands = list(range(1, 12)) + ['BQA'] for band in bands: if band not in valid_bands: raise InvalidBandError('%s is not a valid band' % band)
Validate bands parameter.
def make_break(lineno, p): """ Checks if --enable-break is set, and if so, calls BREAK keyboard interruption for this line if it has not been already checked """ global last_brk_linenum if not OPTIONS.enableBreak.value or lineno == last_brk_linenum or is_null(p): return None last_brk_linenum = lineno return make_sentence('CHKBREAK', make_number(lineno, lineno, TYPE.uinteger))
Checks if --enable-break is set, and if so, calls BREAK keyboard interruption for this line if it has not been already checked
def cache(self): """Return a cache instance.""" cache = self._cache or self.app.config.get('COLLECTIONS_CACHE') return import_string(cache) if isinstance(cache, six.string_types) \ else cache
Return a cache instance.
def freqpoly_plot(data): """make freqpoly plot of merged read lengths""" rel_data = OrderedDict() for key, val in data.items(): tot = sum(val.values(), 0) rel_data[key] = {k: v / tot for k, v in val.items()} fplotconfig = { 'data_labels': [ {'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'}, {'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'} ], 'id': 'flash_freqpoly_plot', 'title': 'FLASh: Frequency of merged read lengths', 'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data)))) } return linegraph.plot([data, rel_data], fplotconfig)
make freqpoly plot of merged read lengths
def datafield(*path, **kwargs): """A decorator that defines a field for data within a :class:`DataStruct` The decorated function becomes the parser for a :class:`DataField` which will be assigned to a data structure under the function's defined name. Parameters ---------- path: tuple The path to a value within a raw piece of data. If no path is provided the path is assumed to be ``(self.this_name)`` where ``self.this_name`` is that of the attribute this field was defined under. **kwargs: By explicitely claiming ``path=None``, no assumptions are made about the ``path``, causing all the raw data to be passed to the handler for parsing. """ if len(path) == 1 and isinstance(path[0], types.FunctionType): return DataField(**kwargs)(*path) else: return DataField(*path, **kwargs)
A decorator that defines a field for data within a :class:`DataStruct` The decorated function becomes the parser for a :class:`DataField` which will be assigned to a data structure under the function's defined name. Parameters ---------- path: tuple The path to a value within a raw piece of data. If no path is provided the path is assumed to be ``(self.this_name)`` where ``self.this_name`` is that of the attribute this field was defined under. **kwargs: By explicitely claiming ``path=None``, no assumptions are made about the ``path``, causing all the raw data to be passed to the handler for parsing.
def verify_sc_url(url: str) -> bool: """Verify signature certificate URL against Amazon Alexa requirements. Each call of Agent passes incoming utterances batch through skills filter, agent skills, skills processor. Batch of dialog IDs can be provided, in other case utterances indexes in incoming batch are used as dialog IDs. Args: url: Signature certificate URL from SignatureCertChainUrl HTTP header. Returns: result: True if verification was successful, False if not. """ parsed = urlsplit(url) scheme: str = parsed.scheme netloc: str = parsed.netloc path: str = parsed.path try: port = parsed.port except ValueError: port = None result = (scheme.lower() == 'https' and netloc.lower().split(':')[0] == 's3.amazonaws.com' and path.startswith('/echo.api/') and (port == 443 or port is None)) return result
Verify signature certificate URL against Amazon Alexa requirements. Each call of Agent passes incoming utterances batch through skills filter, agent skills, skills processor. Batch of dialog IDs can be provided, in other case utterances indexes in incoming batch are used as dialog IDs. Args: url: Signature certificate URL from SignatureCertChainUrl HTTP header. Returns: result: True if verification was successful, False if not.
def write_word(self, cmd, value): """ Writes a 16-bit word to the specified command register """ self.bus.write_word_data(self.address, cmd, value) self.log.debug( "write_word: Wrote 0x%04X to command register 0x%02X" % ( value, cmd ) )
Writes a 16-bit word to the specified command register
def encode(self, value): ''' :param value: value to encode ''' kassert.is_of_types(value, Bits) if len(value) % 8 != 0: raise KittyException('this encoder cannot encode bits that are not byte aligned') return self._encoder.encode(value.bytes)
:param value: value to encode
def split_list(alist, wanted_parts=1): """ A = [0,1,2,3,4,5,6,7,8,9] print split_list(A, wanted_parts=1) print split_list(A, wanted_parts=2) print split_list(A, wanted_parts=8) """ length = len(alist) return [ alist[i * length // wanted_parts:(i + 1) * length // wanted_parts] for i in range(wanted_parts) ]
A = [0,1,2,3,4,5,6,7,8,9] print split_list(A, wanted_parts=1) print split_list(A, wanted_parts=2) print split_list(A, wanted_parts=8)
def child(self, offset256): """ Derive new public key from this key and a sha256 "offset" """ a = bytes(self) + offset256 s = hashlib.sha256(a).digest() return self.add(s)
Derive new public key from this key and a sha256 "offset"
def render_svg(self, render_id, words, arcs): """Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup. """ self.levels = self.get_levels(arcs) self.highest_level = len(self.levels) self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke self.width = self.offset_x + len(words) * self.distance self.height = self.offset_y + 3 * self.word_spacing self.id = render_id words = [self.render_word(w["text"], w["tag"], i) for i, w in enumerate(words)] arcs = [ self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i) for i, a in enumerate(arcs) ] content = "".join(words) + "".join(arcs) return TPL_DEP_SVG.format( id=self.id, width=self.width, height=self.height, color=self.color, bg=self.bg, font=self.font, content=content, dir=self.direction, lang=self.lang, )
Render SVG. render_id (int): Unique ID, typically index of document. words (list): Individual words and their tags. arcs (list): Individual arcs and their start, end, direction and label. RETURNS (unicode): Rendered SVG markup.
def __imap_search(self, ** criteria_dict): """ Searches for query in the given IMAP criteria and returns the message numbers that match as a list of strings. Criteria without values (eg DELETED) should be keyword args with KEY=True, or else not passed. Criteria with values should be keyword args of the form KEY="VALUE" where KEY is a valid IMAP key. IMAP default is to AND all criteria together. We don't support other logic quite yet. All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>, BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>, OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN, SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>, SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>, UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED, UNKEYWORD <flag>, UNSEEN. For details on keys and their values, see http://tools.ietf.org/html/rfc3501#section-6.4.4 :param criteria_dict: dictionary of search criteria keywords :raises: EmailException if something in IMAP breaks :returns: List of message numbers as strings matched by given criteria """ self.imap_connect() criteria = [] for key in criteria_dict: if criteria_dict[key] is True: criteria.append('(%s)' % key) else: criteria.append('(%s "%s")' % (key, criteria_dict[key])) # If any of these criteria are not valid IMAP keys, IMAP will tell us. status, msg_nums = self.mailbox.search('UTF-8', * criteria) self.imap_disconnect() if 0 == len(msg_nums): msg_nums = [] if 'OK' in status: return self.__parse_imap_search_result(msg_nums) else: raise EmailException("IMAP status is " + str(status))
Searches for query in the given IMAP criteria and returns the message numbers that match as a list of strings. Criteria without values (eg DELETED) should be keyword args with KEY=True, or else not passed. Criteria with values should be keyword args of the form KEY="VALUE" where KEY is a valid IMAP key. IMAP default is to AND all criteria together. We don't support other logic quite yet. All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>, BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>, OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN, SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>, SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>, UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED, UNKEYWORD <flag>, UNSEEN. For details on keys and their values, see http://tools.ietf.org/html/rfc3501#section-6.4.4 :param criteria_dict: dictionary of search criteria keywords :raises: EmailException if something in IMAP breaks :returns: List of message numbers as strings matched by given criteria
def inverted(self): """Return the inverse of the transform.""" # This is a bit of hackery so that we can put a single "inverse" # function here. If we just made "self._inverse_type" point to the class # in question, it wouldn't be defined yet. This way, it's done at # at runtime and we avoid the definition problem. Hackish, but better # than repeating code everywhere or making a relatively complex # metaclass. inverse_type = globals()[self._inverse_type] return inverse_type(self._center_longitude, self._center_latitude, self._resolution)
Return the inverse of the transform.
def get_volumes(self): """ Return a list of all Volumes in this Storage Pool """ vols = [self.find_volume(name) for name in self.virsp.listVolumes()] return vols
Return a list of all Volumes in this Storage Pool
def _determine_auth_mechanism(username, password, delegation): """ if the username contains at '@' sign we will use kerberos if the username contains a '/ we will use ntlm either NTLM or Kerberos. In fact its basically always Negotiate. """ if re.match('(.*)@(.+)', username) is not None: if delegation is True: raise Exception('Kerberos is not yet supported, specify the username in <domain>\<username> form for NTLM') else: raise Exception('Kerberos is not yet supported, specify the username in <domain>>\<username> form for NTLM') # check for NT format 'domain\username' a blank domain or username is invalid legacy = re.match('(.*)\\\\(.*)', username) if legacy is not None: if not legacy.group(1): raise Exception('Please specify the Windows domain for user in <domain>\<username> format') if not legacy.group(2): raise Exception('Please specify the Username of the user in <domain>\<username> format') if delegation is True: return HttpCredSSPAuth(legacy.group(1), legacy.group(2), password) else: return HttpNtlmAuth(legacy.group(1), legacy.group(2), password) #return HttpCredSSPAuth("SERVER2012", "Administrator", password) # attempt NTLM (local account, not domain) - if username is '' then we try anonymous NTLM auth # as if anyone will configure that - uf! return HttpNtlmAuth('', username, password)
If the username contains an '@' sign we will use Kerberos; if the username contains a '\' we will use NTLM — either NTLM or Kerberos. In fact it's basically always Negotiate.
def markup_fragment(source, encoding=None): ''' Parse a fragment if markup in HTML mode, and return a bindery node Warning: if you pass a string, you must make sure it's a byte string, not a Unicode object. You might also want to wrap it with amara.lib.inputsource.text if it's not obviously XML or HTML (for example it could be confused with a file name) from amara.lib import inputsource from amara.bindery import html doc = html.markup_fragment(inputsource.text('XXX<html><body onload="" color="white"><p>Spam!<p>Eggs!</body></html>YYY')) See also: http://wiki.xml3k.org/Amara2/Tagsoup ''' doc = parse(source, encoding=encoding) frag = doc.html.body return frag
Parse a fragment if markup in HTML mode, and return a bindery node Warning: if you pass a string, you must make sure it's a byte string, not a Unicode object. You might also want to wrap it with amara.lib.inputsource.text if it's not obviously XML or HTML (for example it could be confused with a file name) from amara.lib import inputsource from amara.bindery import html doc = html.markup_fragment(inputsource.text('XXX<html><body onload="" color="white"><p>Spam!<p>Eggs!</body></html>YYY')) See also: http://wiki.xml3k.org/Amara2/Tagsoup
def _validate_method_decoration(meta, class_): """Validate the usage of ``@override`` and ``@final`` modifiers on methods of the given ``class_``. """ # TODO(xion): employ some code inspection tricks to serve ClassErrors # as if they were thrown at the offending class's/method's definition super_mro = class_.__mro__[1:] own_methods = ((name, member) for name, member in class_.__dict__.items() if is_method(member)) # check that ``@override`` modifier is present where it should be # and absent where it shouldn't (e.g. ``@final`` methods) for name, method in own_methods: shadowed_method, base_class = next( ((getattr(base, name), base) for base in super_mro if hasattr(base, name)), (None, None) ) if meta._is_override(method): # ``@override`` is legal only when the method actually shadows # a method from a superclass, and that metod is not ``@final`` if not shadowed_method: raise ClassError("unnecessary @override on %s.%s" % ( class_.__name__, name), class_=class_) if meta._is_final(shadowed_method): raise ClassError( "illegal @override on a @final method %s.%s" % ( base_class.__name__, name), class_=class_) # if @override had parameter supplied, verify if it was # the same class as the base of shadowed method override_base = meta._get_override_base(method) if override_base and base_class is not override_base: if is_class(override_base): raise ClassError( "incorrect override base: expected %s, got %s" % ( base_class.__name__, override_base.__name__)) else: raise ClassError( "invalid override base specified: %s" % ( override_base,)) setattr(class_, name, method.method) else: if shadowed_method and name not in meta.OVERRIDE_EXEMPTIONS: if meta._is_final(shadowed_method): msg = "%s.%s is hiding a @final method %s.%s" % ( class_.__name__, name, base_class.__name__, name) else: msg = ("overridden method %s.%s " "must be marked with @override" % ( class_.__name__, name)) raise ClassError(msg, class_=class_)
Validate the usage of ``@override`` and ``@final`` modifiers on methods of the given ``class_``.
def setmonitor(self, enable=True): """Alias for setmode('monitor') or setmode('managed') Only available with Npcap""" # We must reset the monitor cache if enable: res = self.setmode('monitor') else: res = self.setmode('managed') if not res: log_runtime.error("Npcap WlanHelper returned with an error code !") self.cache_mode = None tmp = self.cache_mode = self.ismonitor() return tmp if enable else (not tmp)
Alias for setmode('monitor') or setmode('managed') Only available with Npcap
def login(config, username=None, password=None, email=None, url=None, client=None, *args, **kwargs): ''' Wrapper to the docker.py login method ''' try: c = (_get_client(config) if not client else client) lg = c.login(username, password, email, url) print "%s logged to %s"%(username,(url if url else "default hub")) except Exception as e: utils.error("%s can't login to repo %s: %s"%(username,(url if url else "default repo"),e)) return False return True
Wrapper to the docker.py login method
def update(self): """Update the charging state of the Tesla Vehicle.""" self._controller.update(self._id, wake_if_asleep=False) data = self._controller.get_charging_params(self._id) if data and (time.time() - self.__manual_update_time > 60): if data['charging_state'] != "Charging": self.__charger_state = False else: self.__charger_state = True
Update the charging state of the Tesla Vehicle.
def _apply_mt(self, doc_loader, parallelism, **kwargs): """Run the UDF multi-threaded using python multiprocessing""" if not Meta.postgres: raise ValueError("Fonduer must use PostgreSQL as a database backend.") def fill_input_queue(in_queue, doc_loader, terminal_signal): for doc in doc_loader: in_queue.put(doc) in_queue.put(terminal_signal) # Create an input queue to feed documents to UDF workers manager = Manager() in_queue = manager.Queue() # Use an output queue to track multiprocess progress out_queue = JoinableQueue() total_count = len(doc_loader) # Start UDF Processes for i in range(parallelism): udf = self.udf_class( in_queue=in_queue, out_queue=out_queue, worker_id=i, **self.udf_init_kwargs, ) udf.apply_kwargs = kwargs self.udfs.append(udf) # Start the UDF processes, and then join on their completion for udf in self.udfs: udf.start() # Fill input queue with documents terminal_signal = UDF.QUEUE_CLOSED in_queue_filler = Process( target=fill_input_queue, args=(in_queue, doc_loader, terminal_signal) ) in_queue_filler.start() count_parsed = 0 while count_parsed < total_count: y = out_queue.get() # Update progress bar whenever an item has been processed if y == UDF.TASK_DONE: count_parsed += 1 if self.pb is not None: self.pb.update(1) else: raise ValueError("Got non-sentinal output.") in_queue_filler.join() in_queue.put(UDF.QUEUE_CLOSED) for udf in self.udfs: udf.join() # Terminate and flush the processes for udf in self.udfs: udf.terminate() self.udfs = []
Run the UDF multi-threaded using python multiprocessing
def artist_commentary_revert(self, id_, version_id): """Revert artist commentary (Requires login) (UNTESTED). Parameters: id_ (int): The artist commentary id. version_id (int): The artist commentary version id to revert to. """ params = {'version_id': version_id} return self._get('artist_commentaries/{0}/revert.json'.format(id_), params, method='PUT', auth=True)
Revert artist commentary (Requires login) (UNTESTED). Parameters: id_ (int): The artist commentary id. version_id (int): The artist commentary version id to revert to.
def getaccountaddress(self, user_id=""): """Get the coin address associated with a user id. If the specified user id does not yet have an address for this coin, then generate one. Args: user_id (str): this user's unique identifier Returns: str: Base58Check address for this account """ address = self.rpc.call("getaccountaddress", user_id) self.logger.debug("Your", self.coin, "address is", address) return address
Get the coin address associated with a user id. If the specified user id does not yet have an address for this coin, then generate one. Args: user_id (str): this user's unique identifier Returns: str: Base58Check address for this account
def config(name, reset=False, **kwargs): ''' Modify configuration options for a given port. Multiple options can be specified. To see the available options for a port, use :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`. name The port name, in ``category/name`` format reset : False If ``True``, runs a ``make rmconfig`` for the port, clearing its configuration before setting the desired options CLI Examples: .. code-block:: bash salt '*' ports.config security/nmap IPV6=off ''' portpath = _check_portname(name) if reset: rmconfig(name) configuration = showconfig(name, dict_return=True) if not configuration: raise CommandExecutionError( 'Unable to get port configuration for \'{0}\''.format(name) ) # Get top-level key for later reference pkg = next(iter(configuration)) conf_ptr = configuration[pkg] opts = dict( (six.text_type(x), _normalize(kwargs[x])) for x in kwargs if not x.startswith('_') ) bad_opts = [x for x in opts if x not in conf_ptr] if bad_opts: raise SaltInvocationError( 'The following opts are not valid for port {0}: {1}' .format(name, ', '.join(bad_opts)) ) bad_vals = [ '{0}={1}'.format(x, y) for x, y in six.iteritems(opts) if y not in ('on', 'off') ] if bad_vals: raise SaltInvocationError( 'The following key/value pairs are invalid: {0}' .format(', '.join(bad_vals)) ) conf_ptr.update(opts) _write_options(name, configuration) new_config = showconfig(name, dict_return=True) try: new_config = new_config[next(iter(new_config))] except (StopIteration, TypeError): return False return all(conf_ptr[x] == new_config.get(x) for x in conf_ptr)
Modify configuration options for a given port. Multiple options can be specified. To see the available options for a port, use :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`. name The port name, in ``category/name`` format reset : False If ``True``, runs a ``make rmconfig`` for the port, clearing its configuration before setting the desired options CLI Examples: .. code-block:: bash salt '*' ports.config security/nmap IPV6=off
def validate_sceneInfo(self): """Check whether sceneInfo is valid to download from AWS Storage.""" if self.sceneInfo.prefix not in self.__prefixesValid: raise WrongSceneNameError('AWS: Prefix of %s (%s) is invalid' % (self.sceneInfo.name, self.sceneInfo.prefix))
Check whether sceneInfo is valid to download from AWS Storage.
def _responsify(api_spec, error, status): """Take a bravado-core model representing an error, and return a Flask Response with the given error code and error instance as body""" result_json = api_spec.model_to_json(error) r = jsonify(result_json) r.status_code = status return r
Take a bravado-core model representing an error, and return a Flask Response with the given error code and error instance as body
def get_disabled(): ''' .. versionadded:: 2014.7.0 Return a set of services that are installed but disabled CLI Example: .. code-block:: bash salt '*' service.get_disabled ''' services = [] for daemon, is_enabled in six.iteritems(_get_rc()): if not is_enabled: services.append(daemon) return sorted(set(get_all()) & set(services))
.. versionadded:: 2014.7.0 Return a set of services that are installed but disabled CLI Example: .. code-block:: bash salt '*' service.get_disabled
def get_minimum_span(low, high, span): """ If lower and high values are equal ensures they are separated by the defined span. """ if is_number(low) and low == high: if isinstance(low, np.datetime64): span = span * np.timedelta64(1, 's') low, high = low-span, high+span return low, high
If lower and high values are equal ensures they are separated by the defined span.
def read_folder(folder): """Read all files of `folder` and return a list of HandwrittenData objects. Parameters ---------- folder : string Path to a folder Returns ------- list : A list of all .ink files in the given folder. """ recordings = [] for filename in glob.glob(os.path.join(folder, '*.ink')): recording = parse_scg_ink_file(filename) recordings.append(recording) return recordings
Read all files of `folder` and return a list of HandwrittenData objects. Parameters ---------- folder : string Path to a folder Returns ------- list : A list of all .ink files in the given folder.
def EXPGauss(w_F, compute_uncertainty=True, is_timeseries=False):
    """Estimate free energy difference using a Gaussian approximation to
    one-sided (unidirectional) exponential averaging.

    Parameters
    ----------
    w_F : np.ndarray, float
        w_F[t] is the forward work value from snapshot t.  t = 0...(T-1)
        Length T is deduced from vector.
    compute_uncertainty : bool, optional, default=True
        if False, will disable computation of the statistical uncertainty
        (default: True)
    is_timeseries : bool, default=False
        if True, correlation in data is corrected for by estimation of
        statistical inefficiency (default: False)
        Use this option if you are providing correlated timeseries data and
        have not subsampled the data to produce uncorrelated samples.

    Returns
    -------
    result_vals : dictionary
        Possible keys in the result_vals dictionary:

        'Delta_f' : float
            Free energy difference between the two states
        'dDelta_f': float
            Estimated standard deviation of free energy difference between
            the two states.

    Notes
    -----
    If you are providing correlated timeseries data, be sure to set the
    'timeseries' flag to True.

    Examples
    --------
    Compute the free energy difference given a sample of forward work values.

    >>> from pymbar import testsystems
    >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0)
    >>> results = EXPGauss(w_F)
    >>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
    Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT
    >>> results = EXPGauss(w_R)
    >>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f']))
    Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT
    """
    # Get number of work measurements.
    T = float(np.size(w_F))

    var = np.var(w_F)

    # Estimate free energy difference by Gaussian approximation:
    # dG = <U> - 0.5*var(U)
    DeltaF = np.average(w_F) - 0.5 * var

    result_vals = dict()
    if compute_uncertainty:
        # Compute effective number of uncorrelated samples.
        g = 1.0  # statistical inefficiency
        T_eff = T
        if is_timeseries:
            # Estimate statistical inefficiency of the work timeseries.
            import timeseries
            g = timeseries.statisticalInefficiency(w_F, w_F)
            T_eff = T / g

        # Estimate standard error of E[x].
        dx2 = var / T_eff + 0.5 * var * var / (T_eff - 1)
        dDeltaF = np.sqrt(dx2)

        # Return estimate of free energy difference and uncertainty.
        result_vals['Delta_f'] = DeltaF
        result_vals['dDelta_f'] = dDeltaF
    else:
        result_vals['Delta_f'] = DeltaF
    return result_vals
Estimate free energy difference using gaussian approximation to one-sided (unidirectional) exponential averaging. Parameters ---------- w_F : np.ndarray, float w_F[t] is the forward work value from snapshot t. t = 0...(T-1) Length T is deduced from vector. compute_uncertainty : bool, optional, default=True if False, will disable computation of the statistical uncertainty (default: True) is_timeseries : bool, default=False if True, correlation in data is corrected for by estimation of statisitcal inefficiency (default: False) Use this option if you are providing correlated timeseries data and have not subsampled the data to produce uncorrelated samples. Returns ------- result_vals : dictionary Possible keys in the result_vals dictionary 'Delta_f' : float Free energy difference between the two states 'dDelta_f': float Estimated standard deviation of free energy difference between the two states. Notes ----- If you are prodividing correlated timeseries data, be sure to set the 'timeseries' flag to True Examples -------- Compute the free energy difference given a sample of forward work values. >>> from pymbar import testsystems >>> [w_F, w_R] = testsystems.gaussian_work_example(mu_F=None, DeltaF=1.0, seed=0) >>> results = EXPGauss(w_F) >>> print('Forward Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f'])) Forward Gaussian approximated free energy difference is 1.049 +- 0.089 kT >>> results = EXPGauss(w_R) >>> print('Reverse Gaussian approximated free energy difference is %.3f +- %.3f kT' % (results['Delta_f'], results['dDelta_f'])) Reverse Gaussian approximated free energy difference is -1.073 +- 0.080 kT
def setup(self, redis_conn=None, host='localhost', port=6379):
    '''
    Initialize the counting manager.

    Delegates connection setup to the base class, then starts the
    background thread this counter relies on.

    @param redis_conn: A premade redis connection (overrides host and port)
    @param host: the redis host
    @param port: the redis port
    '''
    AbstractCounter.setup(self, redis_conn=redis_conn, host=host,
                          port=port)
    self._threaded_start()
Set up the counting manager class @param redis_conn: A premade redis connection (overrides host and port) @param host: the redis host @param port: the redis port
def prepare_stack_for_update(self, stack, tags):
    """Prepare a stack for updating.

    It may involve deleting the stack if it has failed its initial
    creation. The deletion is only allowed if:

    - The stack contains all the tags configured in the current context;
    - The stack is in one of the statuses considered safe to re-create;
    - ``recreate_failed`` is enabled, due to either being explicitly
      enabled by the user, or because interactive mode is on.

    Args:
        stack (dict): a stack object returned from get_stack
        tags (list): list of expected tags that must be present in the
            stack if it must be re-created

    Returns:
        bool: True if the stack can be updated, False if it must be
            re-created

    Raises:
        exceptions.StackUpdateBadStatus: if the stack can be neither
            updated nor safely re-created.
    """
    if self.is_stack_destroyed(stack):
        return False
    elif self.is_stack_completed(stack):
        return True

    stack_name = self.get_stack_name(stack)
    stack_status = self.get_stack_status(stack)

    # Guard clauses: every state that blocks both update and re-creation
    # raises with an explanation.
    if self.is_stack_in_progress(stack):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status, 'Update already in-progress')

    if not self.is_stack_recreatable(stack):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status, 'Unsupported state for re-creation')

    if not self.recreate_failed:
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status,
            'Stack re-creation is disabled. Run stacker again with the '
            '--recreate-failed option to force it to be deleted and '
            'created from scratch.')

    stack_tags = self.get_stack_tags(stack)
    if not check_tags_contain(stack_tags, tags):
        raise exceptions.StackUpdateBadStatus(
            stack_name, stack_status,
            'Tags differ from current configuration, possibly not created '
            'with stacker')

    if self.interactive:
        # Fixed grammar in the user-facing warning ("will BE ... DESTROYED").
        sys.stdout.write(
            'The "%s" stack is in a failed state (%s).\n'
            'It cannot be updated, but it can be deleted and re-created.\n'
            'All its current resources will be IRREVERSIBLY DESTROYED.\n'
            'Proceed carefully!\n\n' % (stack_name, stack_status))
        sys.stdout.flush()
        ask_for_approval(include_verbose=False)

    # logger.warn is a deprecated alias; use warning.
    logger.warning('Destroying stack "%s" for re-creation', stack_name)
    self.destroy_stack(stack)

    return False
Prepare a stack for updating It may involve deleting the stack if it has failed its initial creation. The deletion is only allowed if: - The stack contains all the tags configured in the current context; - The stack is in one of the statuses considered safe to re-create - ``recreate_failed`` is enabled, due to either being explicitly enabled by the user, or because interactive mode is on. Args: stack (dict): a stack object returned from get_stack tags (list): list of expected tags that must be present in the stack if it must be re-created Returns: bool: True if the stack can be updated, False if it must be re-created
def u_shape(units: tf.Tensor,
            n_hidden_list: List,
            filter_width=7,
            use_batch_norm=False,
            training_ph=None):
    """U-Net-like architecture inspired by the One Hundred Layers Tiramisu
    (https://arxiv.org/abs/1611.09326).

    Args:
        units: a tensorflow tensor with dimensionality
            [None, n_tokens, n_features]
        n_hidden_list: list with number of hidden units at the output of
            each layer
        filter_width: width of the kernel in tokens
        use_batch_norm: whether to use batch normalization between layers
        training_ph: boolean placeholder determining whether is training
            phase now or not. It is used only for batch normalization.

    Returns:
        units: tensor at the output of the last convolutional layer with
            dimensionality [None, n_tokens, n_hidden_list[-1]]
    """
    conv_kwargs = {'filter_width': filter_width,
                   'use_batch_norm': use_batch_norm,
                   'training_ph': training_ph}

    # Contracting path: remember each level's output for the skip
    # connections used on the way back up.
    skip_connections = []
    for n_hidden in n_hidden_list:
        units = stacked_cnn(units, [n_hidden], **conv_kwargs)
        skip_connections.append(units)
        units = tf.layers.max_pooling1d(units, pool_size=2, strides=2,
                                        padding='same')

    # Bottleneck at the lowest resolution.
    units = stacked_cnn(units, [n_hidden], **conv_kwargs)

    # Expanding path: upsample and merge with the matching skip connection.
    for level, n_hidden in enumerate(reversed(n_hidden_list)):
        units = tf.expand_dims(units, axis=2)
        units = tf.layers.conv2d_transpose(units, n_hidden, filter_width,
                                           strides=(2, 1), padding='same')
        units = tf.squeeze(units, axis=2)

        skip = skip_connections[-(level + 1)]
        # Project the skip connection if the channel counts disagree.
        if skip.get_shape().as_list()[-1] != n_hidden:
            skip = tf.layers.dense(skip, n_hidden)
        units = stacked_cnn(skip + units, [n_hidden], **conv_kwargs)
    return units
Network architecture inspired by One Hundred layer Tiramisu. https://arxiv.org/abs/1611.09326. U-Net like. Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the output of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers training_ph: boolean placeholder determining whether is training phase now or not. It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) Returns: units: tensor at the output of the last convolutional layer with dimensionality [None, n_tokens, n_hidden_list[-1]]
def deactivate(self):
    """Stop heating/cooling and turn off the fan."""
    driver = self._driver
    # No-op when no driver is attached or the device is disconnected.
    if driver and driver.is_connected():
        driver.deactivate()
Stop heating/cooling and turn off the fan
def _from_binary_objid(cls, binary_stream):
    """See base class."""
    size = ObjectID._UUID_SIZE
    # An entry may carry fewer than four ids; pad the tail with None so
    # construction always receives exactly four elements.
    uids = []
    for idx in range(4):
        offset = idx * size
        if offset < len(binary_stream):
            uids.append(
                UUID(bytes_le=binary_stream[offset:offset + size].tobytes()))
        else:
            uids.append(None)
    _MOD_LOGGER.debug("Attempted to unpack OBJECT_ID Entry from \"%s\"\nResult: %s",
                      binary_stream.tobytes(), uids)
    return cls(uids)
See base class.
def baseurl(url):
    """Return the base URL (scheme, host and path) of the given url.

    Query string and fragment are dropped. Raises ValueError when the
    url has no network location or is not http(s).
    """
    parts = urlparse.urlparse(url)
    if parts.scheme not in ("http", "https") or not parts.netloc:
        raise ValueError('bad url')
    return "%s://%s%s" % (parts.scheme, parts.netloc, parts.path.strip())
return baseurl of given url
def _images_succeeded(cls, session): """Clears the :attr:`_stored_images` set and deletes actual files that are marked as deleted in the storage if the ongoing transaction has committed. """ for image, store in cls._deleted_images: for stored_image, _ in cls._stored_images: if stored_image.object_type == image.object_type and \ stored_image.object_id == image.object_id and \ stored_image.width == image.width and \ stored_image.height == image.height and \ stored_image.mimetype == image.mimetype: break else: store.delete(image) cls._stored_images.clear() cls._deleted_images.clear()
Clears the :attr:`_stored_images` set and deletes actual files that are marked as deleted in the storage if the ongoing transaction has committed.
def select(self, node):
    """Translate a select node into a latex qtree node.

    :param node: a treebrd node
    :return: a qtree subtree rooted at the node
    """
    template = '[.${op}_{{{conditions}}}$ {child} ]'
    return template.format(op=latex_operator[node.operator],
                           conditions=node.conditions,
                           child=self.translate(node.child))
Translate a select node into a latex qtree node. :param node: a treebrd node :return: a qtree subtree rooted at the node
def GetValue(self, row, col, table=None):
    """Return the result value of a cell, line split if too much data"""
    if table is None:
        table = self.grid.current_table

    try:
        cell_code = self.code_array((row, col, table))
    except IndexError:
        cell_code = None

    # Insert EOLs so an overly long result stays displayable in the
    # text control.
    maxlength = int(config["max_textctrl_length"])
    if cell_code is not None and len(cell_code) > maxlength:
        chunk = 80
        pieces = (cell_code[i:i + chunk]
                  for i in xrange(0, len(cell_code), chunk))
        cell_code = "\n".join(pieces)

    return cell_code
Return the result value of a cell, line split if too much data
def unwrap(self, value, session=None):
    '''Expects a list of dictionaries with ``k`` and ``v`` set to the
    keys and values that the output python dictionary should have.
    Validates the input and then constructs the dictionary from the
    list.'''
    self.validate_unwrap(value)
    return {
        self.key_type.unwrap(entry['k'], session=session):
            self.value_type.unwrap(entry['v'], session=session)
        for entry in value
    }
Expects a list of dictionaries with ``k`` and ``v`` set to the keys and values that will be unwrapped into the output python dictionary should have. Validates the input and then constructs the dictionary from the list.
def save(thing, url_or_handle, **kwargs):
    """Save object to file on CNS. File format is inferred from path.

    Use save_img(), save_npy(), or save_json() if you need to force a
    particular format.

    Args:
        thing: object to save.
        url_or_handle: CNS path, or a writable file handle that exposes
            a ``name`` attribute (used to infer the format).
        **kwargs: forwarded to the format-specific saver.

    Raises:
        RuntimeError: If the URL has no file extension.
        ValueError: If the file extension is not supported.
    """
    # A "handle" is anything writable that also knows its own name.
    is_handle = hasattr(url_or_handle, "write") and hasattr(url_or_handle, "name")
    if is_handle:
        _, ext = os.path.splitext(url_or_handle.name)
    else:
        _, ext = os.path.splitext(url_or_handle)
    if not ext:
        raise RuntimeError("No extension in URL: " + url_or_handle)

    if ext in savers:
        saver = savers[ext]
        if is_handle:
            saver(thing, url_or_handle, **kwargs)
        else:
            with write_handle(url_or_handle) as handle:
                saver(thing, handle, **kwargs)
    else:
        saver_names = [(key, fn.__name__) for (key, fn) in savers.items()]
        message = "Unknown extension '{}', supports {}."
        raise ValueError(message.format(ext, saver_names))
Save object to file on CNS. File format is inferred from path. Use save_img(), save_npy(), or save_json() if you need to force a particular format. Args: thing: object to save. url_or_handle: CNS path, or a writable file handle with a name attribute. Raises: RuntimeError: If the URL has no file extension. ValueError: If file extension not supported.
def _setup_crontab():
    """Sets up the crontab if it hasn't already been setup.

    Installs (or, with the "rollback" flag, removes) a cron job tagged
    with the comment "pyci_cron" that runs `ci.py -cron` inside the
    configured virtualenv. Reads the module-level `args` flags:
    - "nolive": skip all crontab changes;
    - "rollback": remove the existing job instead of creating one;
    - "cronfreq": run frequency in minutes (1 means every minute);
    and records the resulting cron state in the module-level `db`.
    """
    from crontab import CronTab
    #Since CI works out of a virtualenv anyway, the `ci.py` script will be
    #installed in the bin already, so we can call it explicitly.
    command = '/bin/bash -c "source ~/.cron_profile; workon {}; ci.py -cron"'.format(settings.venv)
    user = _get_real_user()
    if args["nolive"]:
        vms("Skipping cron tab configuration because 'nolive' enabled.")
        return

    cron = CronTab(user=user)
    #We need to see if the cron has already been created for this command.
    existing = False
    possible = cron.find_comment("pyci_cron")
    if len(list(possible)) > 0:
        if args["rollback"]:
            vms("Removing {} from cron tab.".format(command))
            cron.remove_all(command)
            cron.write()
            # Persist the disabled state so later runs know the job is gone.
            db["cron"] = False
            _save_db()
        else:
            existing = True

    # Only create a new job when none exists and we are not rolling back.
    if not existing and not args["rollback"]:
        job = cron.new(command=command, comment="pyci_cron")
        #Run the cron every minute of every hour every day.
        if args["cronfreq"] == 1:
            vms("New cron tab configured *minutely* for {}".format(command))
            job.setall("* * * * *")
        else:
            vms("New cron tab configured every {} minutes for {}.".format(args["cronfreq"], command))
            job.setall("*/{} * * * *".format(args["cronfreq"]))
        cron.write()
        db["cron"] = True
        _save_db()
Sets up the crontab if it hasn't already been setup.
def get_prefix_envname(self, name, log=False):
    """Return full prefix path of environment defined by `name`."""
    if name == 'root':
        prefix = self.ROOT_PREFIX
    else:
        prefix = None
    # envs, error = self.get_envs().communicate()
    # The last environment whose directory name matches wins.
    for env_path in self.get_envs():
        if basename(env_path) == name:
            prefix = env_path
    return prefix
Return full prefix path of environment defined by `name`.
async def run_task(self) -> None:
    '''Execute the task inside the asyncio event loop. Track the time it
    takes to run, and log when it starts/stops. After `INTERVAL` seconds,
    if/once the task has finished running, run it again until `stop()`
    is called.'''
    while self.running:
        try:
            Log.debug('executing periodic task %s', self.name)
            started = self.time()
            await self.run()
            elapsed = self.time() - started
            Log.debug('finished periodic task %s in %.1f seconds',
                      self.name, elapsed)

            # Sleep out the remainder of the interval, if any.
            remaining = self.INTERVAL - elapsed
            if remaining > 0:
                await self.sleep(remaining)
        except CancelledError:
            # Cancellation must propagate so the loop can shut us down.
            Log.debug('cancelled periodic task %s', self.name)
            raise
        except Exception:
            # Keep the periodic loop alive across task failures.
            Log.exception('exception in periodic task %s', self.name)
Execute the task inside the asyncio event loop. Track the time it takes to run, and log when it starts/stops. After `INTERVAL` seconds, if/once the task has finished running, run it again until `stop()` is called.
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Flags static/global std::string declarations (construction/destruction
  order across translation units is unspecified) and member variables
  initialized with themselves.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access, and
  # also because globals can be destroyed when some threads are still running.
  # TODO(unknown): Generalize this to also find static unique_ptr instances.
  # TODO(unknown): File bugs for clang-tidy to find these.
  match = Match(
      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
      r'([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators.  These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
    if Search(r'\bconst\b', line):
      error(filename, linenum, 'runtime/string', 4,
            'For a static/global string constant, use a C style string '
            'instead: "%schar%s %s[]".'
            % (match.group(1), match.group(2) or '', match.group(3)))
    else:
      error(filename, linenum, 'runtime/string', 4,
            'Static/global string variables are not permitted.')

  # Initializing a member variable with itself, e.g. foo_(foo_) or
  # foo_(CHECK_NOTNULL(foo_)), is almost certainly a typo.
  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
Check for unsafe global or static objects. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
def find_by_ids(self, _ids, projection=None, **kwargs):
    """Does a big _id:$in query on any iterator"""
    id_list = [ObjectId(_id) for _id in _ids]

    if len(_ids) == 0:
        return []  # FIXME : this should be an empty cursor !

    # Optimized path when only fetching the _id field.
    # Be mindful this might not filter missing documents that may not
    # have been returned, had we done the query.
    if projection is not None and list(projection.keys()) == ["_id"]:
        return [self({"_id": oid}, fetched_fields={"_id": True})
                for oid in id_list]

    return self.find({"_id": {"$in": id_list}}, projection=projection,
                     **kwargs)
Does a big _id:$in query on any iterator
def delete(self, *args, **kwargs):
    """Delete the image, along with any generated thumbnails."""
    source_cache = self.get_source_cache()
    # Remove generated thumbnails before the source disappears.
    self.delete_thumbnails(source_cache)
    # Remove the source image itself.
    super(ThumbnailerFieldFile, self).delete(*args, **kwargs)
    # Drop the cache entry last, once the files are gone.
    if source_cache and source_cache.pk is not None:
        source_cache.delete()
Delete the image, along with any generated thumbnails.
def animation(self, animation):
    """Setter for animation property.

    Parameters
    ----------
    animation: str
        Defines the animation of the spinner
    """
    self._animation = animation
    # Re-render the spinner text so the new animation takes effect.
    original_text = self._text['original']
    self._text = self._get_text(original_text)
Setter for animation property. Parameters ---------- animation: str Defines the animation of the spinner
def _call_one_middleware(self, middleware): ''' Evaluate arguments and execute the middleware function ''' args = {} for arg in middleware['args']: if hasattr(self, arg): # same as eval() but safer for arbitrary code execution args[arg] = reduce(getattr, arg.split('.'), self) self.logger.debug('calling middleware event {}' .format(middleware['name'])) middleware['call'](**args)
Evaluate arguments and execute the middleware function
async def export_wallet(self, von_wallet: Wallet, path: str) -> None:
    """Export an existing VON anchor wallet.

    Raise WalletState if wallet is closed.

    :param von_wallet: open wallet
    :param path: path to which to export wallet
    """
    LOGGER.debug('WalletManager.export_wallet >>> von_wallet %s, path %s', von_wallet, path)

    # A wallet without a handle has not been opened.
    if not von_wallet.handle:
        LOGGER.debug('WalletManager.export_wallet <!< Wallet %s is closed', von_wallet.name)
        raise WalletState('Wallet {} is closed'.format(von_wallet.name))

    export_config = {'path': path, **von_wallet.access_creds}
    await wallet.export_wallet(von_wallet.handle, json.dumps(export_config))

    LOGGER.debug('WalletManager.export_wallet <<<')
Export an existing VON anchor wallet. Raise WalletState if wallet is closed. :param von_wallet: open wallet :param path: path to which to export wallet
def distance(self, clr):
    """Returns the Euclidean distance between two colors (0.0-1.0).

    Consider colors arranged on the color wheel:
    - hue is the angle of a color along the center
    - saturation is the distance of a color from the center
    - brightness is the elevation of a color from the center
    (i.e. we're on a color sphere)
    """
    def _coord(angle, dist):
        # Polar -> Cartesian on the hue/saturation plane.
        return cos(radians(angle)) * dist, sin(radians(angle)) * dist

    x0, y0 = _coord(self.h * 360, self.s)
    x1, y1 = _coord(clr.h * 360, clr.s)
    dx = x1 - x0
    dy = y1 - y0
    dz = clr.brightness - self.brightness
    return sqrt(dx ** 2 + dy ** 2 + dz ** 2)
Returns the Euclidean distance between two colors (0.0-1.0). Consider colors arranged on the color wheel: - hue is the angle of a color along the center - saturation is the distance of a color from the center - brightness is the elevation of a color from the center (i.e. we're on a color sphere)
def reset(self):
    """Reset the service to its initial state.

    Restores ``base_url`` to the default '/<name>' route.
    """
    # NOTE(review): both format strings receive (self.__id, self.name),
    # but this first template only uses {0}; the name argument appears
    # intentional for the second message — confirm the first template
    # shouldn't also include {1}.
    logger.debug('StackInABoxService ({0}): Reset'
                 .format(self.__id, self.name))
    self.base_url = '/{0}'.format(self.name)
    logger.debug('StackInABoxService ({0}): Hosting Service {1}'
                 .format(self.__id, self.name))
Reset the service to its' initial state.
def reverse(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):
    """
    Return an address by location point.

    :param query: The coordinates for which you wish to obtain the
        closest human-readable addresses.
    :type query: :class:`geopy.point.Point`, list or tuple of
        ``(latitude, longitude)``, or string as
        ``"%(latitude)s, %(longitude)s"``.

    :param bool exactly_one: Return one result or a list of results, if
        available. Baidu's API will always return at most one result.

        .. versionadded:: 1.14.0

    :param int timeout: Time, in seconds, to wait for the geocoding service
        to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
        exception. Set this only if you wish to override, on this call
        only, the value set during the geocoder's initialization.

    :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
        ``exactly_one=False``.
    """
    params = {
        'ak': self.api_key,
        'output': 'json',
        'location': self._coerce_point_to_string(query),
    }
    url = self._construct_url(params)
    logger.debug("%s.reverse: %s", self.__class__.__name__, url)
    raw_response = self._call_geocoder(url, timeout=timeout)
    return self._parse_reverse_json(raw_response, exactly_one=exactly_one)
Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param bool exactly_one: Return one result or a list of results, if available. Baidu's API will always return at most one result. .. versionadded:: 1.14.0 :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.
def process_vts_params(self, scanner_vts):
    """Parse a <vt_selection> XML element into a dictionary.

    @param: XML element with vt subelements. Each vt has an id attribute.
    Optional parameters can be included as vt children. Example form:
        <vt_selection>
            <vt_single id='vt1' />
            <vt_single id='vt2'>
                <vt_value id='param1'>value</vt_value>
            </vt_single>
            <vt_group filter='family=debian'/>
            <vt_group filter='family=general'/>
        </vt_selection>
    @return: Dictionary mapping each VT id to its parameter dict, plus a
    'vt_groups' key listing the group filters. Example form:
        {'vt1': {},
         'vt2': {'param1': 'value'},
         'vt_groups': ['family=debian', 'family=general']}
    """
    vt_selection = {}
    group_filters = []
    for element in scanner_vts:
        if element.tag == 'vt_single':
            vt_id = element.attrib.get('id')
            params = {}
            for vt_value in element:
                value_id = vt_value.attrib.get('id')
                if not value_id:
                    raise OSPDError('Invalid VT preference. No attribute id',
                                    'start_scan')
                params[value_id] = vt_value.text if vt_value.text else ''
            vt_selection[vt_id] = params
        if element.tag == 'vt_group':
            vts_filter = element.attrib.get('filter', None)
            if vts_filter is None:
                raise OSPDError('Invalid VT group. No filter given.',
                                'start_scan')
            group_filters.append(vts_filter)
    vt_selection['vt_groups'] = group_filters
    return vt_selection
Receive an XML object with the Vulnerability Tests and their parameters to be used in a scan and return a dictionary. @param: XML element with vt subelements. Each vt has an id attribute. Optional parameters can be included as vt child. Example form: <vt_selection> <vt_single id='vt1' /> <vt_single id='vt2'> <vt_value id='param1'>value</vt_value> </vt_single> <vt_group filter='family=debian'/> <vt_group filter='family=general'/> </vt_selection> @return: Dictionary containing the vts attribute and subelements, like the VT's id and VT's parameters. Example form: {'vt1': {}, 'vt2': {'value_id': 'value'}, 'vt_groups': ['family=debian', 'family=general']}
def library_sequencing_results(self):
    """Generates a dict. where each key is a Library ID on the
    SequencingRequest and each value is the associated SequencingResult.
    Libraries that don't yet have a SequencingResult are not included
    in the dict.
    """
    results_by_library = {}
    for sres_id in self.sequencing_result_ids:
        sres = SequencingResult(sres_id)
        results_by_library[sres.library_id] = sres
    return results_by_library
Generates a dict. where each key is a Library ID on the SequencingRequest and each value is the associated SequencingResult. Libraries that don't yet have a SequencingResult are not included in the dict.
def closeSession(self):
    """
    C_CloseSession

    Closes this PKCS#11 session; raises PyKCS11Error on any non-CKR_OK
    return code from the underlying library.
    """
    rv = self.lib.C_CloseSession(self.session)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
C_CloseSession
def filter_resources(tables, relationships, include_tables=None, include_columns=None,
                     exclude_tables=None, exclude_columns=None):
    """Filter tables, columns and relationships by include/exclude lists.

    Include the following:
    1. Tables and relationships with tables present in include_tables
       (lst of str, table names)
    2. Columns (of whichever table) present in include_columns
       (lst of str, column names)

    Exclude the following:
    1. Tables and relationships with tables present in exclude_tables
       (lst of str, table names)
    2. Columns (of whichever table) present in exclude_columns
       (lst of str, column names)

    Disclosure note: All relationships are taken into consideration before
    ignoring columns. In other words, if one excludes primary or foreign
    keys, it will still keep the relations display amongst tables.
    """
    # Work on copies so callers' objects are never mutated.
    kept_tables = copy.deepcopy(tables)
    kept_relationships = copy.deepcopy(relationships)

    # Falsy (None or empty) include lists default to "everything".
    include_tables = include_tables or [t.name for t in kept_tables]
    include_columns = include_columns or [c.name for t in kept_tables
                                          for c in t.columns]
    exclude_tables = exclude_tables or list()
    exclude_columns = exclude_columns or list()

    def _table_wanted(name):
        return name in include_tables and name not in exclude_tables

    kept_tables = [t for t in kept_tables if _table_wanted(t.name)]
    kept_relationships = [r for r in kept_relationships
                          if _table_wanted(r.left_col)
                          and _table_wanted(r.right_col)]
    for t in kept_tables:
        t.columns = [c for c in t.columns
                     if c.name in include_columns
                     and c.name not in exclude_columns]
    return kept_tables, kept_relationships
Include the following: 1. Tables and relationships with tables present in the include_tables (lst of str, tables names) 2. Columns (of whichever table) present in the include_columns (lst of str, columns names) Exclude the following: 1. Tables and relationships with tables present in the exclude_tables (lst of str, tables names) 2. Columns (of whichever table) present in the exclude_columns (lst of str, columns names) Disclosure note: All relationships are taken into consideration before ignoring columns. In other words, if one excludes primary or foreign keys, it will still keep the relations display amongst tables
def add_to_capabilities(self, capabilities):
    """
    Adds proxy information as capability in specified capabilities.

    :Args:
     - capabilities: The capabilities to which proxy will be added.
    """
    proxy_caps = {'proxyType': self.proxyType['string']}
    # Only emit settings that are actually configured (truthy).
    optional_settings = (
        ('autodetect', self.autodetect),
        ('ftpProxy', self.ftpProxy),
        ('httpProxy', self.httpProxy),
        ('proxyAutoconfigUrl', self.proxyAutoconfigUrl),
        ('sslProxy', self.sslProxy),
        ('noProxy', self.noProxy),
        ('socksProxy', self.socksProxy),
        ('socksUsername', self.socksUsername),
        ('socksPassword', self.socksPassword),
    )
    for key, value in optional_settings:
        if value:
            proxy_caps[key] = value
    capabilities['proxy'] = proxy_caps
Adds proxy information as capability in specified capabilities. :Args: - capabilities: The capabilities to which proxy will be added.
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
    """Build the get-vmpolicy-macaddr request XML and dispatch it.

    Constructs <get_vmpolicy_macaddr><output><vmpolicy-macaddr><name>
    with the given name and passes the element tree to the callback.

    :keyword name: value for the vmpolicy-macaddr name element (required).
    :keyword callback: callable invoked with the XML element;
        defaults to ``self._callback``.
    :return: whatever the callback returns.
    """
    # The original auto-generated code created a throwaway
    # ET.Element("config") that was immediately overwritten; build the
    # request root directly instead.
    config = ET.Element("get_vmpolicy_macaddr")
    output = ET.SubElement(config, "output")
    vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
    name = ET.SubElement(vmpolicy_macaddr, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def prior_names(self):
    """get the prior information names

    Returns
    -------
    prior_names : list
        a list of prior information names
    """
    prior_info = self.prior_information
    # Group by the index so duplicate names collapse to one entry each.
    grouped = prior_info.groupby(prior_info.index).groups
    return list(grouped.keys())
get the prior information names Returns ------- prior_names : list a list of prior information names
def next_token(expected_type, data):
    """
    Based on the expected next type, consume the next token returning the
    type found and an updated buffer with the found token removed

    :param expected_type: the TokenType expected next
    :param data: the remaining input buffer
    :return: (TokenType, str) tuple where TokenType is the type of the
        token actually found and str is the buffer with any consumed
        leading character removed
    """
    next_data = copy.copy(data)
    next_type = TokenType.INVALID

    # Use identity comparison ("is None") for the None sentinel instead of
    # "== None" (PEP 8; also avoids surprises from custom __eq__).
    if len(next_data) == 0 or next_data[0] is None:
        next_type = TokenType.END
    elif (expected_type == TokenType.DIGIT or expected_type == TokenType.DIGIT_OR_ZERO) and next_data[0].isalpha():
        next_type = TokenType.LETTER
    elif expected_type == TokenType.LETTER and next_data[0].isdigit():
        next_type = TokenType.DIGIT
    elif expected_type == TokenType.SUFFIX and next_data[0].isdigit():
        next_type = TokenType.SUFFIX_NO
    else:
        if next_data[0] == '.':
            next_type = TokenType.DIGIT_OR_ZERO
        elif next_data[0] == '_':
            next_type = TokenType.SUFFIX
        elif next_data[0] == '-':
            if len(next_data) > 1 and next_data[1] == 'r':
                next_type = TokenType.REVISION_NO
            # Pop leading char off
            next_data = next_data[1:]
        else:
            next_type = TokenType.INVALID
            next_data = next_data[1:]

    # Token types may only advance through the expected ordering; a
    # backwards step is invalid except for the allowed demotions below.
    if next_type < expected_type:
        if not ((next_type == TokenType.DIGIT_OR_ZERO and expected_type == TokenType.DIGIT) or
                (next_type == TokenType.SUFFIX and expected_type == TokenType.SUFFIX_NO) or
                (next_type == TokenType.DIGIT and expected_type == TokenType.LETTER)):
            next_type = TokenType.INVALID

    return next_type, next_data
Based on the expected next type, consume the next token returning the type found and an updated buffer with the found token removed :param expected_type: :param data: :return: (TokenType, str) tuple where TokenType is the type of the token actually found and str is the updated buffer
def getpaths(struct):
    """Maps all Tasks in a structured data object to their .output()."""
    if isinstance(struct, Task):
        return struct.output()
    if isinstance(struct, dict):
        # Preserve the concrete mapping type (e.g. OrderedDict).
        return struct.__class__(
            (key, getpaths(value)) for key, value in six.iteritems(struct))
    if isinstance(struct, (list, tuple)):
        return struct.__class__(getpaths(item) for item in struct)
    # Remaining case: assume struct is iterable...
    try:
        return [getpaths(item) for item in struct]
    except TypeError:
        raise Exception('Cannot map %s to Task/dict/list' % str(struct))
Maps all Tasks in a structured data object to their .output().
def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):
    """Deterministic shuffle and split algorithm.

    Given the same two datasets and the same ``random_seed``, the split
    happens the same exact way every call.

    Args:
        dataset (lib.datasets.Dataset): First dataset.
        other_dataset (lib.datasets.Dataset): Another dataset.
        random_seed (int, optional): Seed to control the shuffle of both
            datasets.
        split (float, optional): If defined it is the percentage of rows
            that first dataset gets after split otherwise the original
            proportions are kept.

    Returns:
        :class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`:
            Resplit datasets.
    """
    # Prevent circular dependency
    from torchnlp.datasets import Dataset

    combined = dataset.rows + other_dataset.rows
    shuffle(combined, random_seed=random_seed)
    if split is None:
        # Keep the original proportions.
        cut = len(dataset)
    else:
        # Clamp the requested split into [0, len(combined)].
        cut = max(min(round(len(combined) * split), len(combined)), 0)
    return Dataset(combined[:cut]), Dataset(combined[cut:])
Deterministic shuffle and split algorithm. Given the same two datasets and the same ``random_seed``, the split happens the same exact way every call. Args: dataset (lib.datasets.Dataset): First dataset. other_dataset (lib.datasets.Dataset): Another dataset. random_seed (int, optional): Seed to control the shuffle of both datasets. split (float, optional): If defined it is the percentage of rows that first dataset gets after split otherwise the original proportions are kept. Returns: :class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets.
def validate_username_for_new_account(person, username): """ Validate the new username for a new account. If the username is invalid or in use, raises :py:exc:`UsernameInvalid` or :py:exc:`UsernameTaken`. :param person: Owner of new account. :param username: Username to validate. """ # This is much the same as validate_username_for_new_person, except # we don't care if the username is used by the person owning the account # is the username valid? validate_username(username) # Check for existing people query = Person.objects.filter(username__exact=username) count = query.exclude(pk=person.pk).count() if count >= 1: raise UsernameTaken(six.u( 'The username is already taken. Please choose another. ' 'If this was the name of your old account please email %s') % settings.ACCOUNTS_EMAIL) # Check for existing accounts not belonging to this person query = Account.objects.filter(username__exact=username) count = query.exclude(person__pk=person.pk).count() if count >= 1: raise UsernameTaken(six.u( 'The username is already taken. Please choose another. ' 'If this was the name of your old account please email %s') % settings.ACCOUNTS_EMAIL) # Check datastore, in case username created outside Karaage. # Make sure we don't count the entry for person. query = Person.objects.filter(username__exact=username) count = query.filter(pk=person.pk).count() if count == 0 and account_exists(username): raise UsernameTaken( six.u('Username is already in external personal datastore.'))
Validate the new username for a new account. If the username is invalid or in use, raises :py:exc:`UsernameInvalid` or :py:exc:`UsernameTaken`. :param person: Owner of new account. :param username: Username to validate.
def read_from(self, provider, **options):
    """
    All :class:`Pointer` fields in the `Structure` read the necessary
    number of bytes from the data :class:`Provider` for their referenced
    :attr:`~Pointer.data` object. Null pointer are ignored.

    :param Provider provider: data :class:`Provider`.

    :keyword bool nested: if ``True`` all :class:`Pointer` fields in the
        :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
        `Structure` reads their referenced :attr:`~Pointer.data` object as
        well (chained method call).
        Each :class:`Pointer` field stores the bytes for its referenced
        :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
    """
    # Delegate to every nested Container/Pointer member; plain fields have
    # nothing to read from the provider.
    for member in self.values():
        if is_mixin(member):  # Container or Pointer
            member.read_from(provider, **options)
All :class:`Pointer` fields in the `Structure` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointer are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Structure` reads their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
def get_current_m2m_diff(self, instance, new_objects):
    """
    :param instance: Versionable object
    :param new_objects: objects which are about to be associated with
        instance
    :return: (being_removed id list, being_added id list)
    :rtype : tuple
    """
    new_ids = self.pks_from_objects(new_objects)
    relation_manager = self.__get__(instance)

    # Only consider *current* (non-historical) through-table rows that
    # point at this instance.
    filter = Q(**{relation_manager.source_field.attname: instance.pk})
    qs = self.through.objects.current.filter(filter)
    try:
        # Django 1.7
        target_name = relation_manager.target_field.attname
    except AttributeError:
        # Django 1.6
        target_name = relation_manager.through._meta.get_field_by_name(
            relation_manager.target_field_name)[0].attname
    current_ids = set(qs.values_list(target_name, flat=True))

    # Set arithmetic: present-but-not-requested is removed,
    # requested-but-not-present is added.
    being_removed = current_ids - new_ids
    being_added = new_ids - current_ids
    return list(being_removed), list(being_added)
:param instance: Versionable object :param new_objects: objects which are about to be associated with instance :return: (being_removed id list, being_added id list) :rtype : tuple
def group_add_user_action(model, request):
    """Add user to group.

    Reads one user id (``id``) or several (``id[]``) from the request
    parameters, validates the membership change, performs it, and returns a
    ``{'success': bool, 'message': str}`` dict for the caller.
    """
    # Accept either a single 'id' parameter or a list under 'id[]'.
    user_id = request.params.get('id')
    if not user_id:
        user_ids = request.params.getall('id[]')
    else:
        user_ids = [user_id]
    try:
        group = model.model
        validate_add_users_to_groups(model, user_ids, [group.id])
        for user_id in user_ids:
            group.add(user_id)
        group()  # NOTE(review): presumably persists the membership change -- confirm
        model.parent.invalidate(group.name)
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'added_user_to_group',
            default="Added user '${uid}' to group '${gid}'.",
            mapping={
                'uid': ', '.join(user_ids),
                'gid': group.id
            }
        ))
        return {
            'success': True,
            'message': message
        }
    except ManageMembershipError as e:
        # Only the "target uid not allowed" reason is expected here; anything
        # else indicates a programming error.
        if e.reason is not LM_TARGET_UID_NOT_ALLOWED:
            raise Exception(u"Unknown ManageMembershipError reason.")
        localizer = get_localizer(request)
        message = localizer.translate(_(
            'lm_add_target_uid_not_allowed',
            default=(
                "Failed adding user '${uid}' to group '${gid}'. "
                "Manage membership denied for user."
            ),
            mapping={
                'uid': e.data,
                'gid': group.id
            }
        ))
        return {
            'success': False,
            'message': message
        }
    except Exception as e:
        # Best-effort error reporting back to the UI.
        return {
            'success': False,
            'message': str(e)
        }
Add user to group.
def fromBinaryString(value):
    """Create a |ASN.1| object initialized from a string of '0' and '1'.

    Parameters
    ----------
    value: :class:`str`
        Text string like '1010111'
    """
    octetList = []
    currentByte = 0
    bitsLeft = 8
    for character in value:
        if not bitsLeft:
            # Current byte is full -- flush it and start a new one.
            octetList.append(currentByte)
            currentByte = 0
            bitsLeft = 8
        bitsLeft -= 1
        if character in ('0', '1'):
            bit = int(character)
        else:
            raise error.PyAsn1Error(
                'Non-binary OCTET STRING initializer %s' % (character,)
            )
        currentByte |= bit << bitsLeft
    # The last (possibly partial) byte is always emitted, even for an empty
    # input string -- matching the original behaviour.
    octetList.append(currentByte)
    return octets.ints2octs(octetList)
Create a |ASN.1| object initialized from a string of '0' and '1'. Parameters ---------- value: :class:`str` Text string like '1010111'
def cli(ctx, feature_id, start, end, organism="", sequence=""):
    """Set the boundaries of a genomic feature

Output:

    A standard apollo feature dictionary ({"features": [{...}]})
    """
    # Thin CLI wrapper: delegate straight to the Apollo annotations client.
    return ctx.gi.annotations.set_boundaries(feature_id, start, end, organism=organism, sequence=sequence)
Set the boundaries of a genomic feature Output: A standard apollo feature dictionary ({"features": [{...}]})
def _sanitize_url_components(comp_list, field): ''' Recursive function to sanitize each component of the url. ''' if not comp_list: return '' elif comp_list[0].startswith('{0}='.format(field)): ret = '{0}=XXXXXXXXXX&'.format(field) comp_list.remove(comp_list[0]) return ret + _sanitize_url_components(comp_list, field) else: ret = '{0}&'.format(comp_list[0]) comp_list.remove(comp_list[0]) return ret + _sanitize_url_components(comp_list, field)
Recursive function to sanitize each component of the url.
def import_categories(self, category_nodes):
    """
    Import all the categories from 'wp:category' nodes,
    because categories in 'item' nodes are not necessarily
    all the categories and returning it in a dict for
    database optimizations.
    """
    self.write_out(self.style.STEP('- Importing categories\n'))

    categories = {}
    for category_node in category_nodes:
        # Truncate to the model's maximum field length.
        title = category_node.find('{%s}cat_name' % WP_NS).text[:255]
        slug = category_node.find(
            '{%s}category_nicename' % WP_NS).text[:255]
        try:
            parent = category_node.find(
                '{%s}category_parent' % WP_NS).text[:255]
        except TypeError:
            # Node text is None for top-level categories.
            parent = None
        self.write_out('> %s... ' % title)
        category, created = Category.objects.get_or_create(
            slug=slug, defaults={'title': title,
                                 'parent': categories.get(parent)})
        # Index by title so children imported later can look up their parent.
        categories[title] = category
        self.write_out(self.style.ITEM('OK\n'))
    return categories
Import all the categories from 'wp:category' nodes, because categories in 'item' nodes are not necessarily all the categories and returning it in a dict for database optimizations.
def sync(self, *sids):
    """Synchronise data

    Parameters
    ----------
    sids : list of str
        SensorIDs to sync
        Optional, leave empty to sync everything
    """
    if sids == ():
        # No explicit selection: sync every sensor known to the database.
        sids = [sid for (sid,) in self.dbcur.execute(SQL_SENSOR_ALL)]
    for sid in sids:
        self.dbcur.execute(SQL_TMPO_LAST, (sid,))
        last = self.dbcur.fetchone()
        if last:
            rid, lvl, bid, ext = last
            self._clean(sid, rid, lvl, bid)
            # Prevent needless polling: this sensor's last block is still
            # fresh (blocks cover 256 s), so skip it -- but keep syncing the
            # remaining sensors instead of aborting them all (the previous
            # ``return`` here cut the whole loop short).
            if time.time() < bid + 256:
                continue
        else:
            rid, lvl, bid = 0, 0, 0
        self._req_sync(sid, rid, lvl, bid)
Synchronise data Parameters ---------- sids : list of str SensorIDs to sync Optional, leave empty to sync everything
def get_avg_price_fifo(self) -> Decimal:
    """Calculates the average price paid for the security.

    security = Commodity
    Returns Decimal value.
    """
    quantity = self.get_quantity()
    if not quantity:
        return Decimal(0)

    # Sum the value of every unused split across all holding accounts.
    total_paid = Decimal(0)
    for account in self.get_holding_accounts():
        for split in self.get_available_splits_for_account(account):
            total_paid += split.value

    return total_paid / quantity
Calculates the average price paid for the security. security = Commodity Returns Decimal value.
def unit_is_related(self, location, worksheet):
    '''
    Checks for relationship between a unit location and this block.

    Returns:
        True if the location is related to this block.
    '''
    on_same_sheet = worksheet == self.worksheet
    if not isinstance(location, (tuple, list)):
        # Non-positional locations are related whenever the sheet matches.
        return on_same_sheet
    row, col = location[0], location[1]
    # Half-open range check: start is inclusive, end is exclusive.
    return (on_same_sheet and
            self.start[0] <= row < self.end[0] and
            self.start[1] <= col < self.end[1])
Checks for relationship between a unit location and this block. Returns: True if the location is related to this block.
def plot(self, flip=False, ax_channels=None, ax=None, *args, **kwargs):
    """ {_gate_plot_doc} """
    if ax is None:  # fixed: identity check instead of ``ax == None``
        ax = pl.gca()
    if ax_channels is not None:
        flip = self._find_orientation(ax_channels)
    # A threshold gate is drawn as two parallel lines; ``flip`` decides
    # whether they are horizontal or vertical.
    plot_func = ax.axes.axhline if flip else ax.axes.axvline
    kwargs.setdefault('color', 'black')
    a1 = plot_func(self.vert[0], *args, **kwargs)
    a2 = plot_func(self.vert[1], *args, **kwargs)
    return (a1, a2)
{_gate_plot_doc}
def validate_steps(self, request, workflow, start, end):
    """Validates the workflow steps from ``start`` to ``end``, inclusive.

    Returns a dict describing the validation state of the workflow.
    """
    step_errors = {}
    for step in workflow.steps[start:end + 1]:
        if step.action.is_valid():
            continue
        # Stringify each field's error list for JSON-friendly output.
        step_errors[step.slug] = {
            field: [six.text_type(err) for err in field_errors]
            for (field, field_errors) in step.action.errors.items()
        }
    return {
        'has_errors': bool(step_errors),
        'workflow_slug': workflow.slug,
        'errors': step_errors,
    }
Validates the workflow steps from ``start`` to ``end``, inclusive. Returns a dict describing the validation state of the workflow.
def fcast(value: float) -> TensorLike:
    """Cast to float tensor"""
    # FTYPE is the module-level float dtype used throughout.
    newvalue = tf.cast(value, FTYPE)
    if DEVICE == 'gpu':
        newvalue = newvalue.gpu()  # Why is this needed? # pragma: no cover
    return newvalue
Cast to float tensor
def contains (self, p):
    """Returns True if point is contained inside this Polygon, False otherwise.

    This method uses the Ray Casting algorithm.

    Examples:

    >>> p = Polygon()
    >>> p.vertices = [Point(1, 1), Point(1, -1), Point(-1, -1), Point(-1, 1)]
    >>> p.contains( Point(0, 0) )
    True
    >>> p.contains( Point(2, 3) )
    False
    """
    if p not in self.bounds():
        # Outside the bounding box: cannot be inside the polygon.
        return False
    # Ray casting: toggle "inside" each time a horizontal ray from p
    # crosses a polygon edge.
    inside = False
    for seg in self.segments():
        crosses_band = (seg.p.y > p.y) != (seg.q.y > p.y)
        if crosses_band and p.x < (seg.q.x - seg.p.x) * (p.y - seg.p.y) / (seg.q.y - seg.p.y) + seg.p.x:
            inside = not inside
    return inside
Returns True if point is contained inside this Polygon, False otherwise. This method uses the Ray Casting algorithm. Examples: >>> p = Polygon() >>> p.vertices = [Point(1, 1), Point(1, -1), Point(-1, -1), Point(-1, 1)] >>> p.contains( Point(0, 0) ) True >>> p.contains( Point(2, 3) ) False
def import_medusa_data(mat_filename, config_file):
    """Import measurement data (a .mat file) of the FZJ EIT160 system.

    This data format is identified as 'FZJ-EZ-2017'.

    Parameters
    ----------
    mat_filename: string
        filename to the .mat data file. Note that only MNU0 single-potentials
        are supported!
    config_file: string
        filename for configuration file. The configuration file contains N
        rows with 4 columns each (a, b, m, n)

    Returns
    -------
    dfn : pandas.DataFrame
        four-point measurements constructed by superposition (empty frame
        when no configuration could be matched)
    df_md : pandas.DataFrame
        measurement metadata as returned by the .mat reader
    """
    df_emd, df_md = _read_mat_mnu0(mat_filename)

    # 'configs' can be a numpy array or a filename
    if not isinstance(config_file, np.ndarray):
        configs = np.loadtxt(config_file).astype(int)
    else:
        configs = config_file

    # construct four-point measurements via superposition
    print('constructing four-point measurements')
    quadpole_list = []
    if df_emd is not None:
        index = 0
        for Ar, Br, M, N in configs:
            # print('constructing', Ar, Br, M, N)
            # the order of A and B doesn't concern us
            A = np.min((Ar, Br))
            B = np.max((Ar, Br))

            # first choice: correct ordering
            query_M = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
                A, B, M
            ))
            query_N = df_emd.query('a=={0} and b=={1} and p=={2}'.format(
                A, B, N
            ))

            # skip configurations without matching single-potential data
            if query_M.size == 0 or query_N.size == 0:
                continue

            index += 1

            # keep these columns as they are (no subtracting)
            keep_cols = [
                'datetime',
                'frequency',
                'a', 'b',
                'Zg1', 'Zg2', 'Zg3',
                'Is',
                'Il',
                'Zg',
                'Iab',
            ]

            df4 = pd.DataFrame()
            diff_cols = ['Zt', ]
            df4[keep_cols] = query_M[keep_cols]
            # superpose the two single potentials: M minus N
            for col in diff_cols:
                df4[col] = query_M[col].values - query_N[col].values
            df4['m'] = query_M['p'].values
            df4['n'] = query_N['p'].values

            quadpole_list.append(df4)

    if quadpole_list:
        dfn = pd.concat(quadpole_list)
        # derive resistance, voltage and phase from the transfer impedance Zt
        Rsign = np.sign(dfn['Zt'].real)
        dfn['r'] = Rsign * np.abs(dfn['Zt'])
        dfn['Vmn'] = dfn['r'] * dfn['Iab']
        dfn['rpha'] = np.arctan2(
            np.imag(dfn['Zt'].values),
            np.real(dfn['Zt'].values)
        ) * 1e3
    else:
        dfn = pd.DataFrame()

    return dfn, df_md
Import measurement data (a .mat file) of the FZJ EIT160 system. This data format is identified as 'FZJ-EZ-2017'. Parameters ---------- mat_filename: string filename to the .mat data file. Note that only MNU0 single-potentials are supported! config_file: string filename for configuration file. The configuration file contains N rows with 4 columns each (a, b, m, n) Returns -------
def set_digital_latch(self, pin, threshold_type, cb=None):
    """
    This method "arms" a digital pin for its data to be latched and saved
    in the latching table.

    If a callback method is provided, when latching criteria is achieved,
    the callback function is called with latching data notification. In
    that case, the latching table is not updated.

    :param pin: Digital pin number
    :param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW
    :param cb: callback function
    :return: True if successful, False if parameter data is invalid
    """
    # Guard clause: only the two defined latch types are valid.
    if not 0 <= threshold_type <= 1:
        return False
    self._command_handler.set_digital_latch(pin, threshold_type, cb)
    return True
This method "arms" a digital pin for its data to be latched and saved in the latching table If a callback method is provided, when latching criteria is achieved, the callback function is called with latching data notification. In that case, the latching table is not updated. :param pin: Digital pin number :param threshold_type: DIGITAL_LATCH_HIGH | DIGITAL_LATCH_LOW :param cb: callback function :return: True if successful, False if parameter data is invalid
def surviors_are_inconsistent(survivor_mapping: Mapping[BaseEntity, Set[BaseEntity]]) -> Set[BaseEntity]:
    """Return all victims that also appear as survivors (transitive chains)."""
    all_victims = itt.chain.from_iterable(survivor_mapping.values())
    # A victim that is itself a survivor key indicates a transitive chain.
    return {victim for victim in all_victims if victim in survivor_mapping}
Check that no transitive mapping is present (no victim is itself a survivor key).
def get_outliers(self):
    '''
    Performs iterative sigma clipping to get outliers.
    '''
    log.info("Clipping outliers...")
    log.info('Iter %d/%d: %d outliers' % (0, self.oiter, len(self.outmask)))

    def M(x):
        # Mask helper: drop NaN / bad / transit cadences before clipping.
        return np.delete(x, np.concatenate(
            [self.nanmask, self.badmask, self.transitmask]), axis=0)

    t = M(self.time)
    # Seed with a sentinel so the first loop comparison always differs.
    outmask = [np.array([-1]), np.array(self.outmask)]

    # Loop as long as the last two outlier arrays aren't equal
    while not np.array_equal(outmask[-2], outmask[-1]):

        # Check if we've done this too many times
        if len(outmask) - 1 > self.oiter:
            log.error('Maximum number of iterations in ' +
                      '``get_outliers()`` exceeded. Skipping...')
            break

        # Check if we're going in circles
        if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]):
            log.error('Function ``get_outliers()`` ' +
                      'is going in circles. Skipping...')
            break

        # Compute the model to get the flux
        self.compute()

        # Get the outliers: sigma-clip the Savitzky-Golay-smoothed flux
        # using the median absolute deviation (1.4826 * MAD ~ one sigma).
        f = SavGol(M(self.flux))
        med = np.nanmedian(f)
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        inds = np.where((f > med + self.osigma * MAD) |
                        (f < med - self.osigma * MAD))[0]

        # Project onto unmasked time array
        inds = np.array([np.argmax(self.time == t[i]) for i in inds])
        self.outmask = np.array(inds, dtype=int)

        # Add them to the running list
        outmask.append(np.array(inds))

        # Log
        log.info('Iter %d/%d: %d outliers' %
                 (len(outmask) - 2, self.oiter, len(self.outmask)))
Performs iterative sigma clipping to get outliers.
def initialize_renderer(extensions=None):
    """
    Initializes the renderer by setting up the extensions (taking a comma
    separated string or iterable of extensions). These extensions are added
    alongside with the configured always-on extensions.

    Returns a markdown renderer instance.

    :param extensions: iterable of extension names/instances or a
        comma-separated string of names; ``None`` means "only the configured
        always-on extensions".
    """
    if extensions is None:
        extensions = []
    if isinstance(extensions, str):
        extensions = [extension.strip() for extension in extensions.split(',')]
    else:
        # Copy so we never mutate a list passed in by the caller (the
        # previous implementation appended to the caller's list in place).
        extensions = list(extensions)
    for extension in getattr(settings, 'MARKYMARK_EXTENSIONS',
                             DEFAULT_MARKYMARK_EXTENSIONS):
        extensions.append(extension)
    return markdown.Markdown(extensions=extensions)
Initializes the renderer by setting up the extensions (taking a comma separated string or iterable of extensions). These extensions are added alongside with the configured always-on extensions. Returns a markdown renderer instance.
def delete_subscription(self, subscription_id):
    """
    Deleting an existing subscription

    :param subscription_id: is the subscription the client wants to delete
    :raises DataFailureException: if the service responds with anything
        other than 204 No Content
    """
    self._validate_uuid(subscription_id)

    url = "/notification/v1/subscription/{}".format(subscription_id)
    response = NWS_DAO().deleteURL(url, self._write_headers())

    # 204 No Content is the only success status for this DELETE endpoint.
    if response.status != 204:
        raise DataFailureException(url, response.status, response.data)

    return response.status
Deleting an existing subscription :param subscription_id: is the subscription the client wants to delete
def boolbox(msg="Shall I continue?", title=" ",
            choices=("[Y]es", "[N]o"), image=None,
            default_choice='Yes', cancel_choice='No'):
    """
    Display a boolean msgbox.

    The returned value is calculated this way::

        if the first choice is chosen, or if the dialog is cancelled:
            returns True
        else:
            returns False

    :param str msg: the msg to be displayed
    :param str title: the window title
    :param list choices: a list or tuple of the choices to be displayed
    :param str image: Filename of image to display
    :param str default_choice: The choice you want highlighted when the gui
        appears
    :param str cancel_choice: If the user presses the 'X' close, which button
        should be pressed
    :return: True if first button pressed or dialog is cancelled, False if
        second button is pressed
    """
    if len(choices) != 2:
        raise AssertionError(
            'boolbox takes exactly 2 choices! Consider using indexbox instead'
        )

    reply = bb.buttonbox(msg=msg,
                         title=title,
                         choices=choices,
                         image=image,
                         default_choice=default_choice,
                         cancel_choice=cancel_choice)
    # True exactly when the first button (or window close -> cancel) was
    # selected.
    return reply == choices[0]
Display a boolean msgbox. The returned value is calculated this way:: if the first choice is chosen, or if the dialog is cancelled: returns True else: returns False :param str msg: the msg to be displayed :param str title: the window title :param list choices: a list or tuple of the choices to be displayed :param str image: Filename of image to display :param str default_choice: The choice you want highlighted when the gui appears :param str cancel_choice: If the user presses the 'X' close, which button should be pressed :return: True if first button pressed or dialog is cancelled, False if second button is pressed
def set_computer_desc(desc=None):
    '''
    Set the Windows computer description

    Args:
        desc (str): The computer description

    Returns:
        str: Description if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
    '''
    if six.PY2:
        desc = _to_unicode(desc)

    # Fetch the current level-101 server info: NetServerSetInfo below
    # requires the full information array, not just the comment field.
    system_info = win32net.NetServerGetInfo(None, 101)

    # Nothing to set without a description.
    if desc is None:
        return False

    system_info['comment'] = desc

    # Apply new settings
    try:
        win32net.NetServerSetInfo(None, 101, system_info)
    except win32net.error as exc:
        (number, context, message) = exc.args
        log.error('Failed to update system')
        log.error('nbr: %s', number)
        log.error('ctx: %s', context)
        log.error('msg: %s', message)
        return False

    return {'Computer Description': get_computer_desc()}
Set the Windows computer description Args: desc (str): The computer description Returns: str: Description if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
def _expand_authorized_keys_path(path, user, home): ''' Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) ''' converted_path = '' had_escape = False for char in path: if had_escape: had_escape = False if char == '%': converted_path += '%' elif char == 'u': converted_path += user elif char == 'h': converted_path += home else: error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char) raise CommandExecutionError(error) continue elif char == '%': had_escape = True else: converted_path += char if had_escape: error = "AuthorizedKeysFile path: Last character can't be escape character" raise CommandExecutionError(error) return converted_path
Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5)
def get_file_list():
    """Return a list of strings corresponding to file names supplied by drag
    and drop or standard input.

    File names come either from ``sys.argv`` (drag-and-drop / CLI arguments)
    or interactively from ``input()``. Paths are shell-tokenized, stripped of
    quotes on Windows, and expanded to absolute paths.
    """
    if len(sys.argv) > 1:
        file_list = list(sys.argv[1:])  # make copy
    else:
        files_str = input('Select the files you want to process and drag and drop them onto this window, '
                          'or type their names separated by spaces. Paths containing spaces should be '
                          'surrounded by quotation marks.\nPress ENTER when you\'re done: ')
        # startswith('win') instead of ``"win" in sys.platform``: the old
        # substring test also matched 'darwin' (macOS), wrongly taking the
        # Windows branch there.
        if sys.platform.startswith("win"):
            # the POSIX shlex.split uses backslashes for escape sequences,
            # so Windows paths need posix=False
            file_list = shlex.split(files_str, posix=False)
            # the non-POSIX shlex.split does not automatically clean
            # quotation marks from the final product
            file_list = [f.replace('"', '').replace("'", "") for f in file_list]
        else:
            file_list = shlex.split(files_str, posix=True)

    # substitute in shell variables and get absolute paths
    for i in range(len(file_list)):
        file_list[i] = os.path.abspath(
            os.path.expanduser(os.path.expandvars(file_list[i]))
        )
    return file_list
Return a list of strings corresponding to file names supplied by drag and drop or standard input.
def local(self, *args, **kwargs):
    '''
    Run :ref:`execution modules <all-salt.modules>` synchronously

    See :py:meth:`salt.client.LocalClient.cmd` for all available parameters.

    Sends a command from the master to the targeted minions. This is the
    same interface that Salt's own CLI uses. Note the ``arg`` and ``kwarg``
    parameters are sent down to the minion(s) and the given function,
    ``fun``, is called with those parameters.

    :return: Returns the result from the execution module
    '''
    # Build a LocalClient against this wrapper's master opts and delegate.
    local = salt.client.get_local_client(mopts=self.opts)
    return local.cmd(*args, **kwargs)
Run :ref:`execution modules <all-salt.modules>` synchronously See :py:meth:`salt.client.LocalClient.cmd` for all available parameters. Sends a command from the master to the targeted minions. This is the same interface that Salt's own CLI uses. Note the ``arg`` and ``kwarg`` parameters are sent down to the minion(s) and the given function, ``fun``, is called with those parameters. :return: Returns the result from the execution module
def from_cif_file(cif_file, source='', comment=''):
    """
    Static method to create Header object from cif_file

    Args:
        cif_file: cif_file path and name
        source: User supplied identifier, i.e. for Materials Project this
            would be the material ID number
        comment: User comment that goes in header

    Returns:
        Header Object
    """
    r = CifParser(cif_file)
    # Only the first structure in the CIF file is used.
    structure = r.get_structures()[0]
    return Header(structure, source, comment)
Static method to create Header object from cif_file Args: cif_file: cif_file path and name source: User supplied identifier, i.e. for Materials Project this would be the material ID number comment: User comment that goes in header Returns: Header Object
def shard_data(source_fnames: List[str],
               target_fname: str,
               source_vocabs: List[vocab.Vocab],
               target_vocab: vocab.Vocab,
               num_shards: int,
               buckets: List[Tuple[int, int]],
               length_ratio_mean: float,
               length_ratio_std: float,
               output_prefix: str) -> Tuple[List[Tuple[List[str], str, 'DataStatistics']], 'DataStatistics']:
    """
    Assign int-coded source/target sentence pairs to shards at random.

    :param source_fnames: The path to the source text (and optional
        token-parallel factor files).
    :param target_fname: The file name of the target file.
    :param source_vocabs: Source vocabulary (and optional source factor
        vocabularies).
    :param target_vocab: Target vocabulary.
    :param num_shards: The total number of shards.
    :param buckets: Bucket list.
    :param length_ratio_mean: Mean length ratio.
    :param length_ratio_std: Standard deviation of length ratios.
    :param output_prefix: The prefix under which the shard files will be
        created.
    :return: Tuple of source (and source factor) file names, target file
        names and statistics for each shard, as well as global statistics.
    """
    os.makedirs(output_prefix, exist_ok=True)
    # One output file per (factor, shard) pair on the source side ...
    sources_shard_fnames = [[os.path.join(output_prefix, C.SHARD_SOURCE % i) + ".%d" % f
                             for i in range(num_shards)] for f in range(len(source_fnames))]
    # ... and one file per shard on the target side.
    target_shard_fnames = [os.path.join(output_prefix, C.SHARD_TARGET % i)
                           for i in range(num_shards)]  # type: List[str]

    # Global statistics over the whole corpus.
    data_stats_accumulator = DataStatisticsAccumulator(buckets, source_vocabs[0], target_vocab,
                                                      length_ratio_mean, length_ratio_std)
    # Per-shard statistics accumulators.
    per_shard_stat_accumulators = [DataStatisticsAccumulator(buckets, source_vocabs[0], target_vocab,
                                                             length_ratio_mean, length_ratio_std)
                                   for shard_idx in range(num_shards)]

    with ExitStack() as exit_stack:
        # Open all shard files at once; ExitStack guarantees they are closed.
        sources_shards = [[exit_stack.enter_context(smart_open(f, mode="wt")) for f in sources_shard_fnames[i]]
                          for i in range(len(source_fnames))]
        target_shards = [exit_stack.enter_context(smart_open(f, mode="wt")) for f in target_shard_fnames]

        source_readers, target_reader = create_sequence_readers(source_fnames, target_fname,
                                                                source_vocabs, target_vocab)

        # Infinite stream of uniformly random shard indices.
        random_shard_iter = iter(lambda: random.randrange(num_shards), None)
        for (sources, target), random_shard_index in zip(parallel_iter(source_readers, target_reader),
                                                         random_shard_iter):
            random_shard_index = cast(int, random_shard_index)
            source_len = len(sources[0])
            target_len = len(target)

            buck_idx, buck = get_parallel_bucket(buckets, source_len, target_len)
            data_stats_accumulator.sequence_pair(sources[0], target, buck_idx)
            per_shard_stat_accumulators[random_shard_index].sequence_pair(sources[0], target, buck_idx)

            # Pairs that fit no bucket are counted above but not written out.
            if buck is None:
                continue

            for i, line in enumerate(sources):
                sources_shards[i][random_shard_index].write(ids2strids(line) + "\n")
            target_shards[random_shard_index].write(ids2strids(target) + "\n")

    per_shard_stats = [shard_stat_accumulator.statistics
                       for shard_stat_accumulator in per_shard_stat_accumulators]

    sources_shard_fnames_by_shards = zip(*sources_shard_fnames)  # type: List[List[str]]

    return list(
        zip(sources_shard_fnames_by_shards, target_shard_fnames, per_shard_stats)), data_stats_accumulator.statistics
Assign int-coded source/target sentence pairs to shards at random. :param source_fnames: The path to the source text (and optional token-parallel factor files). :param target_fname: The file name of the target file. :param source_vocabs: Source vocabulary (and optional source factor vocabularies). :param target_vocab: Target vocabulary. :param num_shards: The total number of shards. :param buckets: Bucket list. :param length_ratio_mean: Mean length ratio. :param length_ratio_std: Standard deviation of length ratios. :param output_prefix: The prefix under which the shard files will be created. :return: Tuple of source (and source factor) file names, target file names and statistics for each shard, as well as global statistics.
def from_file(filename, use_cores=True, thresh=1.e-4):
    """
    Reads an xr-formatted file to create an Xr object.

    Args:
        filename (str): name of file to read from.
        use_cores (bool): use core positions and discard shell positions if
            set to True (default). Otherwise, use shell positions and
            discard core positions.
        thresh (float): relative threshold for consistency check between
            cell parameters (lengths and angles) from header information
            and cell vectors, respectively.

    Returns:
        xr (Xr): Xr object corresponding to the input file.
    """
    # Read the whole file as text, then delegate parsing to from_string.
    with zopen(filename, "rt") as handle:
        contents = handle.read()
    return Xr.from_string(contents, use_cores=use_cores, thresh=thresh)
Reads an xr-formatted file to create an Xr object. Args: filename (str): name of file to read from. use_cores (bool): use core positions and discard shell positions if set to True (default). Otherwise, use shell positions and discard core positions. thresh (float): relative threshold for consistency check between cell parameters (lengths and angles) from header information and cell vectors, respectively. Returns: xr (Xr): Xr object corresponding to the input file.
def get_synset_xml(self, syn_id):
    """
    call cdb_syn with synset identifier -> returns the synset xml;
    """
    import ast

    http, resp, content = self.connect()

    params = ""
    fragment = ""
    path = "cdb_syn"
    if self.debug:
        printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
    # output_opt: plain, html, xml
    # 'xml' is actually xhtml (with markup), but it is not valid xml!
    # 'plain' is actually valid xml (without markup)
    output_opt = "plain"
    if self.debug:
        printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
    action = "runQuery"
    if self.debug:
        printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
        printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )

    qdict = {}
    qdict[ "action" ] = action
    qdict[ "query" ] = syn_id
    qdict[ "outtype" ] = output_opt

    query = urllib.urlencode( qdict )
    db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
    db_url = urlparse.urlunparse( db_url_tuple )
    if self.debug:
        printf( "db_url: %s" % db_url )

    resp, content = http.request( db_url, "GET" )
    if self.debug:
        printf( "resp:\n%s" % resp )
        # printf( "content:\n%s" % content )
        # printf( "content is of type: %s" % type( content ) )   #<type 'str'>

    # SECURITY: the service returns a Python-literal (repr) encoded string;
    # parse it with ast.literal_eval instead of eval() so arbitrary code in
    # a malicious/compromised remote response cannot execute.
    xml_data = ast.literal_eval( content )
    return etree.fromstring( xml_data )
call cdb_syn with synset identifier -> returns the synset xml;
def setDataFrame(self, dataFrame):
    """setter function to _dataFrame. Holds all data.

    Note:
        It's not implemented with python properties to keep Qt conventions.

    Raises:
        TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.

    Args:
        dataFrame (pandas.core.frame.DataFrame): assign dataFrame to
            _dataFrame. Holds all the data displayed.
    """
    is_frame = isinstance(dataFrame, pandas.core.frame.DataFrame)
    if not is_frame:
        raise TypeError('Argument is not of type pandas.core.frame.DataFrame')

    # Bracket the swap with Qt layout signals so attached views refresh.
    self.layoutAboutToBeChanged.emit()
    self._dataFrame = dataFrame
    self.layoutChanged.emit()
setter function to _dataFrame. Holds all data. Note: It's not implemented with python properties to keep Qt conventions. Raises: TypeError: if dataFrame is not of type pandas.core.frame.DataFrame. Args: dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
    """ notch filter the time series using an FIR filtered generated from
    the ideal response passed through a time-domain kaiser window
    (beta = 5.0)

    The suppression of the notch filter is related to the bandwidth and
    the number of samples in the filter length. For a few Hz bandwidth,
    a length corresponding to a few seconds is typically
    required to create significant suppression in the notched band.

    Parameters
    ----------
    f1: float
        The start of the frequency suppression.
    f2: float
        The end of the frequency suppression.
    order: int
        Number of corrupted samples on each side of the time series;
        also passed through as the order of the generated FIR filter.
    beta: float
        Beta parameter of the kaiser window that sets the side lobe
        attenuation.
    remove_corrupted : bool
        If True (default), the ``order`` samples at each end of the
        filtered series — the region corrupted by the filter — are
        trimmed off before returning.

    Returns
    -------
    TimeSeries
        The notch-filtered (and optionally trimmed) time series.
    """
    from pycbc.filter import notch_fir
    ts = notch_fir(self, f1, f2, order, beta=beta)
    if remove_corrupted:
        ts = ts[order:len(ts)-order]
    return ts
notch filter the time series using an FIR filtered generated from the ideal response passed through a time-domain kaiser window (beta = 5.0) The suppression of the notch filter is related to the bandwidth and the number of samples in the filter length. For a few Hz bandwidth, a length corresponding to a few seconds is typically required to create significant suppression in the notched band. Parameters ---------- Time Series: TimeSeries The time series to be notched. f1: float The start of the frequency suppression. f2: float The end of the frequency suppression. order: int Number of corrupted samples on each side of the time series beta: float Beta parameter of the kaiser window that sets the side lobe attenuation.
def normalize(s, replace_spaces=True):
    """Normalize non-ascii characters to their closest ascii counterparts.

    Every character outside the whitelist (ascii letters, digits, space
    and '-') is replaced by the first code point of its NFKD compatibility
    decomposition when that is whitelisted (e.g. 'é' -> 'e'); otherwise it
    is dropped from the result. Note this also drops literal underscores.

    :param s: text to normalize. ``bytes`` input is decoded as UTF-8
        (undecodable bytes are ignored).
    :param replace_spaces: when True (default), spaces in the result are
        converted to underscores.
    :return: the normalized text string.
    """
    whitelist = ' -' + string.ascii_letters + string.digits

    # On py2 ``bytes is str`` and decode() behaves like the former
    # six.text_type(s, 'utf-8', 'ignore'); on py3 this handles raw bytes.
    if isinstance(s, bytes):
        s = s.decode('utf-8', 'ignore')

    table = {}
    for ch in s:
        if ch in whitelist or ord(ch) in table:
            continue
        try:
            # First code point of the NFKD decomposition; IndexError if
            # the decomposition is empty for this character.
            replacement = unicodedata.normalize('NFKD', ch)[0]
        except (IndexError, ValueError):
            replacement = u'_'
        table[ord(ch)] = replacement if replacement in whitelist else u'_'

    # '_' marks characters with no ascii counterpart; strip them out.
    result = s.translate(table).replace(u'_', u'')
    if replace_spaces:
        result = result.replace(' ', '_')
    return result
Normalize non-ascii characters to their closest ascii counterparts
def find_comp_by_target(self, target):
    '''Finds a component using a TargetComponent or one of its subclasses.

    @param A @ref TargetComponent object or subclass of
    @ref TargetComponent.
    @return A Component object matching the target.
    @raises MissingComponentError

    '''
    for candidate in self._components:
        same_id = candidate.id == target.component_id
        same_instance = candidate.instance_name == target.instance_name
        if same_id and same_instance:
            return candidate
    raise MissingComponentError
Finds a component using a TargetComponent or one of its subclasses. @param A @ref TargetComponent object or subclass of @ref TargetComponent. @return A Component object matching the target. @raises MissingComponentError
def remove_thumbnail(self, thumbnail):
    """Remove a thumbnail from the widget and update the selection.

    Detaches the thumbnail from the internal list and the layout,
    schedules it for deletion, disconnects its signals, and — when the
    removed thumbnail was the current one — selects its nearest
    neighbour or clears the canvas if none remain.
    """
    if thumbnail in self._thumbnails:
        position = self._thumbnails.index(thumbnail)
        self._thumbnails.remove(thumbnail)
        self.layout().removeWidget(thumbnail)
        thumbnail.deleteLater()
        # Drop all signal connections so the deleted widget cannot
        # call back into this container.
        thumbnail.sig_canvas_clicked.disconnect()
        thumbnail.sig_remove_figure.disconnect()
        thumbnail.sig_save_figure.disconnect()

    # Pick a replacement selection when the removed thumbnail was the
    # current one; otherwise leave the selection untouched.
    if thumbnail == self.current_thumbnail:
        if len(self._thumbnails) > 0:
            self.set_current_index(
                min(position, len(self._thumbnails) - 1))
        else:
            self.current_thumbnail = None
            self.figure_viewer.figcanvas.clear_canvas()
Remove thumbnail.