positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def yieldable(self):
    """Return True if there is a line that the buffer can return, False otherwise."""
    # No data has been read at all yet.
    if self.read_buffer is None:
        return False

    # Ignore a trailing newline, then look for the furthest interior
    # newline: if one exists, a complete line is available.
    t = _remove_trailing_new_line(self.read_buffer)
    n = _find_furthest_new_line(t)
    if n >= 0:
        return True

    # we have read in entire file and have some unprocessed lines
    if self.read_position == 0 and self.read_buffer is not None:
        return True
    return False
Return True if there is a line that the buffer can return, False otherwise.
def _is_ignorable_404(uri):
    """
    Returns True if the given request *shouldn't* notify the site managers.
    """
    ignorable_patterns = getattr(django_settings, "IGNORABLE_404_URLS", ())
    for pattern in ignorable_patterns:
        if pattern.search(uri):
            return True
    return False
Returns True if the given request *shouldn't* notify the site managers.
def reply(self):
    """Reply to the selected status.

    Opens a compose modal pre-filled with the selected status' content
    warning, submits the reply, and prepends the new status to the list.
    """
    status = self.get_selected_status()
    app, user = self.app, self.user
    if not app or not user:
        self.footer.draw_message("You must be logged in to reply", Color.RED)
        return

    # NOTE(review): '\n'.join(...) over a plain string would insert a newline
    # between every character; this is only correct if spoiler_text is a
    # list of lines — confirm against the API response shape.
    compose_modal = ComposeModal(self.stdscr,
                                 default_cw='\n'.join(status['spoiler_text']) or None,
                                 resize_callback=self.on_resize)
    content, cw = compose_modal.loop()
    self.full_redraw()

    if content is None:
        # Compose was cancelled.
        return
    elif len(content) == 0:
        self.footer.draw_message("Status must contain content", Color.RED)
        return

    self.footer.draw_message("Submitting reply...", Color.YELLOW)
    response = api.post_status(app, user, content,
                               spoiler_text=cw,
                               sensitive=cw is not None,
                               in_reply_to_id=status['id'])
    status = parse_status(response)
    # Show the new reply at the top and keep the previous selection in view.
    self.statuses.insert(0, status)
    self.selected += 1
    self.left.draw_statuses(self.statuses, self.selected)
    self.footer.draw_message("✓ Reply posted", Color.GREEN)
Reply to the selected status
def get_mapping(session, table, candidates, generator, key_map):
    """Generate map of keys and values for the candidate from the generator.

    :param session: The database session.
    :param table: The table we will be inserting into (i.e. Feature or Label).
    :param candidates: The candidates to get mappings for.
    :param generator: A generator yielding (candidate_id, key, value) tuples.
    :param key_map: A mutable dict which values will be added to as
        {key: [relations]}.
    :type key_map: Dict
    :return: Generator of dictionaries of
        {"candidate_id": _, "keys": _, "values": _}
    :rtype: generator of dict
    """
    for cand in candidates:
        # Grab the old values currently in the DB
        try:
            temp = session.query(table).filter(table.candidate_id == cand.id).one()
            cand_map = dict(zip(temp.keys, temp.values))
        except NoResultFound:
            cand_map = {}

        map_args = {"candidate_id": cand.id}
        for cid, key, value in generator(cand):
            # Zero values are treated as "no annotation" and skipped.
            if value == 0:
                continue
            cand_map[key] = value

        # Assemble label arguments
        map_args["keys"] = [*cand_map.keys()]
        map_args["values"] = [*cand_map.values()]

        # Update key_map by adding the candidate class for each key
        for key in map_args["keys"]:
            try:
                key_map[key].add(cand.__class__.__tablename__)
            except KeyError:
                key_map[key] = {cand.__class__.__tablename__}
        yield map_args
Generate map of keys and values for the candidate from the generator. :param session: The database session. :param table: The table we will be inserting into (i.e. Feature or Label). :param candidates: The candidates to get mappings for. :param generator: A generator yielding (candidate_id, key, value) tuples. :param key_map: A mutable dict which values will be added to as {key: [relations]}. :type key_map: Dict :return: Generator of dictionaries of {"candidate_id": _, "keys": _, "values": _} :rtype: generator of dict
def output(self, msg, indent, status=None):
    """Alias for print_indent_msg with color determined by status."""
    # Colorize only when the instance has color output enabled.
    color = get_color_from_status(status) if self.use_color else None
    print_indent_msg(msg, indent, color)
Alias for print_indent_msg with color determined by status.
def from_config(cls, cp, model, nprocesses=1, use_mpi=False):
    """Loads the sampler from the given config file.

    Reads the [sampler] section for the walker count, checkpoint options,
    an optional log-posterior function, and then applies target, burn-in
    and thinning settings to the constructed sampler.
    """
    section = "sampler"
    # check name
    assert cp.get(section, "name") == cls.name, (
        "name in section [sampler] must match mine")
    # get the number of walkers to use
    nwalkers = int(cp.get(section, "nwalkers"))
    # get the checkpoint interval, if it's specified
    checkpoint_interval = cls.checkpoint_from_config(cp, section)
    checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
    # get the logpost function
    lnpost = get_optional_arg_from_config(cp, section, 'logpost-function')
    obj = cls(model, nwalkers,
              checkpoint_interval=checkpoint_interval,
              checkpoint_signal=checkpoint_signal,
              logpost_function=lnpost,
              nprocesses=nprocesses, use_mpi=use_mpi)
    # set target
    obj.set_target_from_config(cp, section)
    # add burn-in if it's specified
    obj.set_burn_in_from_config(cp)
    # set prethin options
    obj.set_thin_interval_from_config(cp, section)
    return obj
Loads the sampler from the given config file.
def create_node(kwargs):
    '''
    Convenience function to make the rest api call for node creation.
    '''
    if not isinstance(kwargs, dict):
        kwargs = {}

    # Required parameters for the CreateInstance action.
    params = {
        'Action': 'CreateInstance',
        'InstanceType': kwargs.get('size_id', ''),
        'RegionId': kwargs.get('region_id', DEFAULT_LOCATION),
        'ImageId': kwargs.get('image_id', ''),
        'SecurityGroupId': kwargs.get('securitygroup_id', ''),
        'InstanceName': kwargs.get('name', ''),
    }

    # Optional parameters are forwarded verbatim when present.
    optional_keys = (
        'InstanceName', 'InternetChargeType', 'InternetMaxBandwidthIn',
        'InternetMaxBandwidthOut', 'HostName', 'Password',
        'SystemDisk.Category', 'VSwitchId',
        # 'DataDisk.n.Size', 'DataDisk.n.Category', 'DataDisk.n.SnapshotId'
    )
    params.update({key: kwargs[key] for key in optional_keys if key in kwargs})

    # invoke web call
    result = query(params)
    return result['InstanceId']
Convenience function to make the rest api call for node creation.
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch the history of a particular aircraft by its tail number.

    This method can be used to get the history of a particular aircraft by
    its tail number. It checks the user authentication and returns the data
    accordingly.

    Args:
        tail_number (str): The tail number, e.g. VT-ANL
        page (int): Optional page number; for users who are on a plan with
            flightradar24 they can pass in higher page numbers to get more data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts with the data; one dict for each row of data from
        flightradar24

    Example::

        from pyflightdata import FlightData
        f=FlightData()
        #optional login
        f.login(myemail,mypassword)
        f.get_history_by_tail_number('VT-ANL')
        f.get_history_by_tail_number('VT-ANL',page=1,limit=10)

    """
    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_data(url, True)
Fetch the history of a particular aircraft by its tail number. This method can be used to get the history of a particular aircraft by its tail number. It checks the user authentication and returns the data accordingly. Args: tail_number (str): The tail number, e.g. VT-ANL page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data limit (int): Optional limit on number of records returned Returns: A list of dicts with the data; one dict for each row of data from flightradar24 Example:: from pyflightdata import FlightData f=FlightData() #optional login f.login(myemail,mypassword) f.get_history_by_tail_number('VT-ANL') f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
def calculate(cls, byte_arr, crc=0):
    """Compute CRC for input bytes.

    Folds each byte into the running CRC one nibble at a time, low
    nibble first, following the FIT SDK reference implementation.
    """
    for byte in byte_iter(byte_arr):
        for nibble in (byte & 0xF, (byte >> 4) & 0xF):
            tmp = cls.CRC_TABLE[crc & 0xF]
            crc = (crc >> 4) & 0x0FFF
            crc = crc ^ tmp ^ cls.CRC_TABLE[nibble]
    return crc
Compute CRC for input bytes.
def exists(name, attributes):
    '''
    Make sure the given attributes exist on the file/directory

    name
        The path to the file/directory

    attributes
        The attributes that should exist on the file/directory, this is
        accepted as an array, with key and value split with an equals sign,
        if you want to specify a hex value then add 0x to the beginning of
        the value.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}

    if not os.path.exists(name):
        ret['result'] = False
        ret['comment'] = "File or directory doesn't exist"
        return ret

    current_attrs = __salt__['xattr.list'](name)
    current_ids = current_attrs.keys()

    for attr in attributes:
        attr_id, attr_val = attr.split("=")
        attr_hex = attr_val.startswith("0x")

        if attr_hex:
            # Remove spaces and new lines so we can match these
            current_attrs[attr_id] = __salt__['xattr.read'](name, attr_id, hex=True).replace(" ", "").replace("\n", "")
            attr_val = attr_val[2:].replace(" ", "")

        # Fixed: the original computed
        #   (A) or (attr_hex and (A))   where A == (current_attrs[attr_id] == attr_val)
        # whose second disjunct is redundant; simplified to the single compare.
        value_matches = (attr_id in current_attrs
                         and current_attrs[attr_id] == attr_val)

        if attr_id in current_ids and value_matches:
            continue
        else:
            ret['changes'][attr_id] = attr_val
            __salt__['xattr.write'](name, attr_id, attr_val, attr_hex)

    if not ret['changes']:
        ret['comment'] = 'All values existed correctly.'

    return ret
Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value.
def factory(**default_opts):
    """
    Factory function to create decorators for tasks' run methods. Default
    options for the decorator function can be given in *default_opts*. The
    returned decorator can be used with or without function invocation.
    Example:

    .. code-block:: python

        @factory(digits=2)
        def runtime(fn, opts, task, *args, **kwargs):
            t0 = time.time()
            try:
                return fn(task, *args, **kwargs)
            finally:
                t1 = time.time()
                diff = round(t1 - t0, opts["digits"])
                print("runtime:")
                print(diff)

        ...

        class MyTask(law.Task):

            @runtime
            def run(self):
                ...

            # or
            @runtime(digits=3):
            def run(self):
                ...

    .. note:: Decorators might not have the expected behavior when used to
        decorate generator functions such as ``Task.run()`` methods that
        yield dynamic dependencies.
    """
    def make_decorator(decorator):
        @functools.wraps(decorator)
        def configurable(fn=None, **opts):
            # Merge per-use options over the factory defaults.
            merged_opts = default_opts.copy()
            merged_opts.update(opts)

            def bind(fn):
                @functools.wraps(fn)
                def bound(*args, **kwargs):
                    return decorator(fn, merged_opts, *args, **kwargs)
                return bound

            # Support both "@dec" (fn given) and "@dec(...)" (fn is None).
            return bind if fn is None else bind(fn)
        return configurable
    return make_decorator
Factory function to create decorators for tasks' run methods. Default options for the decorator function can be given in *default_opts*. The returned decorator can be used with or without function invocation. Example: .. code-block:: python @factory(digits=2) def runtime(fn, opts, task, *args, **kwargs): t0 = time.time() try: return fn(task, *args, **kwargs) finally: t1 = time.time() diff = round(t1 - t0, opts["digits"]) print("runtime:") print(diff) ... class MyTask(law.Task): @runtime def run(self): ... # or @runtime(digits=3): def run(self): ... .. note:: Decorators might not have the expected behavior when used to decorate generator functions such as ``Task.run()`` methods that yield dynamic dependencies.
async def get_stats(self, battletag: str,
                    regions=(EUROPE, KOREA, AMERICAS, CHINA, JAPAN, ANY),
                    platform=None, _session=None, handle_ratelimit=None,
                    max_tries=None, request_timeout=None):
    """Returns the stats for the profiles on the specified regions and platform.

    For regions without a matching user, the format is the same as
    get_profile. The stats are returned in a dictionary with a similar
    format to what
    https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats
    specifies.
    """
    if platform is None:
        platform = self.default_platform
    try:
        blob_dict = await self._base_request(battletag, "stats", _session,
                                             platform=platform,
                                             handle_ratelimit=handle_ratelimit,
                                             max_tries=max_tries,
                                             request_timeout=request_timeout)
    # Fixed: dropped the unused "as e" binding on the handler.
    except ProfileNotFoundError:
        # The battletag doesn't exist; behave as if no region had data.
        blob_dict = {}
    # Drop empty regions and the request-metadata entry.
    existing_regions = {key: val for key, val in blob_dict.items()
                        if ((val is not None) and (key != "_request"))}
    # Keep only the "stats" payload of each requested region.
    return {key: [inner_val for inner_key, inner_val in val.items()
                  if inner_key == "stats"][0]
            for key, val in existing_regions.items() if key in regions}
Returns the stats for the profiles on the specified regions and platform. For regions without a matching user, the format is the same as get_profile. The stats are returned in a dictionary with a similar format to what https://github.com/SunDwarf/OWAPI/blob/master/api.md#get-apiv3ubattletagstats specifies.
def is_restricted(self, assets, dt):
    """
    Returns whether or not an asset or iterable of assets is restricted
    on a dt.
    """
    # A single Asset yields a plain bool.
    if isinstance(assets, Asset):
        return self._is_restricted_for_asset(assets, dt)

    # An iterable of assets yields a boolean Series indexed by asset.
    is_restricted = partial(self._is_restricted_for_asset, dt=dt)
    return pd.Series(
        index=pd.Index(assets),
        data=vectorize(is_restricted, otypes=[bool])(assets)
    )
Returns whether or not an asset or iterable of assets is restricted on a dt.
def groups_moderators(self, room_id=None, group=None, **kwargs):
    """Lists all moderators of a group."""
    # Prefer the room id; fall back to the room name; otherwise complain.
    if room_id:
        return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs)
    if group:
        return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs)
    raise RocketMissingParamException('roomId or group required')
Lists all moderators of a group.
def export(self, node):
    """Export tree starting at `node`."""
    # Fall back to an identity filter when no attribute iterator is set.
    attriter = self.attriter or (lambda attr_values: attr_values)
    return self.__export(node, self.dictcls, attriter, self.childiter)
Export tree starting at `node`.
def __get_config(self):
    """Really connect: create/look up the room and fetch its configuration."""
    if not self.name:
        # No room name yet; ask the server to create a fresh room and
        # extract the generated name from the redirect URL.
        room_resp = self.conn.get(BASE_URL + "/new")
        room_resp.raise_for_status()
        url = room_resp.url
        try:
            self.name = re.search(r"r/(.+?)$", url).group(1)
        except Exception:
            raise IOError("Failed to create room")

    params = {"room": self.name}
    if self.key:
        params["roomKey"] = self.key
    if self.password:
        params["password"] = self.password
    config = self.conn.make_api_call("getRoomConfig", params)
    if "error" in config:
        raise RuntimeError(
            f"Failed to get room config for {self.name}\n"
            f"{config['error'].get('message') or config['error']}"
        )
    self.config.update(config)

    # Mirror selected config entries as properties on this object.
    self.__add_prop("private")
    self.__add_prop("title")
    self.__add_prop("motd")
    self.__add_prop("adult")
    self.__add_prop("disabled", True)
    self.__add_prop("file_ttl", True)
    return (self.config.room_id, self.config.owner, config["checksum2"])
Really connect
def update_balances(self, recursive=True):
    """Calculate tree balance factor.

    With ``recursive`` set, children are rebalanced bottom-up first.
    An empty subtree has a balance factor of 0.
    """
    if not self.node:
        self.balance = 0
        return
    if recursive:
        for child in (self.node.left, self.node.right):
            if child:
                child.update_balances()
    self.balance = self.node.left.height - self.node.right.height
Calculate tree balance factor
def _get_metadata_path(self, key): """ return the metadata pathname for this key """ return "{group}/meta/{key}/meta".format(group=self.group._v_pathname, key=key)
return the metadata pathname for this key
async def withdraw_bulk(self, *args, **kwargs):
    """Withdraw funds requests to user wallet.

    Accepts:
        - coinid [string] (blockchain id (example: BTCTEST, LTCTEST))
        - address [string] withdrawal address (in hex for tokens)
        - amount [int] withdrawal amount multiplied by decimals_k (10**8)
    Returns dictionary with following fields:
        - success [bool]
    """
    # Only records the request; actual payout processing happens elsewhere.
    await self.db.withdraw_requests.insert_one({
        'coinid': kwargs.get("coinid"),
        'address': kwargs.get("address"),
        'amount': int(kwargs.get("amount")),
        'timestamp': datetime.datetime.utcnow()
    })
    return {'success': True}
Withdraw funds requests to user wallet Accepts: - coinid [string] (blockchain id (example: BTCTEST, LTCTEST)) - address [string] withdrawal address (in hex for tokens) - amount [int] withdrawal amount multiplied by decimals_k (10**8) Returns dictionary with following fields: - success [bool]
def update(self, enabled=values.unset, webhook_url=values.unset,
           webhook_method=values.unset):
    """
    Update the ExportConfigurationInstance

    :param bool enabled: The enabled
    :param unicode webhook_url: The webhook_url
    :param unicode webhook_method: The webhook_method

    :returns: Updated ExportConfigurationInstance
    :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
    """
    # Assemble the POST form parameters.
    form = {
        'Enabled': enabled,
        'WebhookUrl': webhook_url,
        'WebhookMethod': webhook_method,
    }
    payload = self._version.update('POST', self._uri, data=values.of(form))

    return ExportConfigurationInstance(
        self._version,
        payload,
        resource_type=self._solution['resource_type'],
    )
Update the ExportConfigurationInstance :param bool enabled: The enabled :param unicode webhook_url: The webhook_url :param unicode webhook_method: The webhook_method :returns: Updated ExportConfigurationInstance :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationInstance
def poll(self):
    """Poll the job status.

    Returns the changes in this iteration.
    """
    # NOTE: uses dict.iteritems(), so this code targets Python 2.
    self.runner.module_name = 'async_status'
    self.runner.module_args = "jid=%s" % self.jid
    self.runner.pattern = "*"
    self.runner.background = 0

    # Only query hosts that are still running the async job.
    self.runner.inventory.restrict_to(self.hosts_to_poll)
    results = self.runner.run()
    self.runner.inventory.lift_restriction()

    hosts = []
    poll_results = {'contacted': {}, 'dark': {}, 'polled': {}}

    for (host, res) in results['contacted'].iteritems():
        if res.get('started', False):
            # Job still in progress; keep polling this host.
            hosts.append(host)
            poll_results['polled'][host] = res
        else:
            # Job finished; record the final result and fire callbacks.
            self.results['contacted'][host] = res
            poll_results['contacted'][host] = res
            if 'failed' in res:
                self.runner.callbacks.on_async_failed(host, res, self.jid)
            else:
                self.runner.callbacks.on_async_ok(host, res, self.jid)

    for (host, res) in results['dark'].iteritems():
        # Unreachable hosts count as failures.
        self.results['dark'][host] = res
        poll_results['dark'][host] = res
        self.runner.callbacks.on_async_failed(host, res, self.jid)

    self.hosts_to_poll = hosts
    if len(hosts) == 0:
        self.completed = True

    return poll_results
Poll the job status. Returns the changes in this iteration.
def modify(name, **kwargs):
    '''
    Modify an existing job in the schedule

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.modify job1 function='test.ping' seconds=3600
    '''
    ret = {'comment': '',
           'changes': {},
           'result': True}

    # Time-interval options and "when"/"cron" are mutually exclusive.
    time_conflict = False
    for item in ['seconds', 'minutes', 'hours', 'days']:
        if item in kwargs and 'when' in kwargs:
            time_conflict = True
        if item in kwargs and 'cron' in kwargs:
            time_conflict = True

    if time_conflict:
        ret['result'] = False
        ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
        return ret

    if 'when' in kwargs and 'cron' in kwargs:
        ret['result'] = False
        ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
        return ret

    current_schedule = list_(show_all=True, return_yaml=False)
    if name not in current_schedule:
        ret['comment'] = 'Job {0} does not exist in schedule.'.format(name)
        ret['result'] = False
        return ret

    _current = current_schedule[name]
    # Normalize the internal '_seconds' key back to 'seconds' for comparison.
    if '_seconds' in _current:
        _current['seconds'] = _current['_seconds']
        del _current['_seconds']

    _new = build_schedule_item(name, **kwargs)
    if 'result' in _new and not _new['result']:
        return _new

    if _new == _current:
        ret['comment'] = 'Job {0} in correct state'.format(name)
        return ret

    # Record a unified diff of the old vs. new job definition.
    _current_lines = ['{0}:{1}\n'.format(key, value)
                      for (key, value) in sorted(_current.items())]
    _new_lines = ['{0}:{1}\n'.format(key, value)
                  for (key, value) in sorted(_new.items())]
    _diff = difflib.unified_diff(_current_lines, _new_lines)
    ret['changes']['diff'] = ''.join(_diff)

    if 'test' in kwargs and kwargs['test']:
        ret['comment'] = 'Job: {0} would be modified in schedule.'.format(name)
    else:
        persist = True
        if 'persist' in kwargs:
            persist = kwargs['persist']
        # Jobs from opts persist; jobs from pillar are never persisted.
        if name in list_(show_all=True, where='opts', return_yaml=False):
            event_data = {'name': name,
                          'schedule': _new,
                          'func': 'modify',
                          'persist': persist}
        elif name in list_(show_all=True, where='pillar', return_yaml=False):
            event_data = {'name': name,
                          'schedule': _new,
                          'where': 'pillar',
                          'func': 'modify',
                          'persist': False}
        out = __salt__['event.fire'](event_data, 'manage_schedule')
        if out:
            ret['comment'] = 'Modified job: {0} in schedule.'.format(name)
        else:
            ret['comment'] = 'Failed to modify job {0} in schedule.'.format(name)
            ret['result'] = False
    return ret
Modify an existing job in the schedule CLI Example: .. code-block:: bash salt '*' schedule.modify job1 function='test.ping' seconds=3600
def close(self):
    """Close all connections and empty the pool.

    This method is thread safe.
    """
    if self._closed:
        return
    try:
        with self.lock:
            # Re-check under the lock to avoid a double-close race.
            if not self._closed:
                self._closed = True
                for address in list(self.connections):
                    self.remove(address)
    except TypeError as e:
        # NOTE(review): TypeError is silently swallowed here — presumably to
        # tolerate teardown during interpreter shutdown; confirm before
        # changing this handler.
        pass
Close all connections and empty the pool. This method is thread safe.
def setup_scrollarea(self):
    """Setup the scrollarea that will contain the FigureThumbnails."""
    self.view = QWidget()

    self.scene = QGridLayout(self.view)
    # Stretch the empty side columns so thumbnails stay centered.
    self.scene.setColumnStretch(0, 100)
    self.scene.setColumnStretch(2, 100)

    self.scrollarea = QScrollArea()
    self.scrollarea.setWidget(self.view)
    self.scrollarea.setWidgetResizable(True)
    self.scrollarea.setFrameStyle(0)
    self.scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.scrollarea.setSizePolicy(QSizePolicy(QSizePolicy.Ignored,
                                              QSizePolicy.Preferred))

    # Set the vertical scrollbar explicitly:
    # This is required to avoid a "RuntimeError: no access to protected
    # functions or signals for objects not created from Python" in Linux.
    self.scrollarea.setVerticalScrollBar(QScrollBar())

    return self.scrollarea
Setup the scrollarea that will contain the FigureThumbnails.
def glob(self, pattern):
    """Return a list of Path objects that match the pattern.

    `pattern` - a path relative to this directory, with wildcards.

    For example, ``Path('/users').glob('*/bin/*')`` returns a list of all
    the files users have in their :file:`bin` directories.

    .. seealso:: :func:`glob.glob`

    .. note:: Glob is **not** recursive, even when using ``**``. To do
        recursive globbing see :func:`walk`, :func:`walkdirs` or
        :func:`walkfiles`.
    """
    make_path = self._next_class
    return list(map(make_path, glob.glob(self / pattern)))
Return a list of Path objects that match the pattern. `pattern` - a path relative to this directory, with wildcards. For example, ``Path('/users').glob('*/bin/*')`` returns a list of all the files users have in their :file:`bin` directories. .. seealso:: :func:`glob.glob` .. note:: Glob is **not** recursive, even when using ``**``. To do recursive globbing see :func:`walk`, :func:`walkdirs` or :func:`walkfiles`.
def fcoe_get_login_output_fcoe_login_list_fcoe_login_fcoe_interface_name(self, **kwargs):
    """Auto Generated Code.

    Builds the XML payload for a fcoe-get-login RPC keyed by
    'fcoe_login_session_mac', carrying 'fcoe_login_fcoe_interface_name',
    and hands the tree to the callback.
    """
    config = ET.Element("config")
    fcoe_get_login = ET.Element("fcoe_get_login")
    # NOTE(review): this rebinding discards the "config" element created
    # above — it appears throughout this generator's output; confirm before
    # cleaning it up.
    config = fcoe_get_login
    output = ET.SubElement(fcoe_get_login, "output")
    fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
    fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
    fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
    fcoe_login_fcoe_interface_name = ET.SubElement(fcoe_login_list, "fcoe-login-fcoe-interface-name")
    fcoe_login_fcoe_interface_name.text = kwargs.pop('fcoe_login_fcoe_interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def fetch_and_transform(
        transformed_filename,
        transformer,
        loader,
        source_filename,
        source_url,
        subdir=None):
    """
    Fetch a remote file from `source_url`, save it locally as
    `source_filename` and then use the `loader` and `transformer` function
    arguments to turn this saved data into an in-memory object.
    """
    transformed_path = build_path(transformed_filename, subdir)
    if os.path.exists(transformed_path):
        # Transformed artifact already cached locally.
        logger.info("Cached data file: %s", transformed_path)
        result = loader(transformed_path)
    else:
        source_path = fetch_file(source_url, source_filename, subdir)
        logger.info("Generating data file %s from %s",
                    transformed_path, source_path)
        result = transformer(source_path, transformed_path)
    assert os.path.exists(transformed_path)
    return result
Fetch a remote file from `source_url`, save it locally as `source_filename` and then use the `loader` and `transformer` function arguments to turn this saved data into an in-memory object.
def get_logger(name=None, format_string=None):
    """Return a configured stderr logger.

    :type name: str
    :param name: used for declaring log channels.

    :type format_string: str
    :param format_string: for custom formatting
    """
    logging.captureWarnings(True)
    log = logging.getLogger(name)

    # Bind custom method to instance.
    # Source: https://stackoverflow.com/a/2982
    log.set_debug_level = _set_debug_level.__get__(log)
    log.set_debug_level(0)

    if not format_string:
        format_string = '[%(module)s]\t%(levelname)s\t%(message)s'

    # Setting up log formats; any previously attached handlers are replaced.
    log.handlers = []
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        logging.Formatter(format_string),
    )
    log.addHandler(handler)
    return log
:type name: str :param name: used for declaring log channels. :type format_string: str :param format_string: for custom formatting
def utcoffset(self):
    """
    :return: None or a datetime.timedelta() of the offset from UTC
    """
    tz = self.tzinfo
    # Naive values carry no offset. Aware values are normalized to a fixed
    # year before asking the tzinfo for its offset (original behavior).
    return None if tz is None else tz.utcoffset(self.replace(year=2000))
:return: None or a datetime.timedelta() of the offset from UTC
def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False split_dn = dn.split(r'.') leftmost, remainder = split_dn[0], split_dn[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname)
Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3
def captureQuery(self, query, params=(), engine=None, **kwargs):
    """
    Creates an event for a SQL query.

    >>> client.captureQuery('SELECT * FROM foo')
    """
    event_type = 'raven.events.Query'
    return self.capture(event_type, query=query, params=params,
                        engine=engine, **kwargs)
Creates an event for a SQL query. >>> client.captureQuery('SELECT * FROM foo')
def create_segments(self, segments):
    """Enqueue segment creates"""
    enqueue = self.provision_queue.put
    for segment in segments:
        enqueue(MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                             a_const.CREATE))
Enqueue segment creates
def get(self, arg):
    """
    Return instance object with given EC2 ID or nametag.
    """
    try:
        # First try interpreting arg as a Name tag.
        reservations = self.get_all_instances(filters={'tag:Name': [arg]})
        instance = reservations[0].instances[0]
    except IndexError:
        try:
            # Fall back to treating arg as an instance ID.
            instance = self.get_all_instances([arg])[0].instances[0]
        except (_ResponseError, IndexError):
            # TODO: encapsulate actual exception for debugging
            err = "Can't find any instance with name or ID '%s'" % arg
            raise ValueError(err)
    return instance
Return instance object with given EC2 ID or nametag.
def indices_to_labels(self, indices: Sequence[int]) -> List[str]:
    """Converts a sequence of indices into their corresponding labels."""
    lookup = self.INDEX_TO_LABEL
    return [lookup[index] for index in indices]
Converts a sequence of indices into their corresponding labels.
def _generate_examples(self, data_dir):
    """Generate corrupted Cifar10 test data.

    Apply corruptions to the raw images according to self.corruption_type.

    Args:
      data_dir: root directory of downloaded dataset

    Yields:
      dictionary with image file and label.
    """
    corruption = self.builder_config.corruption
    severity = self.builder_config.severity

    images_file = os.path.join(data_dir, _CORRUPTIONS_TO_FILENAMES[corruption])
    labels_file = os.path.join(data_dir, _LABELS_FILENAME)
    with tf.io.gfile.GFile(labels_file, mode='rb') as f:
        labels = np.load(f)
    num_images = labels.shape[0] // 5
    # Labels are stacked 5 times so we can just read the first iteration
    labels = labels[:num_images]
    with tf.io.gfile.GFile(images_file, mode='rb') as f:
        images = np.load(f)

    # Slice images corresponding to correct severity level
    images = images[(severity - 1) * num_images:severity * num_images]

    for image, label in zip(images, labels):
        yield {
            'image': image,
            'label': label,
        }
Generate corrupted Cifar10 test data. Apply corruptions to the raw images according to self.corruption_type. Args: data_dir: root directory of downloaded dataset Yields: dictionary with image file and label.
def par_relax_AX(i):
    """Parallel implementation of relaxation if option ``RelaxParam`` != 1.0."""
    # Module-level arrays shared between the multiprocessing workers.
    global mp_X
    global mp_Xnr
    global mp_DX
    global mp_DXnr
    # Save the un-relaxed values for group i before blending.
    mp_Xnr[mp_grp[i]:mp_grp[i+1]] = mp_X[mp_grp[i]:mp_grp[i+1]]
    mp_DXnr[i] = mp_DX[i]
    if mp_rlx != 1.0:
        # Blend the current iterate with the auxiliary variables by mp_rlx.
        grpind = slice(mp_grp[i], mp_grp[i+1])
        mp_X[grpind] = mp_rlx * mp_X[grpind] + (1-mp_rlx)*mp_Y1[grpind]
        mp_DX[i] = mp_rlx*mp_DX[i] + (1-mp_rlx)*mp_Y0[i]
Parallel implementation of relaxation if option ``RelaxParam`` != 1.0.
def _ExtractMetadataFromFileEntry(self, mediator, file_entry, data_stream): """Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream. """ # Do not extract metadata from the root file entry when it is virtual. if file_entry.IsRoot() and file_entry.type_indicator not in ( self._TYPES_WITH_ROOT_METADATA): return # We always want to extract the file entry metadata but we only want # to parse it once per file entry, so we only use it if we are # processing the default data stream of regular files. if data_stream and not data_stream.IsDefault(): return display_name = mediator.GetDisplayName() logger.debug( '[ExtractMetadataFromFileEntry] processing file entry: {0:s}'.format( display_name)) self.processing_status = definitions.STATUS_INDICATOR_EXTRACTING if self._processing_profiler: self._processing_profiler.StartTiming('extracting') self._event_extractor.ParseFileEntryMetadata(mediator, file_entry) if self._processing_profiler: self._processing_profiler.StopTiming('extracting') self.processing_status = definitions.STATUS_INDICATOR_RUNNING
Extracts metadata from a file entry. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. file_entry (dfvfs.FileEntry): file entry to extract metadata from. data_stream (dfvfs.DataStream): data stream or None if the file entry has no data stream.
def strip_prompt(self, a_string):
    """Strip 'Done' from command output."""
    output = super(NetscalerSSH, self).strip_prompt(a_string)
    lines = output.split(self.RESPONSE_RETURN)
    # Drop the final line when it carries the 'Done' status marker.
    if "Done" in lines[-1]:
        return self.RESPONSE_RETURN.join(lines[:-1])
    else:
        return output
Strip 'Done' from command output
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc.
def rlmb_tiny_stochastic():
    """Tiny setting with a stochastic next-frame model."""
    hparams = rlmb_ppo_tiny()
    # Too slow with 2 for regular runs.
    hparams.epochs = 1
    stochastic_model = "next_frame_basic_stochastic"
    hparams.generative_model = stochastic_model
    hparams.generative_model_params = stochastic_model
    return hparams
Tiny setting with a stochastic next-frame model.
def cli(**args):
    """ Shakedown is a DC/OS test-harness wrapper for the pytest tool. """
    import shakedown

    # Read configuration options from ~/.shakedown (if exists)
    args = read_config(args)

    # Set configuration defaults
    args = set_config_defaults(args)

    if args['quiet']:
        shakedown.cli.quiet = True

    # Resolve the cluster URL: explicit --dcos-url flag first, then the
    # currently attached cluster.
    if not args['dcos_url']:
        try:
            args['dcos_url'] = dcos_url()
        except:
            # NOTE(review): bare except; presumably any failure here just
            # means "no cluster attached" -- confirm.
            click.secho('error: cluster URL not set, use --dcos-url or see --help for more information.', fg='red', bold=True)
            sys.exit(1)

    if not args['dcos_url']:
        click.secho('error: --dcos-url is a required option; see --help for more information.', fg='red', bold=True)
        sys.exit(1)

    if args['ssh_key_file']:
        shakedown.cli.ssh_key_file = args['ssh_key_file']

    if args['ssh_user']:
        shakedown.cli.ssh_user = args['ssh_user']

    if not args['no_banner']:
        echo(banner(), n=False)

    echo('Running pre-flight checks...', d='step-maj')

    # required modules and their 'version' method
    imported = {}
    requirements = {
        'pytest': '__version__',
        'dcos': 'version'
    }

    for req in requirements:
        ver = requirements[req]

        echo("Checking for {} library...".format(req), d='step-min', n=False)
        try:
            imported[req] = importlib.import_module(req, package=None)
        except ImportError:
            click.secho("error: {p} is not installed; run 'pip install {p}'.".format(p=req), fg='red', bold=True)
            sys.exit(1)
        # Report the module's version attribute (named in `requirements`).
        echo(getattr(imported[req], requirements[req]))

    if shakedown.attach_cluster(args['dcos_url']):
        echo('Checking DC/OS cluster version...', d='step-min', n=False)
        echo(shakedown.dcos_version())
    else:
        # Not attached yet: build a cluster config in a temporary directory
        # and authenticate against the cluster.
        with imported['dcos'].cluster.setup_directory() as temp_path:
            imported['dcos'].cluster.set_attached(temp_path)

            imported['dcos'].config.set_val('core.dcos_url', args['dcos_url'])
            if args['ssl_no_verify']:
                imported['dcos'].config.set_val('core.ssl_verify', 'False')
            try:
                imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False)
            except:
                # Setup failed, most likely because we are not authenticated;
                # try each credential source in turn.
                echo('Authenticating with DC/OS cluster...', d='step-min')
                authenticated = False
                token = imported['dcos'].config.get_config_val("core.dcos_acs_token")
                if token is not None:
                    echo('trying existing ACS token...', d='step-min', n=False)
                    try:
                        shakedown.dcos_leader()

                        authenticated = True
                        echo(fchr('PP'), d='pass')
                    except imported['dcos'].errors.DCOSException:
                        echo(fchr('FF'), d='fail')
                if not authenticated and args['oauth_token']:
                    try:
                        echo('trying OAuth token...', d='item-maj', n=False)
                        token = shakedown.authenticate_oauth(args['oauth_token'])

                        with stdchannel_redirected(sys.stderr, os.devnull):
                            imported['dcos'].config.set_val('core.dcos_acs_token', token)

                        authenticated = True
                        echo(fchr('PP'), d='pass')
                    except:
                        echo(fchr('FF'), d='fail')
                if not authenticated and args['username'] and args['password']:
                    try:
                        echo('trying username and password...', d='item-maj', n=False)
                        token = shakedown.authenticate(args['username'], args['password'])

                        with stdchannel_redirected(sys.stderr, os.devnull):
                            imported['dcos'].config.set_val('core.dcos_acs_token', token)

                        authenticated = True
                        echo(fchr('PP'), d='pass')
                    except:
                        echo(fchr('FF'), d='fail')

                if authenticated:
                    imported['dcos'].cluster.setup_cluster_config(args['dcos_url'], temp_path, False)
                    echo('Checking DC/OS cluster version...', d='step-min', n=False)
                    echo(shakedown.dcos_version())
                else:
                    click.secho("error: no authentication credentials or token found.", fg='red', bold=True)
                    sys.exit(1)

    # NOTE(review): this local class shadows the `shakedown` module imported
    # above; everything below refers to the plugin class, not the module.
    class shakedown:
        """ This encapsulates a PyTest wrapper plugin """

        # Shared mutable state across pytest hook invocations
        # (class-level on purpose).
        state = {}

        stdout = []

        tests = {
            'file': {},
            'test': {}
        }

        report_stats = {
            'passed':[],
            'skipped':[],
            'failed':[],
            'total_passed':0,
            'total_skipped':0,
            'total_failed':0,
        }

        def output(title, state, text, status=True):
            """ Capture and display stdout/stderr output

                :param title: the title of the output box (eg. test name)
                :type title: str
                :param state: state of the result (pass, fail)
                :type state: str
                :param text: the stdout/stderr output
                :type text: str
                :param status: whether to output a status marker
                :type status: bool

                NOTE(review): defined without `self`; it is always invoked
                as `shakedown.output(...)` on the class, so this works --
                confirm it is never called on an instance.
            """

            if state == 'fail':
                schr = fchr('FF')
            elif state == 'pass':
                schr = fchr('PP')
            elif state == 'skip':
                schr = fchr('SK')
            else:
                schr = ''

            if status:
                if not args['stdout_inline']:
                    if state == 'fail':
                        echo(schr, d='fail')
                    elif state == 'pass':
                        echo(schr, d='pass')
                else:
                    if not text:
                        if state == 'fail':
                            echo(schr, d='fail')
                        elif state == 'pass':
                            if '::' in title:
                                echo(title.split('::')[-1], d='item-min', n=False)
                            echo(schr, d='pass')

            if text and args['stdout'] in [state, 'all']:
                o = decorate(schr + ': ', 'quote-head-' + state)
                o += click.style(decorate(title, style=state), bold=True) + "\n"
                o += decorate(str(text).strip(), style='quote-' + state)

                if args['stdout_inline']:
                    echo(o)
                else:
                    # Buffer so output can be replayed at session finish.
                    shakedown.stdout.append(o)

        def pytest_collectreport(self, report):
            """ Collect and validate individual test files
            """

            if not 'collect' in shakedown.state:
                shakedown.state['collect'] = 1
                echo('Collecting and validating test files...', d='step-min')

            if report.nodeid:
                echo(report.nodeid, d='item-maj', n=False)

                state = None

                if report.failed:
                    state = 'fail'
                if report.passed:
                    state = 'pass'
                if report.skipped:
                    state = 'skip'

                if state:
                    if report.longrepr:
                        shakedown.output(report.nodeid, state, report.longrepr)
                    else:
                        shakedown.output(report.nodeid, state, None)

        def pytest_sessionstart(self):
            """ Tests have been collected, begin running them...
            """

            echo('Initiating testing phase...', d='step-maj')

        def pytest_report_teststatus(self, report):
            """ Print report results to the console as they are run
            """

            try:
                report_file, report_test = report.nodeid.split('::', 1)
            except ValueError:
                # Node id contains no '::', i.e. it is not an individual test.
                return

            if not 'test' in shakedown.state:
                shakedown.state['test'] = 1
                echo('Running individual tests...', d='step-min')

            if not report_file in shakedown.tests['file']:
                shakedown.tests['file'][report_file] = 1
                echo(report_file, d='item-maj')

            if not report.nodeid in shakedown.tests['test']:
                shakedown.tests['test'][report.nodeid] = {}

                if args['stdout_inline']:
                    echo('')
                    echo(report_test + ':', d='item-min')
                else:
                    echo(report_test, d='item-min', n=False)

            if report.failed:
                shakedown.tests['test'][report.nodeid]['fail'] = True

            if report.when == 'teardown' and not 'tested' in shakedown.tests['test'][report.nodeid]:
                shakedown.output(report.nodeid, 'pass', None)

            # Suppress excess terminal output
            return report.outcome, None, None

        def pytest_runtest_logreport(self, report):
            """ Log the [stdout, stderr] results of tests if desired
            """

            state = None

            for secname, content in report.sections:
                if report.failed:
                    state = 'fail'
                if report.passed:
                    state = 'pass'
                if report.skipped:
                    state = 'skip'

                if state and secname != 'Captured stdout call':
                    # Setup/teardown captured output is attributed to the
                    # module rather than the individual test.
                    module = report.nodeid.split('::', 1)[0]
                    cap_type = secname.split(' ')[-1]

                    if not 'setup' in shakedown.tests['test'][report.nodeid]:
                        shakedown.tests['test'][report.nodeid]['setup'] = True
                        shakedown.output(module + ' ' + cap_type, state, content, False)
                    elif cap_type == 'teardown':
                        shakedown.output(module + ' ' + cap_type, state, content, False)
                elif state and report.when == 'call':
                    if 'tested' in shakedown.tests['test'][report.nodeid]:
                        shakedown.output(report.nodeid, state, content, False)
                    else:
                        shakedown.tests['test'][report.nodeid]['tested'] = True
                        shakedown.output(report.nodeid, state, content)

            # Capture execution crashes
            if hasattr(report.longrepr, 'reprcrash'):
                longreport = report.longrepr

                if 'tested' in shakedown.tests['test'][report.nodeid]:
                    shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash), False)
                else:
                    shakedown.tests['test'][report.nodeid]['tested'] = True
                    shakedown.output(report.nodeid, 'fail', 'error: ' + str(longreport.reprcrash))

        def pytest_sessionfinish(self, session, exitstatus):
            """ Testing phase is complete; print extra reports (stdout/stderr,
                JSON) as requested
            """

            echo('Test phase completed.', d='step-maj')

            if ('stdout' in args and args['stdout']) and shakedown.stdout:
                for output in shakedown.stdout:
                    echo(output)

    # Assemble the pytest command line from the parsed CLI options.
    opts = ['-q', '--tb=no', "--timeout={}".format(args['timeout'])]

    if args['fail'] == 'fast':
        opts.append('-x')

    if args['pytest_option']:
        for opt in args['pytest_option']:
            opts.append(opt)

    if args['stdout_inline']:
        opts.append('-s')

    if args['tests']:
        tests_to_run = []
        for test in args['tests']:
            tests_to_run.extend(test.split())
        for test in tests_to_run:
            opts.append(test)

    # Run pytest with the plugin class above and exit with its status code.
    exitstatus = imported['pytest'].main(opts, plugins=[shakedown()])
    sys.exit(exitstatus)
Shakedown is a DC/OS test-harness wrapper for the pytest tool.
def _parse_network_settings(opts, current):
    '''
    Filters given options and outputs valid settings for the
    global network settings file.

    opts -- requested settings (keys are case-insensitive)
    current -- settings currently in the network settings file

    Returns a dict of validated, quoted settings. Raises via
    _raise_error_network() when a required setting is missing or invalid.
    '''
    # Normalize keys
    opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
    current = dict((k.lower(), v) for (k, v) in six.iteritems(current))

    # Check for supported parameters
    retain_settings = opts.get('retain_settings', False)
    result = current if retain_settings else {}

    # Default quote type is an empty string, which will not quote values
    quote_type = ''

    valid = _CONFIG_TRUE + _CONFIG_FALSE

    if 'enabled' not in opts:
        try:
            opts['networking'] = current['networking']
            # If networking option is quoted, use its quote type
            quote_type = salt.utils.stringutils.is_quoted(opts['networking'])
            _log_default_network('networking', current['networking'])
        except (KeyError, ValueError):
            # KeyError: 'networking' missing from the current settings.
            # (The original only caught ValueError, so a missing key
            # escaped as an unhandled KeyError instead of this error.)
            _raise_error_network('networking', valid)
    else:
        opts['networking'] = opts['enabled']

    true_val = '{0}yes{0}'.format(quote_type)
    false_val = '{0}no{0}'.format(quote_type)

    networking = salt.utils.stringutils.dequote(opts['networking'])
    if networking in valid:
        if networking in _CONFIG_TRUE:
            result['networking'] = true_val
        elif networking in _CONFIG_FALSE:
            result['networking'] = false_val
    else:
        _raise_error_network('networking', valid)

    if 'hostname' not in opts:
        try:
            opts['hostname'] = current['hostname']
            _log_default_network('hostname', current['hostname'])
        except KeyError:
            # No hostname configured anywhere -> report a valid example.
            _raise_error_network('hostname', ['server1.example.com'])

    if opts['hostname']:
        result['hostname'] = '{1}{0}{1}'.format(
            salt.utils.stringutils.dequote(opts['hostname']), quote_type)
    else:
        _raise_error_network('hostname', ['server1.example.com'])

    if 'nozeroconf' in opts:
        nozeroconf = salt.utils.stringutils.dequote(opts['nozeroconf'])
        if nozeroconf in valid:
            if nozeroconf in _CONFIG_TRUE:
                result['nozeroconf'] = true_val
            elif nozeroconf in _CONFIG_FALSE:
                result['nozeroconf'] = false_val
        else:
            _raise_error_network('nozeroconf', valid)

    # Pass any remaining options through, quoted consistently.
    for opt in opts:
        if opt not in ['networking', 'hostname', 'nozeroconf']:
            result[opt] = '{1}{0}{1}'.format(
                salt.utils.stringutils.dequote(opts[opt]), quote_type)
    return result
Filters given options and outputs valid settings for the global network settings file.
def delete_connector_c_pool(name, target='server', cascade=True, server=None):
    '''
    Delete a connection pool
    '''
    payload = {'target': target, 'cascade': cascade}
    return _delete_element(name, 'resources/connector-connection-pool', payload, server)
Delete a connection pool
def _check_lookup_prop(self, result_data): """Checks that selected lookup property can be used for this testcase.""" if not self._lookup_prop: return False if not result_data.get("id") and self._lookup_prop != "name": return False if not result_data.get("title") and self._lookup_prop == "name": return False return True
Checks that selected lookup property can be used for this testcase.
def list_path_traversal(path):
    '''
    Returns a full list of directories leading up to, and including, a path.

    So list_path_traversal('/path/to/salt') would return:
        ['/', '/path', '/path/to', '/path/to/salt']
    in that order.

    This routine has been tested on Windows systems as well.
    list_path_traversal('c:\\path\\to\\salt') on Windows would return:
        ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
    '''
    traversal = [path]
    head, tail = os.path.split(path)
    if not tail:
        # paths with trailing separators will return an empty string
        traversal = [head]
        head, tail = os.path.split(head)
    # loop until head is the same two consecutive times
    while head != traversal[0]:
        traversal.insert(0, head)
        head, tail = os.path.split(head)
    return traversal
Returns a full list of directories leading up to, and including, a path. So list_path_traversal('/path/to/salt') would return: ['/', '/path', '/path/to', '/path/to/salt'] in that order. This routine has been tested on Windows systems as well. list_path_traversal('c:\\path\\to\\salt') on Windows would return: ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If
    the exit code was zero then return, otherwise raise
    CalledProcessError.  The CalledProcessError object will have the
    return code in the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    check_call(["ls", "-l"])
    """
    returncode = call(*popenargs, **kwargs)
    # Prefer an explicit args= keyword for the error report, falling back
    # to the first positional argument.
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    if returncode:
        raise CalledProcessError(returncode, command)
    return returncode
Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. Example: check_call(["ls", "-l"])
def path(self):
    """Timestamp for placing into filepaths.

    Format: 'YYYY-MM-DD SSSSS' where SSSSS is the zero-padded number of
    whole seconds since local midnight.
    """
    midnight = self.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
    seconds_since_midnight = int((self.datetime - midnight).total_seconds())
    return "{} {}".format(
        self.datetime.strftime("%Y-%m-%d"),
        str(seconds_since_midnight).zfill(5))
Timestamp for placing into filepaths.
def register_event(self, direction, verb, child_fn, priority=10):
    """Register an event with all servers.

    Args:
        direction (str): `in`, `out`, `both`, or `girc`.
        verb (str): Event name, `all`, or `raw`.
        child_fn (function): Handler function.
        priority (int): Handler priority (lower priority executes first).

    Note: `all` will not match `raw` events. If you wish to receive both
    `raw` and all other events, you need to register these separately.
    """
    # Map the direction onto the event manager(s) it targets; an unknown
    # direction registers with nothing.
    targets = {
        'in': [self._events_in],
        'out': [self._events_out],
        'both': [self._events_in, self._events_out],
        'girc': [self._girc_events],
    }.get(direction, [])

    for manager in targets:
        manager.register(verb, child_fn, priority=priority)
Register an event with all servers. Args: direction (str): `in`, `out`, `both`, or `girc`. verb (str): Event name, `all`, or `raw`. child_fn (function): Handler function. priority (int): Handler priority (lower priority executes first). Note: `all` will not match `raw` events. If you wish to receive both `raw` and all other events, you need to register these separately.
def _refine_v(seq, species):
    '''
    Completes the 5' end of a truncated sequence with germline nucleotides.
    Mutates ``seq['vdj_nt']`` in place.

    Input is a MongoDB dict (seq) and the species.
    '''
    # Germline V-gene sequence for the assigned V allele.
    vgerm = germlines.get_germline(seq['v_gene']['full'], species)
    aln = global_alignment(seq['vdj_nt'], vgerm)
    prepend = ''
    # Leading gaps ('-') in the aligned query mark 5' positions missing from
    # the observed sequence; collect the germline bases at those positions.
    for s, g in zip(aln.aligned_query, aln.aligned_target):
        if s != '-':
            break
        else:
            prepend += g
    seq['vdj_nt'] = prepend + seq['vdj_nt']
Completes the 5' end of a truncated sequence with germline nucleotides. Input is a MongoDB dict (seq) and the species.
def format_datetime(d: PotentialDatetimeType,
                    fmt: str,
                    default: str = None) -> Optional[str]:
    """
    Format a datetime with a ``strftime`` format specification string, or
    return ``default`` if the input is ``None``.
    """
    coerced = coerce_to_pendulum(d)
    return default if coerced is None else coerced.strftime(fmt)
Format a datetime with a ``strftime`` format specification string, or return ``default`` if the input is ``None``.
def index():
    """
    This is not served anywhere in the web application.
    It is used explicitly in the context of generating static files since
    flask-frozen requires url_for's to crawl content.
    url_for's are not used with file.show_file directly and are instead
    dynamically generated through javascript for performance purposes.
    """
    override = current_app.config['ARA_PLAYBOOK_OVERRIDE']
    if override is not None:
        # Restrict the file listing to the overridden playbook ids.
        files = models.File.query.filter(
            models.File.playbook_id.in_(override))
    else:
        files = models.File.query.all()
    return render_template('file_index.html', files=files)
This is not served anywhere in the web application. It is used explicitly in the context of generating static files since flask-frozen requires url_for's to crawl content. url_for's are not used with file.show_file directly and are instead dynamically generated through javascript for performance purposes.
def load(parser, serializer):
    """Returns a dictionary of builtin functions for Fortran. Checks the
    cache first to see if we have a serialized version. If we don't, it
    loads it from the XML file.

    :arg parser: the DocParser instance for parsing the XML tags.
    :arg serializer: a Serializer instance from the CodeParser to cache
      the loaded XML file.
    """
    fortdir = os.path.dirname(fortpy.__file__)
    xmlpath = os.path.join(fortdir, "isense", "builtin.xml")
    if not os.path.isfile(xmlpath):
        return {}

    # The file mtime doubles as the cache key, so a modified XML file
    # invalidates the cached copy.
    changed_time = os.path.getmtime(xmlpath)
    cached = serializer.load_module("builtin.xml", changed_time)
    if cached is not None:
        return cached

    # Cache miss: parse the XML and store it for next time.
    result = _load_builtin_xml(xmlpath, parser)
    serializer.save_module("builtin.xml", result, changed_time)
    return result
Returns a dictionary of builtin functions for Fortran. Checks the cache first to see if we have a serialized version. If we don't, it loads it from the XML file. :arg parser: the DocParser instance for parsing the XML tags. :arg serializer: a Serializer instance from the CodeParser to cache the loaded XML file.
def get_instance(self, payload):
    """
    Build an instance of MessageInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.messaging.v1.session.message.MessageInstance
    :rtype: twilio.rest.messaging.v1.session.message.MessageInstance
    """
    session_sid = self._solution['session_sid']
    return MessageInstance(self._version, payload, session_sid=session_sid)
Build an instance of MessageInstance :param dict payload: Payload response from the API :returns: twilio.rest.messaging.v1.session.message.MessageInstance :rtype: twilio.rest.messaging.v1.session.message.MessageInstance
def _is_version_duplicate(self) -> bool:
    """
    Define should new version be created for object or no.

    Reasons to provide custom check instead of default
    `ignore_revision_duplicates`:
     - no need to compare all revisions - it is OK if right object version
       exists in any revision;
     - need to compare object attributes (not serialized data) to avoid
       version creation on wrong <float> vs <int> comparison;
    """
    # Unsaved objects cannot have stored versions yet.
    if self.id is None:
        return False

    try:
        # Most recent stored version of this object, by revision date.
        latest_version = Version.objects.get_for_object(self).latest('revision__date_created')
    except Version.DoesNotExist:
        return False

    # Deserialized model instance snapshotted in that version.
    latest_version_object = latest_version._object_version.object
    fields = self.get_version_fields()
    # It is a duplicate when every tracked field matches the snapshot.
    return all([getattr(self, f) == getattr(latest_version_object, f) for f in fields])
Define should new version be created for object or no. Reasons to provide custom check instead of default `ignore_revision_duplicates`: - no need to compare all revisions - it is OK if right object version exists in any revision; - need to compare object attributes (not serialized data) to avoid version creation on wrong <float> vs <int> comparison;
def getModulePath(project_path, module_name, verbose):
    '''Searches for module_name in searchpath and returns the filepath.
    If no filepath was found, returns None.

    :param project_path: directory appended to sys.path so project-local
        modules can be resolved.
    :param module_name: dotted module name to locate (falsy -> None).
    :param verbose: when True, print diagnostics about lookup failures.
    :returns: the module's source file path (the package's __init__ file
        for packages), or None for missing/built-in modules.
    '''
    # Local import: keeps this fix self-contained without touching the
    # file-level import block.
    import importlib.util

    if not module_name:
        return None
    sys.path.append(project_path)
    try:
        # importlib.util.find_spec replaces the deprecated
        # pkgutil.get_loader()/get_code() approach (removed in Python 3.14).
        spec = importlib.util.find_spec(module_name)
    except ImportError:
        # Raised when a parent package of a dotted name cannot be imported.
        if verbose:
            print("Parent module for " + module_name + " not found.")
        return None
    except Exception:
        # The original bare `except:` fell through with its loader variable
        # unbound, which caused a NameError below; report and bail instead.
        if verbose:
            print(module_name + " not loaded for bizarre reasons")
        return None
    if spec is None:
        if verbose:
            print("Module " + module_name + " not found.")
        return None
    if not spec.has_location:
        return None  # built-in module such as itertools
    # spec.origin is the module's file path.
    return spec.origin
Searches for module_name in searchpath and returns the filepath. If no filepath was found, returns None.
def _read_protocol_line(self):
    """Reads the next line of instrumentation output relevant to snippets.

    This method will skip over lines that don't start with 'SNIPPET' or
    'INSTRUMENTATION_RESULT'.

    Blocks until a matching line is produced or the process output ends.

    Returns:
        (str) Next line of snippet-related instrumentation output, stripped.

    Raises:
        jsonrpc_client_base.AppStartError: If EOF is reached without any
            protocol lines being read.
    """
    while True:
        # Blocking read of one line from the instrumentation process.
        line = self._proc.stdout.readline().decode('utf-8')
        if not line:
            raise jsonrpc_client_base.AppStartError(
                self._ad, 'Unexpected EOF waiting for app to start')
        # readline() uses an empty string to mark EOF, and a single newline
        # to mark regular empty lines in the output. Don't move the strip()
        # call above the truthiness check, or this method will start
        # considering any blank output line to be EOF.
        line = line.strip()
        if (line.startswith('INSTRUMENTATION_RESULT:')
                or line.startswith('SNIPPET ')):
            self.log.debug(
                'Accepted line from instrumentation output: "%s"', line)
            return line
        self.log.debug('Discarded line from instrumentation output: "%s"',
                       line)
Reads the next line of instrumentation output relevant to snippets. This method will skip over lines that don't start with 'SNIPPET' or 'INSTRUMENTATION_RESULT'. Returns: (str) Next line of snippet-related instrumentation output, stripped. Raises: jsonrpc_client_base.AppStartError: If EOF is reached without any protocol lines being read.
def generate_signature(method, version, endpoint, date, rel_url,
                       content_type, content,
                       access_key, secret_key, hash_type):
    '''
    Generates the API request signature from the given parameters.

    Returns a ``(headers, signature)`` tuple, where ``headers`` carries the
    ``Authorization`` entry for the request.
    '''
    hostname = endpoint._val.netloc  # FIXME: migrate to public API

    # Newer API versions (and multipart bodies) sign an empty body.
    if version >= 'v4.20181215':
        content = b''
    elif content_type.startswith('multipart/'):
        content = b''
    body_hash = hashlib.new(hash_type, content).hexdigest()

    sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format(  # noqa
        method.upper(),
        rel_url,
        date.isoformat(),
        hostname,
        content_type.lower(),
        version,
        body_hash
    )
    sign_bytes = sign_str.encode()

    # Derive the signing key: HMAC over the date, then over the hostname.
    date_key = hmac.new(secret_key.encode(),
                        date.strftime('%Y%m%d').encode(), hash_type).digest()
    sign_key = hmac.new(date_key, hostname.encode(), hash_type).digest()
    signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest()

    auth_header = 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format(
        hash_type.upper(),
        access_key,
        signature
    )
    return {'Authorization': auth_header}, signature
Generates the API request signature from the given parameters.
def list_to_serialized(ref, the_list):
    """Serialize the list of elements

    Used for the retention store

    :param ref: Not used
    :type ref:
    :param the_list: dictionary to convert
    :type the_list: dict
    :return: dict of serialized
    :rtype: dict
    """
    # Elements without a serialize() method are silently skipped.
    return [elt.serialize() for elt in the_list
            if getattr(elt, 'serialize', None)]
Serialize the list of elements Used for the retention store :param ref: Not used :type ref: :param the_list: dictionary to convert :type the_list: dict :return: dict of serialized :rtype: dict
def _compute_distance(self, dists, C): """ Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.) """ return C["c4"] * np.log(np.sqrt(dists.rrup ** 2. + C["h"] ** 2.))
Compute the second term of the equation described on p. 1144: `` c4 * np.log(sqrt(R ** 2. + h ** 2.)
def proj_l2(v, gamma, axis=None):
    r"""Compute the projection operator of the :math:`\ell_2` norm.

    The projection operator of the uncentered :math:`\ell_2` norm,

    .. math::
      \mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2
      \; \text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma

    can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma}
    (\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) =
    \| \mathbf{x} \|_2`.

    Parameters
    ----------
    v : array_like
      Input array :math:`\mathbf{v}`
    gamma : float
      Parameter :math:`\gamma`
    axis : None or int or tuple of ints, optional (default None)
      Axes of `v` over which to compute the :math:`\ell_2` norm. If
      `None`, an entire multi-dimensional array is treated as a vector.
      If axes are specified, then distinct norm values are computed
      over the indices of the remaining axes of input array `v`.

    Returns
    -------
    x : ndarray
      Output array
    """
    # keepdims so the norm broadcasts back against v.
    norm = np.sqrt(np.sum(v**2, axis=axis, keepdims=True))
    # Points inside the ball are unchanged; points outside are rescaled
    # onto its surface (zdivide avoids division by zero norms).
    inside = (norm <= gamma) * v
    outside = (norm > gamma) * (gamma * sl.zdivide(v, norm))
    return np.asarray(inside + outside, dtype=v.dtype)
r"""Compute the projection operator of the :math:`\ell_2` norm. The projection operator of the uncentered :math:`\ell_2` norm, .. math:: \mathrm{argmin}_{\mathbf{x}} (1/2) \| \mathbf{x} - \mathbf{v} \|_2^2 \; \text{ s.t. } \; \| \mathbf{x} - \mathbf{s} \|_2 \leq \gamma can be computed as :math:`\mathbf{s} + \mathrm{proj}_{f,\gamma} (\mathbf{v} - \mathbf{s})` where :math:`f(\mathbf{x}) = \| \mathbf{x} \|_2`. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` axis : None or int or tuple of ints, optional (default None) Axes of `v` over which to compute the :math:`\ell_2` norm. If `None`, an entire multi-dimensional array is treated as a vector. If axes are specified, then distinct norm values are computed over the indices of the remaining axes of input array `v`. Returns ------- x : ndarray Output array
def find_max_and_min_frequencies(name, mass_range_params, freqs):
    """
    Determine the maximum and minimum cutoff frequencies over the given
    mass (and spin) range, snapped to the closest frequencies at which
    the metric was calculated.

    Parameters
    -----------
    name : string
        Name of the frequency cutoff; must be a key of
        pnutils.named_frequency_cutoffs.
    mass_range_params : object
        Mass/spin range parameters; must provide minTotMass and maxTotMass
        and be accepted by get_random_mass.
    freqs : numpy.array
        Frequencies for which the metric has been computed.

    Returns
    --------
    numpy.array
        The [lower, upper] cutoff frequencies mapped onto the closest
        values present in freqs.
    """
    cutoff_fns = pnutils.named_frequency_cutoffs
    if name not in cutoff_fns.keys():
        # Note the trailing space: the next sentence is concatenated on.
        err_msg = "%s not recognized as a valid cutoff frequency choice. " % (name,)
        err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys())
        raise ValueError(err_msg)
    # Can I do this quickly?
    total_mass_approxs = {
        "SchwarzISCO": pnutils.f_SchwarzISCO,
        "LightRing": pnutils.f_LightRing,
        "ERD": pnutils.f_ERD
    }
    if name in total_mass_approxs.keys():
        # This can be done quickly if the cutoff only depends on total mass
        # Assumes that lower total mass = higher cutoff frequency
        upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass)
        lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass)
    else:
        # Do this numerically
        # FIXME: Is 1000000 the right choice? I think so, but just highlighting
        mass1, mass2, spin1z, spin2z = \
            get_random_mass(1000000, mass_range_params)
        mass_dict = {}
        mass_dict['mass1'] = mass1
        mass_dict['mass2'] = mass2
        mass_dict['spin1z'] = spin1z
        mass_dict['spin2z'] = spin2z
        tmp_freqs = cutoff_fns[name](mass_dict)
        upper_f_cutoff = tmp_freqs.max()
        lower_f_cutoff = tmp_freqs.min()

    cutoffs = numpy.array([lower_f_cutoff, upper_f_cutoff])
    if lower_f_cutoff < freqs.min():
        warn_msg = "WARNING: "
        warn_msg += "Lowest frequency cutoff is %s Hz " % (lower_f_cutoff,)
        warn_msg += "which is lower than the lowest frequency calculated "
        warn_msg += "for the metric: %s Hz. " % (freqs.min())
        warn_msg += "Distances for these waveforms will be calculated at "
        warn_msg += "the lowest available metric frequency."
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning(warn_msg)
    if upper_f_cutoff > freqs.max():
        warn_msg = "WARNING: "
        warn_msg += "Highest frequency cutoff is %s Hz " % (upper_f_cutoff,)
        warn_msg += "which is larger than the highest frequency calculated "
        warn_msg += "for the metric: %s Hz. " % (freqs.max())
        warn_msg += "Distances for these waveforms will be calculated at "
        warn_msg += "the largest available metric frequency."
        logging.warning(warn_msg)

    return find_closest_calculated_frequencies(cutoffs, freqs)
Determine the maximum and minimum cutoff frequencies over the given mass range, snapped to the closest frequencies at which the metric was calculated.
def select_eps(xmrs, nodeid=None, iv=None, label=None, pred=None):
    """
    Return the list of matching elementary predications in *xmrs*.

    An elementary predication of *xmrs* matches when its `nodeid`,
    `iv` (intrinsic variable), `label`, and `pred` each equal the
    corresponding argument; any filter left as `None` is ignored.

    Args:
        xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query
        nodeid (optional): nodeid to match
        iv (str, optional): intrinsic variable to match
        label (str, optional): label to match
        pred (str, :class:`~delphin.mrs.components.Pred`, optional):
            predicate to match
    Returns:
        list: matching elementary predications
    """
    def matches(ep):
        if nodeid is not None and ep.nodeid != nodeid:
            return False
        if iv is not None and ep.iv != iv:
            return False
        if label is not None and ep.label != label:
            return False
        if pred is not None and ep.pred != pred:
            return False
        return True

    return [ep for ep in xmrs.eps() if matches(ep)]
Return the list of matching elementary predications in *xmrs*. :class:`~delphin.mrs.components.ElementaryPredication` objects for *xmrs* match if their `nodeid` matches *nodeid*, `intrinsic_variable` matches *iv*, `label` matches *label*, and `pred` to *pred*. The *nodeid*, *iv*, *label*, and *pred* filters are ignored if they are `None`. Args: xmrs (:class:`~delphin.mrs.xmrs.Xmrs`): semantic structure to query nodeid (optional): nodeid to match iv (str, optional): intrinsic variable to match label (str, optional): label to match pred (str, :class:`~delphin.mrs.components.Pred`, optional): predicate to match Returns: list: matching elementary predications
def file_list(*packages, **kwargs):
    '''
    List the files that belong to a package. Not specifying any packages will
    return a list of _every_ file on the system's rpm database (not generally
    recommended).

    root
        use root as top level directory (default: "/")

    CLI Examples:

    .. code-block:: bash

        salt '*' lowpkg.file_list httpd
        salt '*' lowpkg.file_list httpd postfix
        salt '*' lowpkg.file_list
    '''
    cmd = ['rpm']
    root = kwargs.get('root')
    if root:
        cmd.extend(['--root', root])
    if packages:
        cmd.append('-ql')
        # Can't concatenate a tuple, must do a list.extend()
        cmd.extend(packages)
    else:
        # No packages given: query every file in the rpm database.
        cmd.append('-qla')
    output = __salt__['cmd.run'](cmd,
                                 output_loglevel='trace',
                                 python_shell=False)
    return {'errors': [], 'files': output.splitlines()}
List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's rpm database (not generally recommended). root use root as top level directory (default: "/") CLI Examples: .. code-block:: bash salt '*' lowpkg.file_list httpd salt '*' lowpkg.file_list httpd postfix salt '*' lowpkg.file_list
def do_stop_role(self, role):
    """
    Stop a role
    Usage:
        > stop_role <role>     Stops this role
    """
    # Guard clauses: a role name and a selected cluster are both required.
    if not role or not self.has_cluster():
        return None
    # Role names look like "<service>-<role>"; reject anything else.
    if '-' not in role:
        print("Please enter a valid role name")
        return None
    service_name = role.split('-')[0]
    try:
        cluster = api.get_cluster(self.cluster)
        cluster.get_service(service_name).stop_roles(role)
        print("Stopping Role")
    except ApiException:
        print("Error: Role or Service Not Found")
Stop a role Usage: > stop_role <role> Stops this role
def tx2genefile(gtf, out_file=None, data=None, tsv=True, keep_version=False):
    """
    write out a file of transcript->gene mappings.
    """
    # Pick the separator (and matching extension) for the output flavour.
    sep, extension = ("\t", ".tsv") if tsv else (",", ".csv")
    if file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for tx_id, gene_id in tx2genedict(gtf, keep_version).items():
                out_handle.write(tx_id + sep + gene_id + "\n")
    logger.info("tx2gene file %s created from %s." % (out_file, gtf))
    return out_file
write out a file of transcript->gene mappings.
def close(self, filehandle):
    """Close openend file if no longer used."""
    with self.lock:
        if filehandle in self.files:
            self.files[filehandle] -= 1
        # Trim the cache: walk the history oldest-first and close every
        # handle with no remaining users until the cache fits self.size.
        idx = 0
        remaining = len(self.past)
        while remaining > self.size and idx < remaining:
            candidate = self.past[idx]
            if self.files[candidate]:
                # Still referenced; leave it and look at the next entry.
                idx += 1
            else:
                candidate.close()
                del self.files[candidate]
                del self.past[idx]
                remaining -= 1
Close openend file if no longer used.
def zone_data(self):
    """Get zone data, fetching it once and caching the result."""
    if self._zone_data is None:
        response = self._get('/zones/' + self.domain)
        self._zone_data = response.json()
    return self._zone_data
Get zone data
def load_module(self, name):
    """Load and return a module"""
    # Already imported under the mount name: reuse the cached module.
    if name in sys.modules:
        return sys.modules[name]
    # Load the actual import hook module ...
    hook_name = self.mount2name(name)
    __import__(hook_name)
    module = sys.modules[hook_name]
    # ... and alias it to the mount, so both names are interchangeable.
    sys.modules[name] = module
    module.install()
    return module
Load and return a module
def _get_approved_attributes(self, idp, idp_policy, sp_entity_id, state):
    """
    Returns a list of approved attributes
    :type idp: saml.server.Server
    :type idp_policy: saml2.assertion.Policy
    :type sp_entity_id: str
    :type state: satosa.state.State
    :rtype: list[str]
    :param idp: The saml frontend idp server
    :param idp_policy: The idp policy
    :param sp_entity_id: The requesting sp entity id
    :param state: The current state
    :return: A list containing approved attributes
    """
    name_format = idp_policy.get_name_form(sp_entity_id)
    attrconvs = idp.config.attribute_converters
    idp_policy.acs = attrconvs
    attribute_filter = []
    for aconv in attrconvs:
        # Only the converter matching the policy's name format is used.
        if aconv.name_format == name_format:
            # NOTE(review): relies on the converter's private `_fro`
            # mapping to enumerate every attribute it knows about —
            # presumably the full internal attribute set; confirm against
            # pysaml2's AttributeConverter internals.
            all_attributes = {v: None for v in aconv._fro.values()}
            # Let the IdP policy restrict the full set to what this SP
            # is allowed to receive.
            attribute_filter = list(idp_policy.restrict(all_attributes, sp_entity_id, idp.metadata).keys())
            break
    # Translate the SAML attribute names into SATOSA-internal names.
    attribute_filter = self.converter.to_internal_filter(self.attribute_profile, attribute_filter)
    satosa_logging(logger, logging.DEBUG, "Filter: %s" % attribute_filter, state)
    return attribute_filter
Returns a list of approved attributes :type idp: saml.server.Server :type idp_policy: saml2.assertion.Policy :type sp_entity_id: str :type state: satosa.state.State :rtype: list[str] :param idp: The saml frontend idp server :param idp_policy: The idp policy :param sp_entity_id: The requesting sp entity id :param state: The current state :return: A list containing approved attributes
def from_path(kls, vertices):
    """
    Given an Nx3 array of vertices that constitute a single path,
    generate a skeleton with appropriate edges.
    """
    if vertices.shape[0] == 0:
        return PrecomputedSkeleton()

    skel = PrecomputedSkeleton(vertices)
    num_verts = skel.vertices.shape[0]
    # Chain consecutive vertices: edge i connects vertex i to vertex i + 1.
    edges = np.zeros(shape=(num_verts - 1, 2), dtype=np.uint32)
    edges[:, 0] = np.arange(num_verts - 1)
    edges[:, 1] = np.arange(1, num_verts)
    skel.edges = edges
    return skel
Given an Nx3 array of vertices that constitute a single path, generate a skeleton with appropriate edges.
def replicate(source, model, cache=None):
    '''Replicates the `source` object to `model` class and returns its
    reflection.'''
    reflection = replicate_no_merge(source, model, cache=cache)
    if reflection is None:
        return None
    # Merge the detached copy into the session that owns the source.
    session = object_session(source)
    return session.merge(reflection)
Replicates the `source` object to `model` class and returns its reflection.
def _get_sorted_section(self, nts_section):
    """Sort GO IDs in each section, if requested by user."""
    #pylint: disable=unnecessary-lambda
    sortby = self.section_sortby
    if sortby is True:
        # True selects the default user GO sort.
        return sorted(nts_section, key=lambda nt: self.sortgos.usrgo_sortby(nt))
    if sortby is False or sortby is None:
        # Sorting disabled: return the section unchanged.
        return nts_section
    # Otherwise sortby is a user-supplied key function.
    return sorted(nts_section, key=lambda nt: sortby(nt))
Sort GO IDs in each section, if requested by user.
def add_data(self, metric, value, ts=None):
    """
    Add data to queue

    :param metric: the metric name
    :type metric: str
    :param value: the value of data
    :type value: int
    :param ts: the timestamp; any falsy value (including 0) is replaced by
        the current time
    :type ts: int | None
    :return: True if added successfully, otherwise False
    :rtype: bool
    """
    if not ts:
        ts = time.time()
    # Use the lock as a context manager so it is released even if append
    # raises. The blocking acquire() can never return False, so this is
    # behaviorally equivalent to the old acquire()/release() pair while
    # being exception-safe.
    with self.__data_lock:
        self.__data.append((metric, (ts, value)))
    return True
Add data to queue :param metric: the metric name :type metric: str :param value: the value of data :type value: int :param ts: the timestamp :type ts: int | None :return: True if added successfully, otherwise False :rtype: bool
def load_zip_data(zipname, f_sino_real, f_sino_imag,
                  f_angles=None, f_phantom=None, f_info=None):
    """Load example sinogram data from a .zip file"""
    results = []
    with zipfile.ZipFile(str(zipname)) as arc:
        # The complex sinogram is stored as two real-valued text files.
        real_part = np.loadtxt(arc.open(f_sino_real))
        imag_part = np.loadtxt(arc.open(f_sino_imag))
        results.append(real_part + 1j * imag_part)
        if f_angles:
            results.append(np.loadtxt(arc.open(f_angles)))
        if f_phantom:
            results.append(np.loadtxt(arc.open(f_phantom)))
        if f_info:
            # The info file holds "key = value" lines with float values;
            # lines without exactly one "=" are ignored.
            cfg = {}
            with arc.open(f_info) as info:
                for raw_line in info.readlines():
                    line = raw_line.decode()
                    if line.count("=") == 1:
                        key, val = line.split("=")
                        cfg[key.strip()] = float(val.strip())
            results.append(cfg)
    return results
Load example sinogram data from a .zip file
def value_counts(expr, sort=True, ascending=False, dropna=False):
    """
    Return object containing counts of unique values.

    The resulting object will be in descending order so that the
    first element is the most frequently-occurring element.
    Exclude NA values by default

    :param expr: sequence
    :param sort: if sort
    :type sort: bool
    :param dropna: Don't include counts of None, default False
    :return: collection with two columns
    :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
    """
    # Output schema: the original column plus an int64 'count' column.
    schema = Schema.from_lists([expr.name, 'count'],
                               [expr.dtype, types.int64])
    return ValueCounts(_input=expr, _schema=schema, _sort=sort,
                       _ascending=ascending, _dropna=dropna)
Return object containing counts of unique values. The resulting object will be in descending order so that the first element is the most frequently-occuring element. Exclude NA values by default :param expr: sequence :param sort: if sort :type sort: bool :param dropna: Don’t include counts of None, default False :return: collection with two columns :rtype: :class:`odps.df.expr.expressions.CollectionExpr`
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):
    '''
    This function trains a HMM model for segmentation-classification
    using a single annotated audio file.

    ARGUMENTS:
     - wav_file:        the path of the audio filename
     - gt_file:         the path of the ground truth filename
                        (a csv file of the form
                        <segment start in seconds>,<segment end in seconds>,
                        <segment label> in each row)
     - hmm_model_name:  the name of the HMM model to be stored
     - mt_win:          mid-term window size
     - mt_step:         mid-term window step
    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names:     a list of class_names

    After training, hmm, class_names, along with the mt_win and mt_step
    values are stored in the hmm_model_name file.
    '''
    # Read ground-truth segments and convert them to per-window flags.
    seg_start, seg_end, seg_labs = readSegmentGT(gt_file)
    flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)

    # Extract mid-term features from the audio signal.
    fs, x = audioBasicIO.readAudioFile(wav_file)
    features, _, _ = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs,
                                            round(fs * 0.050),
                                            round(fs * 0.050))

    # Estimate HMM parameters from the labelled feature sequence.
    start_prob, transmat, means, cov = trainHMM_computeStatistics(features,
                                                                  flags)
    hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
    hmm.startprob_ = start_prob
    hmm.transmat_ = transmat
    hmm.means_ = means
    hmm.covars_ = cov

    # Persist the model together with the window parameters (dump order
    # matters: it must match the corresponding load routine).
    with open(hmm_model_name, "wb") as fo:
        cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)
    return hmm, class_names
This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
def reset_small(self, eq):
    """Reset numbers smaller than 1e-12 in f and g equations"""
    assert eq in ('f', 'g')
    values = self.__dict__[eq]
    for idx, val in enumerate(values):
        # Zero out numerically negligible entries in place.
        if abs(val) <= 1e-12:
            values[idx] = 0
Reset numbers smaller than 1e-12 in f and g equations
def _get_tag_match(self, ele, tree):
    """
    Match tag
    :param ele:
    :type ele:
    :param tree:
    :type tree: None, list
    :return:
    :rtype: None | list
    """
    # Base case: an empty selector tree matches the element itself.
    if tree in [None, []]:
        return [ele]
    res = []
    t = tree[0]
    branch = tree[1:]
    attributes = {}
    for attr in t:
        if isinstance(t[attr], dict):
            # {"type": "reg", "reg": ...} values become compiled regexes.
            # NOTE: this mutates the caller's selector dict in place.
            if t[attr].get("type", None) == "reg":
                t[attr] = re.compile(t[attr]['reg'])
    attributes.update(t)
    # These keys are selector directives, not element attributes.
    if "name" in attributes:
        del attributes['name']
    if "text" in attributes:
        del attributes['text']
    if "recursive" in attributes:
        del attributes['recursive']
    if "[]" in attributes:
        del attributes['[]']
    # NOTE(review): find_all looks like the BeautifulSoup Tag API —
    # confirm `ele` is a bs4 element.
    possibles = ele.find_all(
        t.get('name', None),
        text=t.get('text', None),
        attrs=attributes,
        recursive=t.get('recursive', True)
    )
    if not possibles:
        return None
    else:
        pass
    if "[]" in t:
        # HACK: the "[]" directive is an index/slice expression evaluated
        # with eval(); unsafe if the selector comes from untrusted input,
        # and the bare except hides any error as "no match".
        try:
            possibles = eval("possibles[{}]".format(t["[]"]))
        except:
            # no possibles
            return None
        if not isinstance(possibles, list):
            # A plain index yields a single element; normalize to a list.
            possibles = [possibles]
    # Recurse into the remaining selector levels for each candidate.
    for a in possibles:
        match = self._get_tag_match(a, branch)
        if match:
            res.extend(match)
    if not res:
        return None
    else:
        return res
Match tag :param ele: :type ele: :param tree: :type tree: None, list :return: :rtype: None | list
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.
    """
    # Versions before NIDM 1.3 used the old FSL neurolex identifier.
    major = nidm_version['major']
    if major < 1 or (major == 1 and nidm_version['minor'] < 3):
        self.type = NLX_OLD_FSL

    atts = (
        (PROV['type'], self.type),
        (PROV['type'], PROV['SoftwareAgent']),
        (PROV['label'], Literal(self.label, datatype=XSD_STRING)),
        (NIDM_SOFTWARE_VERSION, self.version)
    )
    if self.feat_version:
        atts = atts + ((FSL_FEAT_VERSION, self.feat_version),)

    self.add_attributes(atts)
Create prov entities and activities.
def _multitaper_cross_spectrum(self, clm, slm, k, convention='power',
                               unit='per_l', clat=None, clon=None,
                               coord_degrees=True, lmax=None, taper_wt=None):
    """
    Return the multitaper cross-spectrum estimate and standard error for
    two input SHCoeffs class instances.
    """
    # Default bandwidth: the largest degree both inputs can provide.
    if lmax is None:
        lmax = min(clm.lmax, slm.lmax)

    if (clat is not None and clon is not None and clat == self.clat and
            clon == self.clon and coord_degrees is self.coord_degrees and
            k <= self.nwinrot):
        # Requested rotation matches the cached one:
        # use the already stored coeffs
        pass
    elif (clat is None and clon is None) and \
            (self.clat is not None and self.clon is not None and
             k <= self.nwinrot):
        # No rotation requested and a previous one is cached:
        # use the already stored coeffs
        pass
    else:
        # Need to (re)rotate the window coefficients.
        if clat is None:
            clat = self.clat
        if clon is None:
            clon = self.clon
        if (clat is None and clon is not None) or \
                (clat is not None and clon is None):
            raise ValueError('clat and clon must both be input. ' +
                             'clat = {:s}, clon = {:s}'
                             .format(repr(clat), repr(clon)))
        if clat is None and clon is None:
            # Neither given anywhere: rotate to the default pole position.
            self.rotate(clat=90., clon=0., coord_degrees=True, nwinrot=k)
        else:
            self.rotate(clat=clat, clon=clon, coord_degrees=coord_degrees,
                        nwinrot=k)

    # Both inputs converted to 4pi-normalized arrays up to lmax.
    sh1 = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
    sh2 = slm.to_array(normalization='4pi', csphase=1, lmax=lmax)

    if taper_wt is None:
        mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                lmax1=lmax, lmax2=lmax,
                                                k=k)
    else:
        mtse, sd = _shtools.SHMultiTaperMaskCSE(sh1, sh2, self.coeffs,
                                                lmax1=lmax, lmax2=lmax,
                                                k=k, taper_wt=taper_wt)

    if (unit == 'per_l'):
        pass
    elif (unit == 'per_lm'):
        # Convert from per-degree to per-coefficient by dividing by 2l+1.
        degree_l = _np.arange(len(mtse))
        mtse /= (2.0 * degree_l + 1.0)
        sd /= (2.0 * degree_l + 1.0)
    else:
        raise ValueError(
            "unit must be 'per_l' or 'per_lm'." +
            "Input value was {:s}".format(repr(unit)))

    if (convention == 'power'):
        return mtse, sd
    elif (convention == 'energy'):
        # Energy convention differs from power by a factor of 4*pi.
        return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
    else:
        raise ValueError(
            "convention must be 'power' or 'energy'." +
            "Input value was {:s}".format(repr(convention)))
Return the multitaper cross-spectrum estimate and standard error for two input SHCoeffs class instances.
def post_periodic_filtered(values, repeat_after, block):
    """
    After every *repeat_after* items, blocks the next *block* items from
    *values*. Note that unlike :func:`pre_periodic_filtered`,
    *repeat_after* can't be 0. For example, to block every tenth item read
    from an ADC::

        from gpiozero import MCP3008
        from gpiozero.tools import post_periodic_filtered

        adc = MCP3008(channel=0)
        for value in post_periodic_filtered(adc, 9, 1):
            print(value)
    """
    values = _normalize(values)
    if repeat_after < 1:
        raise ValueError("repeat_after must be 1 or larger")
    if block < 1:
        raise ValueError("block must be 1 or larger")
    source = iter(values)
    try:
        while True:
            # Pass through repeat_after items ...
            for _ in range(repeat_after):
                yield next(source)
            # ... then silently consume and drop the next block items.
            for _ in range(block):
                next(source)
    except StopIteration:
        # Underlying iterable exhausted: end the generator normally.
        return
After every *repeat_after* items, blocks the next *block* items from *values*. Note that unlike :func:`pre_periodic_filtered`, *repeat_after* can't be 0. For example, to block every tenth item read from an ADC:: from gpiozero import MCP3008 from gpiozero.tools import post_periodic_filtered adc = MCP3008(channel=0) for value in post_periodic_filtered(adc, 9, 1): print(value)
def has_attr(self, table_name, attr_name):
    """
    :param str table_name: Table name that the attribute exists.
    :param str attr_name: Attribute name to be tested.
    :return: |True| if the table has the attribute.
    :rtype: bool
    :raises simplesqlite.TableNotFoundError:
        |raises_verify_table_existence|

    :Sample Code:
        .. code:: python

            import simplesqlite

            table_name = "sample_table"
            con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
            con.create_table_from_data_matrix(
                table_name,
                ["attr_a", "attr_b"],
                [[1, "a"], [2, "b"]])

            print(con.has_attr(table_name, "attr_a"))
            print(con.has_attr(table_name, "not_existing"))
            try:
                print(con.has_attr("not_existing", "attr_a"))
            except simplesqlite.TableNotFoundError as e:
                print(e)
    :Output:
        .. parsed-literal::

            True
            False
            'not_existing' table not found in /tmp/sample.sqlite
    """
    self.verify_table_existence(table_name)

    # A null/empty attribute name can never match; otherwise check
    # membership among the table's actual attribute names.
    return (not typepy.is_null_string(attr_name)
            and attr_name in self.fetch_attr_names(table_name))
:param str table_name: Table name that the attribute exists. :param str attr_name: Attribute name to be tested. :return: |True| if the table has the attribute. :rtype: bool :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :Sample Code: .. code:: python import simplesqlite table_name = "sample_table" con = simplesqlite.SimpleSQLite("sample.sqlite", "w") con.create_table_from_data_matrix( table_name, ["attr_a", "attr_b"], [[1, "a"], [2, "b"]]) print(con.has_attr(table_name, "attr_a")) print(con.has_attr(table_name, "not_existing")) try: print(con.has_attr("not_existing", "attr_a")) except simplesqlite.TableNotFoundError as e: print(e) :Output: .. parsed-literal:: True False 'not_existing' table not found in /tmp/sample.sqlite
def render_povray(scene, filename='ipython', width=600, height=600,
                  antialiasing=0.01, extra_opts={}):
    # NOTE(review): `extra_opts={}` is a mutable default argument — safe
    # only while callers never mutate it; consider `extra_opts=None`.
    '''Render the scene with povray for publication.

    :param dict scene: The scene to render
    :param string filename: Output filename or 'ipython' to render in the
        notebook.
    :param int width: Width in pixels.
    :param int height: Height in pixels.
    :param dict extra_opts: Dictionary to merge/override with the passed
        scene.
    '''
    if not vapory_available:
        raise Exception("To render with povray, you need to have the vapory"
                        " package installed.")

    # Adding extra options
    scene = normalize_scene(scene)
    scene.update(extra_opts)

    # Camera target
    aspect = scene['camera']['aspect']
    # "up" vector: the camera quaternion applied to the world y axis.
    up = np.dot(rmatrixquaternion(scene['camera']['quaternion']), [0, 1, 0])
    # Convert the vertical field of view to the horizontal one povray wants.
    v_fov = scene['camera']['vfov'] / 180.0 * np.pi
    h_fov = 2.0 * np.arctan(np.tan(v_fov/2.0) * aspect) / np.pi * 180

    # Setup camera position
    camera = vp.Camera(
        'location', scene['camera']['location'],
        'direction', [0, 0, -1],
        'sky', up,
        'look_at', scene['camera']['target'],
        'angle', h_fov
    )

    global_settings = []
    # Setup global illumination
    if scene.get('radiosity', False):
        # Global Illumination
        radiosity = vp.Radiosity(
            'brightness', 2.0,
            'count', 100,
            'error_bound', 0.15,
            'gray_threshold', 0.0,
            'low_error_factor', 0.2,
            'minimum_reuse', 0.015,
            'nearest_count', 10,
            'recursion_limit', 1,  # Docs say 1 is enough
            'adc_bailout', 0.01,
            'max_sample', 0.5,
            'media off',
            'normal off',
            'always_sample', 1,
            'pretrace_start', 0.08,
            'pretrace_end', 0.01)
        # With radiosity enabled, no explicit light sources are added.
        light_sources = []
        global_settings.append(radiosity)
    else:
        # Lights
        light_sources = [
            vp.LightSource(
                np.array([2, 4, -3]) * 1000,
                'color', [1, 1, 1]
            ),
            vp.LightSource(
                np.array([-2, -4, 3]) * 1000,
                'color', [1, 1, 1]
            ),
            vp.LightSource(
                np.array([-1, 2, 3]) * 1000,
                'color', [1, 1, 1]
            ),
            vp.LightSource(
                np.array([1, -2, -3]) * 1000,
                'color', [1, 1, 1]
            )
        ]

    # Background -- white for now
    background = vp.Background([1, 1, 1])

    # Things to display
    stuff = _generate_objects(scene['representations'])

    scene = vp.Scene(camera,
                     objects=light_sources + stuff + [background],
                     global_settings=global_settings)
    return scene.render(filename, width=width, height=height,
                        antialiasing=antialiasing)
Render the scene with povray for publication. :param dict scene: The scene to render :param string filename: Output filename or 'ipython' to render in the notebook. :param int width: Width in pixels. :param int height: Height in pixels. :param dict extra_opts: Dictionary to merge/override with the passed scene.
def subnetpool_create(request, name, prefixes, **kwargs):
    """Create a subnetpool.

    ip_version is auto-detected in back-end.

    Parameters:
    request -- Request context
    name -- Name for subnetpool
    prefixes -- List of prefixes for pool

    Keyword Arguments (optional):
    min_prefixlen -- Minimum prefix length for allocations from pool
    max_prefixlen -- Maximum prefix length for allocations from pool
    default_prefixlen -- Default prefix length for allocations from pool
    default_quota -- Default quota for allocations from pool
    shared -- Subnetpool should be shared (Admin-only)
    tenant_id -- Owner of subnetpool

    Returns:
    SubnetPool object
    """
    LOG.debug("subnetpool_create(): name=%(name)s, prefixes=%(prefixes)s, "
              "kwargs=%(kwargs)s",
              {'name': name, 'prefixes': prefixes, 'kwargs': kwargs})
    # Default the owner to the requesting project when not given.
    if 'tenant_id' not in kwargs:
        kwargs['tenant_id'] = request.user.project_id
    pool_spec = {'name': name, 'prefixes': prefixes}
    pool_spec.update(kwargs)
    body = {'subnetpool': pool_spec}
    response = neutronclient(request).create_subnetpool(body=body)
    return SubnetPool(response.get('subnetpool'))
Create a subnetpool. ip_version is auto-detected in back-end. Parameters: request -- Request context name -- Name for subnetpool prefixes -- List of prefixes for pool Keyword Arguments (optional): min_prefixlen -- Minimum prefix length for allocations from pool max_prefixlen -- Maximum prefix length for allocations from pool default_prefixlen -- Default prefix length for allocations from pool default_quota -- Default quota for allocations from pool shared -- Subnetpool should be shared (Admin-only) tenant_id -- Owner of subnetpool Returns: SubnetPool object
def populate_user(self):
    """
    Populates the Django user object using the default bind credentials.

    Returns the populated user, or None when the user does not exist in
    the directory or an LDAPError was handled.
    """
    user = None
    try:
        # self.attrs will only be non-None if we were able to load this user
        # from the LDAP directory, so this filters out nonexistent users.
        if self.attrs is not None:
            self._get_or_create_user(force_populate=True)

        user = self._user
    except ldap.LDAPError as e:
        # Give signal receivers a chance to handle the error; only log a
        # warning when no receiver claimed it. The error is swallowed
        # either way and None is returned.
        results = ldap_error.send(
            self.backend.__class__,
            context="populate_user",
            user=self._user,
            exception=e,
        )
        if len(results) == 0:
            logger.warning(
                "Caught LDAPError while authenticating {}: {}".format(
                    self._username, pprint.pformat(e)
                )
            )
    except Exception as e:
        # Unexpected errors are logged and re-raised, unlike LDAPError.
        logger.warning("{} while authenticating {}".format(e, self._username))
        raise

    return user
Populates the Django user object using the default bind credentials.
def create(cls, statement_format, date_start, date_end,
           monetary_account_id=None, regional_format=None,
           custom_headers=None):
    """
    :type user_id: int
    :type monetary_account_id: int
    :param statement_format: The format type of statement. Allowed values:
        MT940, CSV, PDF.
    :type statement_format: str
    :param date_start: The start date for making statements.
    :type date_start: str
    :param date_end: The end date for making statements.
    :type date_end: str
    :param regional_format: Required for CSV exports. The regional format
        of the statement, can be UK_US (comma-separated) or EUROPEAN
        (semicolon-separated).
    :type regional_format: str
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    if custom_headers is None:
        custom_headers = {}

    # Assemble the request payload and strip fields the API rejects.
    payload = {
        cls.FIELD_STATEMENT_FORMAT: statement_format,
        cls.FIELD_DATE_START: date_start,
        cls.FIELD_DATE_END: date_end,
        cls.FIELD_REGIONAL_FORMAT: regional_format
    }
    payload_string = converter.class_to_json(payload)
    payload_string = cls._remove_field_for_request(payload_string)

    api_client = client.ApiClient(cls._get_api_context())
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id))
    response_raw = api_client.post(endpoint_url, payload_string.encode(),
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
:type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
def reaction_signature(eq, direction=False, stoichiometry=False):
    """Return unique signature object for :class:`Reaction`

    Signature objects are hashable, and compare equal only if the reactions
    are considered the same according to the specified rules.

    Args:
        direction: Include reaction directionality when considering
            equality.
        stoichiometry: Include stoichiometry when considering equality.
    """
    def side_signature(compounds):
        # With stoichiometry, keep the (compound, value) pairs; otherwise
        # compare on the compounds alone.
        if stoichiometry:
            return tuple(sorted(compounds))
        return tuple(sorted(compound for compound, _ in compounds))

    left_sig = side_signature(eq.left)
    right_sig = side_signature(eq.right)

    # Canonicalize the orientation so that flipped reactions get the same
    # signature; the direction is flipped along with the sides.
    if left_sig < right_sig:
        reaction_sig, direction_sig = (left_sig, right_sig), eq.direction
    else:
        reaction_sig = (right_sig, left_sig)
        direction_sig = eq.direction.flipped()

    if direction:
        return reaction_sig, direction_sig
    return reaction_sig
Return unique signature object for :class:`Reaction`. Signature objects are hashable, and compare equal only if the reactions are considered the same according to the specified rules. Args: direction: Include reaction directionality when considering equality. stoichiometry: Include stoichiometry when considering equality.
def crop_to_fit(self, image_size, view_size):
    """
    Set cropping values in `p:blipFill/a:srcRect` such that an image of
    *image_size* will stretch to exactly fit *view_size* when its aspect
    ratio is preserved.
    """
    crop_values = self._fill_cropping(image_size, view_size)
    self.blipFill.crop(crop_values)
Set cropping values in `p:blipFill/a:srcRect` such that an image of *image_size* will stretch to exactly fit *view_size* when its aspect ratio is preserved.
def ecs_idsKEGG(organism):
    """
    Uses KEGG to retrieve all ids and respective ecs for a given KEGG
    organism

    :param organism: an organisms as listed in organismsKEGG()

    :returns: a Pandas dataframe of with 'ec' and 'KEGGid'.
    """
    kegg_ec = urlopen("http://rest.kegg.jp/link/" + organism + "/enzyme").read()
    # On Python 3, urlopen().read() returns bytes; decode before splitting,
    # otherwise .split("\n") raises TypeError.
    if isinstance(kegg_ec, bytes):
        kegg_ec = kegg_ec.decode("utf-8")
    kegg_ec = kegg_ec.split("\n")
    # Each response line is "<ec>\t<KEGGid>"; the response ends with a
    # trailing newline, so the last (empty) entry is dropped below.
    final = [k.split("\t") for k in kegg_ec]
    df = pd.DataFrame(final[0:len(final) - 1])[[0, 1]]
    df.columns = ['ec', 'KEGGid']
    return df
Uses KEGG to retrieve all ids and respective ecs for a given KEGG organism :param organism: an organisms as listed in organismsKEGG() :returns: a Pandas dataframe of with 'ec' and 'KEGGid'.
def get_content(self, renderer, data, accepted_media_type, renderer_context):
    """
    Get the content as if it had been rendered by the default
    non-documenting renderer.
    """
    if not renderer:
        return '[No renderers were found]'

    renderer_context['indent'] = 4
    rendered = renderer.render(data, accepted_media_type, renderer_context)

    # Renderers default to text output unless they declare otherwise.
    style = getattr(renderer, 'render_style', 'text')
    assert style in ['text', 'binary'], 'Expected .render_style ' \
        '"text" or "binary", but got "%s"' % style

    # Binary payloads are summarized rather than embedded verbatim.
    return ('[%d bytes of binary content]' % len(rendered)
            if style == 'binary' else rendered)
Get the content as if it had been rendered by the default non-documenting renderer.
def replace(self, to_replace, value=_NoValue, subset=None):
    """Returns a new :class:`DataFrame` replacing a value with another value.
    :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
    aliases of each other.
    Values to_replace and value must have the same type and can only be numerics, booleans,
    or strings. Value can have None. When replacing, the new value will be cast
    to the type of the existing column.
    For numeric replacements all values to be replaced should have unique
    floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
    and arbitrary replacement will be used.

    :param to_replace: bool, int, long, float, string, list or dict.
        Value to be replaced.
        If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
        must be a mapping between a value and a replacement.
    :param value: bool, int, long, float, string, list or None.
        The replacement value must be a bool, int, long, float, string or None. If `value` is a
        list, `value` should be of the same length and type as `to_replace`.
        If `value` is a scalar and `to_replace` is a sequence, then `value` is
        used as a replacement for each item in `to_replace`.
    :param subset: optional list of column names to consider.
        Columns specified in subset that do not have matching data type are ignored.
        For example, if `value` is a string, and subset contains a non-string column,
        then the non-string column is simply ignored.

    >>> df4.na.replace(10, 20).show()
    +----+------+-----+
    | age|height| name|
    +----+------+-----+
    |  20|    80|Alice|
    |   5|  null|  Bob|
    |null|  null|  Tom|
    |null|  null| null|
    +----+------+-----+

    >>> df4.na.replace('Alice', None).show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|null|
    |   5|  null| Bob|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+

    >>> df4.na.replace({'Alice': None}).show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|null|
    |   5|  null| Bob|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+

    >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
    +----+------+----+
    | age|height|name|
    +----+------+----+
    |  10|    80|   A|
    |   5|  null|   B|
    |null|  null| Tom|
    |null|  null|null|
    +----+------+----+
    """
    # The _NoValue sentinel distinguishes "value omitted" from an explicit
    # None; omitting it is only legal when to_replace is a dict.
    if value is _NoValue:
        if isinstance(to_replace, dict):
            value = None
        else:
            raise TypeError("value argument is required when to_replace is not a dictionary.")

    # Helper functions
    def all_of(types):
        """Given a type or tuple of types and a sequence of xs
        check if each x is instance of type(s)

        >>> all_of(bool)([True, False])
        True
        >>> all_of(basestring)(["a", 1])
        False
        """
        def all_of_(xs):
            return all(isinstance(x, types) for x in xs)
        return all_of_

    # NOTE: basestring/long come from the py2/py3 compat layer this module
    # sets up elsewhere.
    all_of_bool = all_of(bool)
    all_of_str = all_of(basestring)
    all_of_numeric = all_of((float, int, long))

    # Validate input types
    valid_types = (bool, float, int, long, basestring, list, tuple)
    if not isinstance(to_replace, valid_types + (dict, )):
        raise ValueError(
            "to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
            "Got {0}".format(type(to_replace)))

    if not isinstance(value, valid_types) and value is not None \
            and not isinstance(to_replace, dict):
        raise ValueError("If to_replace is not a dict, value should be "
                         "a bool, float, int, long, string, list, tuple or None. "
                         "Got {0}".format(type(value)))

    if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
        if len(to_replace) != len(value):
            raise ValueError("to_replace and value lists should be of the same length. "
                             "Got {0} and {1}".format(len(to_replace), len(value)))

    if not (subset is None or isinstance(subset, (list, tuple, basestring))):
        raise ValueError("subset should be a list or tuple of column names, "
                         "column name or None. Got {0}".format(type(subset)))

    # Reshape input arguments if necessary
    if isinstance(to_replace, (float, int, long, basestring)):
        to_replace = [to_replace]

    if isinstance(to_replace, dict):
        rep_dict = to_replace
        if value is not None:
            warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
    else:
        # Broadcast a scalar (or None) value across every to_replace item.
        if isinstance(value, (float, int, long, basestring)) or value is None:
            value = [value for _ in range(len(to_replace))]
        rep_dict = dict(zip(to_replace, value))

    if isinstance(subset, basestring):
        subset = [subset]

    # Verify we were not passed in mixed type generics.
    if not any(all_of_type(rep_dict.keys())
               and all_of_type(x for x in rep_dict.values() if x is not None)
               for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
        raise ValueError("Mixed type replacements are not supported")

    # Delegate to the JVM DataFrameNaFunctions; '*' means "all columns".
    if subset is None:
        return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
    else:
        return DataFrame(
            self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
Returns a new :class:`DataFrame` replacing a value with another value. :func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are aliases of each other. Values to_replace and value must have the same type and can only be numerics, booleans, or strings. Value can have None. When replacing, the new value will be cast to the type of the existing column. For numeric replacements all values to be replaced should have unique floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`) and arbitrary replacement will be used. :param to_replace: bool, int, long, float, string, list or dict. Value to be replaced. If the value is a dict, then `value` is ignored or can be omitted, and `to_replace` must be a mapping between a value and a replacement. :param value: bool, int, long, float, string, list or None. The replacement value must be a bool, int, long, float, string or None. If `value` is a list, `value` should be of the same length and type as `to_replace`. If `value` is a scalar and `to_replace` is a sequence, then `value` is used as a replacement for each item in `to_replace`. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. 
>>> df4.na.replace(10, 20).show() +----+------+-----+ | age|height| name| +----+------+-----+ | 20| 80|Alice| | 5| null| Bob| |null| null| Tom| |null| null| null| +----+------+-----+ >>> df4.na.replace('Alice', None).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace({'Alice': None}).show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80|null| | 5| null| Bob| |null| null| Tom| |null| null|null| +----+------+----+ >>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show() +----+------+----+ | age|height|name| +----+------+----+ | 10| 80| A| | 5| null| B| |null| null| Tom| |null| null|null| +----+------+----+
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):
    '''
    Event Detection (silence removal)
    ARGUMENTS:
         - x:                the input audio signal
         - fs:               sampling freq
         - st_win, st_step:  window size and step in seconds
         - smoothWindow:     (optional) smooth window (in seconds)
         - weight:           (optional) weight factor (0 < weight < 1)
                             the higher, the more strict
         - plot:             (optional) True if results are to be plotted
    RETURNS:
         - seg_limits:       list of segment limits in seconds (e.g.
                             [[0.1, 0.9], [1.4, 3.0]] means that the
                             resulting segments are (0.1 - 0.9) seconds
                             and (1.4, 3.0) seconds
    '''
    # Clamp weight into the open interval (0, 1) so both terms of the
    # weighted threshold average below always contribute.
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01

    # Step 1: feature extraction
    x = audioBasicIO.stereo2mono(x)
    st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs)

    # Step 2: train binary svm classifier of low vs high energy frames
    # keep only the energy short-term sequence (2nd feature)
    st_energy = st_feats[1, :]
    en = numpy.sort(st_energy)
    # number of 10% of the total short-term windows
    l1 = int(len(en) / 10)
    # compute "lower" 10% energy threshold (epsilon keeps it non-zero)
    t1 = numpy.mean(en[0:l1]) + 0.000000000000001
    # compute "higher" 10% energy threshold
    # NOTE(review): en[-l1:-1] excludes the single largest energy value
    # from the mean — confirm this is intentional.
    t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001
    # get all features that correspond to low energy
    class1 = st_feats[:, numpy.where(st_energy <= t1)[0]]
    # get all features that correspond to high energy
    class2 = st_feats[:, numpy.where(st_energy >= t2)[0]]
    # form the binary classification task and ...
    faets_s = [class1.T, class2.T]
    # normalize and train the respective svm probabilistic model
    # (ONSET vs SILENCE)
    [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s)
    svm = aT.trainSVM(faets_s_norm, 1.0)

    # Step 3: compute onset probability based on the trained svm
    prob_on_set = []
    for i in range(st_feats.shape[1]):
        # for each frame: normalize with the training statistics
        cur_fv = (st_feats[:, i] - means_s) / stds_s
        # get svm probability (that it belongs to the ONSET class)
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = numpy.array(prob_on_set)
    # smooth probability:
    prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step)

    # Step 4A: detect onset frame indices:
    prog_on_set_sort = numpy.sort(prob_on_set)
    # find probability Threshold as a weighted average
    # of top 10% and lower 10% of the values
    Nt = int(prog_on_set_sort.shape[0] / 10)
    T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) +
         weight * numpy.mean(prog_on_set_sort[-Nt::]))

    max_idx = numpy.where(prob_on_set > T)[0]
    # get the indices of the frames that satisfy the thresholding
    i = 0
    time_clusters = []
    seg_limits = []

    # Step 4B: group frame indices to onset segments
    while i < len(max_idx):
        # for each of the detected onset indices
        cur_cluster = [max_idx[i]]
        if i == len(max_idx)-1:
            break
        # grow the cluster while consecutive indices are at most 2 frames apart
        while max_idx[i+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_idx[i+1])
            i += 1
            if i == len(max_idx)-1:
                break
        i += 1
        time_clusters.append(cur_cluster)
        # convert frame indices back to seconds via the short-term step
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])

    # Step 5: Post process: remove very small segments:
    min_dur = 0.2
    seg_limits_2 = []
    for s in seg_limits:
        if s[1] - s[0] > min_dur:
            seg_limits_2.append(s)
    seg_limits = seg_limits_2

    if plot:
        timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs)

        plt.subplot(2, 1, 1)
        plt.plot(timeX, x)
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.subplot(2, 1, 2)
        plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step,
                              st_step), prob_on_set)
        plt.title('Signal')
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.title('svm Probability')
        plt.show()

    return seg_limits
Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optional) smooth window (in seconds) - weight: (optional) weight factor (0 < weight < 1) the higher, the more strict - plot: (optional) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
def receive_empty(self, message):
    """
    Pair an incoming empty message (ACK/RST) with its pending transaction.

    The transaction is looked up first by MID, then by token, trying both
    the unicast source address and the all-CoAP-nodes multicast address;
    the matched transaction's acknowledged/rejected flags are updated.

    :type message: Message
    :param message: the received message
    :rtype : Transaction
    :return: the transaction to which the message belongs, or None if no
        matching transaction is found (or the message has no source)
    """
    logger.debug("receive_empty - " + str(message))
    try:
        host, port = message.source
    except AttributeError:
        # Message carries no usable source address; nothing to match.
        return
    key_mid = str_append_hash(host, port, message.mid)
    key_mid_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.mid)
    key_token = str_append_hash(host, port, message.token)
    key_token_multicast = str_append_hash(defines.ALL_COAP_NODES, port, message.token)
    if key_mid in list(self._transactions.keys()):
        transaction = self._transactions[key_mid]
    elif key_token in self._transactions_token:
        transaction = self._transactions_token[key_token]
    elif key_mid_multicast in list(self._transactions.keys()):
        transaction = self._transactions[key_mid_multicast]
    elif key_token_multicast in self._transactions_token:
        transaction = self._transactions_token[key_token_multicast]
    else:
        logger.warning("Un-Matched incoming empty message " + str(host) + ":" + str(port))
        return None

    if message.type == defines.Types["ACK"]:
        if not transaction.request.acknowledged:
            transaction.request.acknowledged = True
        elif (transaction.response is not None) and (not transaction.response.acknowledged):
            transaction.response.acknowledged = True
    elif message.type == defines.Types["RST"]:
        if not transaction.request.acknowledged:
            transaction.request.rejected = True
        # BUG FIX: guard against transaction.response being None (mirrors
        # the ACK branch above). Previously an RST arriving after the
        # request was acknowledged but before any response existed raised
        # AttributeError on transaction.response.acknowledged.
        elif (transaction.response is not None) and (not transaction.response.acknowledged):
            transaction.response.rejected = True
    elif message.type == defines.Types["CON"]:
        # implicit ACK (might have been lost)
        logger.debug("Implicit ACK on received CON for waiting transaction")
        transaction.request.acknowledged = True
    else:
        logger.warning("Unhandled message type...")

    # Stop any pending retransmission timer for this transaction.
    if transaction.retransmit_stop is not None:
        transaction.retransmit_stop.set()

    return transaction
Pair ACKs with requests. :type message: Message :param message: the received message :rtype: Transaction :return: the transaction to which the message belongs
def post(self, endpoint, **kwargs):
    """Create a resource via an HTTP POST request.

    Args:
        endpoint: resource endpoint.
        **kwargs: extra options forwarded to the request helper.
    """
    http_method = requests.post
    return self._request(http_method, endpoint, **kwargs)
Create a resource. Args: endpoint: resource endpoint.
def get_hotkey_name(names=None):
    """
    Builds a canonical hotkey string from the given key names, or from the
    currently pressed keys when no names are supplied.  The result is
    produced by:

    - normalizing names;
    - removing "left" and "right" prefixes;
    - replacing the "+" key name with "plus" to avoid ambiguity;
    - putting modifier keys first, in a standardized order;
    - sorting the remaining keys;
    - joining everything with "+".

    Example:

        get_hotkey_name(['+', 'left ctrl', 'shift'])
        # "ctrl+shift+plus"
    """
    if names is None:
        # Snapshot the names of whatever keys are pressed right now.
        _listener.start_if_necessary()
        with _pressed_events_lock:
            names = [event.name for event in _pressed_events.values()]
    else:
        names = [normalize_name(name) for name in names]

    # Strip side prefixes and disambiguate the literal "+" key.
    cleaned = {
        name.replace('left ', '').replace('right ', '').replace('+', 'plus')
        for name in names
    }

    # https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/
    # > List modifier keys in the correct order. If you use more than one modifier key in a
    # > hotkey, always list them in this order: Control, Option, Shift, Command.
    modifier_order = ['ctrl', 'alt', 'shift', 'windows']

    def sort_rank(key):
        # Modifiers sort by their canonical position; everything else after.
        if key in modifier_order:
            return (modifier_order.index(key), str(key))
        return (5, str(key))

    return '+'.join(sorted(cleaned, key=sort_rank))
Returns a string representation of hotkey from the given key names, or the currently pressed keys if not given. This function: - normalizes names; - removes "left" and "right" prefixes; - replaces the "+" key name with "plus" to avoid ambiguity; - puts modifier keys first, in a standardized order; - sorts remaining keys; - finally, joins everything with "+". Example: get_hotkey_name(['+', 'left ctrl', 'shift']) # "ctrl+shift+plus"
def run(self):
    """
    Main supervisor loop.

    Marks this master BackgroundProcess record as alive (or exits if the
    record is missing), starts the child processes, then repeatedly:
    pops one queued signal, checks the DB connection, heartbeats the
    process row, and either manages the children or dispatches the
    signal.  SIGTERM raises StopIteration, which triggers an orderly
    stop/cleanup/exit.
    """
    try:
        master_process = BackgroundProcess.objects.filter(pk=self.process_id).first()
        if master_process:
            # Record that initialisation has started.
            master_process.last_update = now()
            master_process.message = 'init child processes'
            master_process.save()
        else:
            # No DB record for this process: clean up the pid file and quit.
            self.delete_pid(force_del=True)
            self.stderr.write("no such process in BackgroundProcesses")
            sys.exit(0)

        self.manage_processes()
        while True:
            # handle signals (one per iteration, oldest first)
            sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None

            # check the DB connection
            check_db_connection()

            # heartbeat: update the process row so watchers see it alive
            BackgroundProcess.objects.filter(pk=self.process_id).update(
                last_update=now(), message='running..')

            if sig is None:
                self.manage_processes()
            elif sig not in self.SIGNALS:
                logger.error('%s, unhandled signal %d' % (self.label, sig))
                continue
            elif sig == signal.SIGTERM:
                # orderly shutdown via the StopIteration handler below
                logger.debug('%s, termination signal' % self.label)
                raise StopIteration
            elif sig == signal.SIGHUP:
                # todo handle sighup
                pass
            elif sig == signal.SIGUSR1:
                # restart all child processes
                logger.debug('PID %d, processed SIGUSR1 (%d) signal' % (self.pid, sig))
                self.restart()
            elif sig == signal.SIGUSR2:
                # write the process status to stdout
                self.status()
                pass
            sleep(5)
    except StopIteration:
        self.stop()
        self.delete_pid()
        sys.exit(0)
    except SystemExit:
        # let sys.exit() propagate untouched
        raise
    except:
        # last-resort guard: log any unexpected error with a traceback
        logger.error('%s(%d), unhandled exception\n%s' % (self.label, getpid(), traceback.format_exc()))
the main loop
def write_batch_json(self, content):
    """Write batch json data to a file.

    The file is created in the TcEx temp path and named
    ``batch-<timestamp>.json``, where the timestamp is the current epoch
    time with the decimal point removed.
    """
    stamp = str(time.time()).replace('.', '')
    filename = 'batch-{}.json'.format(stamp)
    target = os.path.join(self.tcex.args.tc_temp_path, filename)
    with open(target, 'w') as out_file:
        json.dump(content, out_file, indent=2)
Write batch json data to a file.
def session_preparation(self):
    """Prepare the session after the connection has been established.

    Waits for a device prompt, enables ANSI escape-code handling, sets
    the base prompt, disables output paging, and finally clears any
    residual data from the read channel.
    """
    # Wait until a prompt character ('>' or '#') appears on the channel.
    self._test_channel_read(pattern=r"[>#]")
    # NOTE(review): presumably this platform emits ANSI escape sequences
    # that the base class must strip — confirm against the device family.
    self.ansi_escape_codes = True
    self.set_base_prompt()
    self.disable_paging()
    # Clear the read buffer
    time.sleep(0.3 * self.global_delay_factor)
    self.clear_buffer()
Prepare the session after the connection has been established.
def _load_yaml_config(cls, config_data, filename="(unknown)"):
    """Load a yaml config file.

    :param config_data: YAML text or stream to parse.
    :param filename: name used in error messages.
    :return: the parsed configuration object.
    :raises yaml.YAMLError: re-raised after logging a descriptive message.
    """
    try:
        config = yaml.safe_load(config_data)
    except yaml.YAMLError as err:
        # BUG FIX: MarkedYAMLError always *has* a problem_mark attribute,
        # but it may be None (e.g. only a context mark is set); the old
        # hasattr() check then crashed on mark.line. Use getattr + None
        # check so such errors still produce the generic message.
        mark = getattr(err, 'problem_mark', None)
        if mark is not None:
            errmsg = ("Invalid YAML syntax in Configuration file "
                      "%(file)s at line: %(line)s, column: %(column)s."
                      % dict(file=filename,
                             line=mark.line + 1,
                             column=mark.column + 1))
        else:
            errmsg = ("YAML error reading Configuration file "
                      "%(file)s" % dict(file=filename))
        logger.error(errmsg)
        raise
    logger.info("Configuration: %s", config)
    return config
Load a yaml config file.
def filter(self, obj, *args, **kwargs):
    """
    Filter the given object through the filter chain.

    :param obj: The object to filter
    :param args: Additional arguments to pass to each filter function.
    :param kwargs: Additional keyword arguments to pass to each filter
                   function.
    :return: The filtered object or :data:`None`

    Each registered function receives the result of the previous one, in
    chain order; the chain short-circuits and returns :data:`None` as
    soon as any function returns :data:`None`.
    """
    current = obj
    for entry in self._filter_order:
        filter_func = entry[2]
        current = filter_func(current, *args, **kwargs)
        if current is None:
            return None
    return current
Filter the given object through the filter chain. :param obj: The object to filter :param args: Additional arguments to pass to each filter function. :param kwargs: Additional keyword arguments to pass to each filter function. :return: The filtered object or :data:`None` See the documentation of :class:`Filter` on how filtering operates. Returns the object returned by the last function in the filter chain or :data:`None` if any function returned :data:`None`.