code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def get_global_register_objects(self, do_sort=None, reverse=False, **kwargs):
    """Generate a list of register objects from keyword filters.

    Usage:
        get_global_register_objects(name=["Amp2Vbn", "GateHitOr"], address=[2, 3])

    :param do_sort: optional iterable of field names to sort the result by
    :param reverse: reverse the sort order
    :param kwargs: keyword lists of register names, addresses, ... used as cuts
    :returns: list of register objects
    :raises ValueError: if non-empty cuts were given but nothing matched
    """
    # Fast path for the most often used keyword: 'name' is a direct lookup.
    try:
        names = iterable(kwargs.pop('name'))
    except KeyError:
        register_objects = []
    else:
        register_objects = [self.global_registers[reg] for reg in names]
    # Any remaining keyword selects registers whose field value intersects
    # the allowed values.  (Py3 fix: iterate the dict directly instead of
    # the removed iterkeys()/itervalues() methods.)
    for keyword, values in kwargs.items():
        allowed_values = iterable(values)
        register_objects.extend(
            global_register
            for global_register in self.global_registers.values()
            if set(iterable(global_register[keyword])).intersection(allowed_values))
    # Py3 fix: a filter() object is always truthy, so test explicitly
    # whether any non-empty cut was supplied before raising.
    if not register_objects and any(kwargs.values()):
        raise ValueError('Global register objects empty')
    if do_sort:
        return sorted(register_objects, key=itemgetter(*do_sort), reverse=reverse)
    return register_objects
Generate register objects (list) from register name list Usage: get_global_register_objects(name = ["Amp2Vbn", "GateHitOr", "DisableColumnCnfg"], address = [2, 3]) Receives: keyword lists of register names, addresses,... for making cuts Returns: list of register objects
def _write_fuzzmanagerconf(self, path):
    """Write a FuzzManager config file for the selected build.

    @type path: basestring
    @param path: A string representation of the fuzzmanager config path
    """
    output = configparser.RawConfigParser()
    output.add_section('Main')
    output.set('Main', 'platform', self.moz_info['processor'].replace('_', '-'))
    output.set('Main', 'product', 'mozilla-' + self._branch)
    output.set('Main', 'product_version', '%.8s-%.12s' % (self.build_id, self.changeset))
    # make sure 'os' match what FM expects
    os_name = self.moz_info['os'].lower()
    if os_name.startswith('android'):
        output.set('Main', 'os', 'android')
    elif os_name.startswith('lin'):
        output.set('Main', 'os', 'linux')
    elif os_name.startswith('mac'):
        output.set('Main', 'os', 'macosx')
    elif os_name.startswith('win'):
        output.set('Main', 'os', 'windows')
    else:
        # Unknown platform: pass through the raw value.
        output.set('Main', 'os', self.moz_info['os'])
    output.add_section('Metadata')
    output.set('Metadata', 'pathPrefix', self.moz_info['topsrcdir'])
    output.set('Metadata', 'buildFlags', self._flags.build_string().lstrip('-'))
    # Collapsed the duplicated Windows/default branches: only the file-name
    # suffix differs; Android uses a fixed APK path instead.
    if self._platform.system == "Android":
        conf_path = os.path.join(path, 'target.apk.fuzzmanagerconf')
    else:
        suffix = '.exe' if self._platform.system == "Windows" else ''
        fm_name = self._target + suffix + '.fuzzmanagerconf'
        conf_path = os.path.join(path, 'dist', 'bin', fm_name)
    with open(conf_path, 'w') as conf_fp:
        output.write(conf_fp)
Write fuzzmanager config file for selected build @type path: basestring @param path: A string representation of the fuzzmanager config path
def add_node(self, node):
    """Add a node and connect it to the center (the oldest node)."""
    existing = self.nodes()
    if len(existing) <= 1:
        # Nothing to connect to yet.
        return
    center = min(existing, key=attrgetter("creation_time"))
    center.connect(direction="both", whom=node)
Add a node and connect it to the center.
def region_interface_areas(regions, areas, voxel_size=1, strel=None):
    r"""
    Calculates the interfacial area between all pairs of adjacent regions

    Parameters
    ----------
    regions : ND-array
        An image of the pore space partitioned into individual pore regions.
        Note that zeros in the image will not be considered for area
        calculation.
    areas : array_like
        A list containing the areas of each region, as determined by
        ``region_surface_area``.  Note that the region number and list index
        are offset by 1, such that the area for region 1 is stored in
        ``areas[0]``.
    voxel_size : scalar
        The resolution of the image, expressed as the length of one side of
        a voxel, so the volume of a voxel would be **voxel_size**-cubed.
        The default is 1.
    strel : array_like
        The structuring element used to blur the region.  If not provided,
        then a spherical element (or disk) with radius 1 is used.  See the
        docstring for ``mesh_region`` for more details, as this argument is
        passed to there.

    Returns
    -------
    result : named_tuple
        A named-tuple containing 2 arrays.  ``conns`` holds the connectivity
        information and ``area`` holds the result for each pair.  ``conns``
        is a N-regions by 2 array with each row containing the region number
        of an adjacent pair of regions.  For instance, if ``conns[0, 0]`` is
        0 and ``conns[0, 1]`` is 5, then row 0 of ``area`` contains the
        interfacial area shared by regions 0 and 5.
    """
    print('_'*60)
    print('Finding interfacial areas between each region')
    from skimage.morphology import disk, square, ball, cube
    im = regions.copy()
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    if im.ndim == 2:
        # Swap in the 2D structuring elements for the 3D names used below.
        cube = square
        ball = disk
    # Get 'slices' into im for each region
    slices = spim.find_objects(im)
    # Initialize arrays
    Ps = sp.arange(1, sp.amax(im)+1)
    sa = sp.zeros_like(Ps, dtype=float)
    sa_combined = []  # Difficult to preallocate since number of conns unknown
    cn = []
    # Start extracting area from im
    for i in tqdm(Ps):
        reg = i - 1
        if slices[reg] is not None:
            s = extend_slice(slices[reg], im.shape)
            sub_im = im[s]
            mask_im = sub_im == i
            sa[reg] = areas[reg]
            # Dilate the region by one voxel so it overlaps its neighbours;
            # multiplying by sub_im recovers the neighbouring region labels.
            im_w_throats = spim.binary_dilation(input=mask_im,
                                                structure=ball(1))
            im_w_throats = im_w_throats*sub_im
            Pn = sp.unique(im_w_throats)[1:] - 1
            for j in Pn:
                # Record each adjacent pair only once (j > reg).
                if j > reg:
                    cn.append([reg, j])
                    # NOTE(review): only the first two axes are sliced here,
                    # so for a 3D image the merged region spans the full
                    # extent of the remaining axis -- confirm intended.
                    merged_region = im[(min(slices[reg][0].start,
                                            slices[j][0].start)):
                                       max(slices[reg][0].stop,
                                           slices[j][0].stop),
                                       (min(slices[reg][1].start,
                                            slices[j][1].start)):
                                       max(slices[reg][1].stop,
                                           slices[j][1].stop)]
                    merged_region = ((merged_region == reg + 1) +
                                     (merged_region == j + 1))
                    mesh = mesh_region(region=merged_region, strel=strel)
                    sa_combined.append(mesh_surface_area(mesh))
    # Interfacial area calculation: half of the overlap between the two
    # individual surface areas and the merged surface area.
    cn = sp.array(cn)
    ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
    # Non-positive estimates are clamped to 1 (one voxel face after scaling).
    ia[ia <= 0] = 1
    result = namedtuple('interfacial_areas', ('conns', 'area'))
    result.conns = cn
    result.area = ia * voxel_size**2
    return result
r""" Calculates the interfacial area between all pairs of adjecent regions Parameters ---------- regions : ND-array An image of the pore space partitioned into individual pore regions. Note that zeros in the image will not be considered for area calculation. areas : array_like A list containing the areas of each regions, as determined by ``region_surface_area``. Note that the region number and list index are offset by 1, such that the area for region 1 is stored in ``areas[0]``. voxel_size : scalar The resolution of the image, expressed as the length of one side of a voxel, so the volume of a voxel would be **voxel_size**-cubed. The default is 1. strel : array_like The structuring element used to blur the region. If not provided, then a spherical element (or disk) with radius 1 is used. See the docstring for ``mesh_region`` for more details, as this argument is passed to there. Returns ------- result : named_tuple A named-tuple containing 2 arrays. ``conns`` holds the connectivity information and ``area`` holds the result for each pair. ``conns`` is a N-regions by 2 array with each row containing the region number of an adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and ``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial area shared by regions 0 and 5.
def encode_max_apdu_length_accepted(arg):
    """Return the encoding of the highest encodable value less than
    the value of the arg."""
    # Scan encodings from largest (5) down to smallest (0).
    for code in reversed(range(6)):
        if arg >= _max_apdu_length_encoding[code]:
            return code
    raise ValueError("invalid max APDU length accepted: %r" % (arg,))
Return the encoding of the highest encodable value less than the value of the arg.
def score_leaves(self) -> Set[BaseEntity]:
    """Calculate the score for all leaves.

    :return: The set of leaf nodes that were scored
    """
    leaves = set(self.iter_leaves())
    if not leaves:
        log.warning('no leaves.')
        return set()
    for leaf in leaves:
        score = self.calculate_score(leaf)
        self.graph.nodes[leaf][self.tag] = score
        log.log(5, 'chomping %s', leaf)
    return leaves
Calculate the score for all leaves. :return: The set of leaf nodes that were scored
def join_mwp(tags: List[str]) -> List[str]:
    """Join multi-word predicates to a single predicate ('V') token."""
    joined = []
    inside_verb = False
    for tag in tags:
        if "V" not in tag:
            # Non-verb tag passes through and breaks any verb span.
            joined.append(tag)
            inside_verb = False
            continue
        prefix, _ = tag.split("-")
        if inside_verb:
            # Continue the verb label across the predicate parts.
            prefix = 'I'
        joined.append(f"{prefix}-V")
        inside_verb = True
    return joined
Join multi-word predicates to a single predicate ('V') token.
def get_accounts(cls, soco=None):
    """Get all accounts known to the Sonos system.

    Args:
        soco (`SoCo`, optional): a `SoCo` instance to query. If `None`, a
            random instance is used. Defaults to `None`.

    Returns:
        dict: A dict containing account instances. Each key is the
            account's serial number, and each value is the related Account
            instance. Accounts which have been marked as deleted are
            excluded.

    Note:
        Any existing Account instance will have its attributes updated
        to those currently stored on the Sonos system.
    """
    root = XML.fromstring(cls._get_account_xml(soco))
    # The account XML is a <ZPSupportInfo type="User"> element containing
    # an <Accounts> element with one <Account Type="..." SerialNum="...">
    # child per account; deleted accounts carry Deleted="1".
    xml_accounts = root.findall('.//Account')
    result = {}
    for xml_account in xml_accounts:
        serial_number = xml_account.get('SerialNum')
        # Idiomatic boolean instead of `True if ... else False`.
        is_deleted = xml_account.get('Deleted') == '1'
        # cls._all_accounts is a weakvaluedict keyed by serial number,
        # acting as a database of the accounts we know about.
        account = cls._all_accounts.get(serial_number)
        if account:
            if is_deleted:
                # Known account now marked deleted: drop it and move on.
                del cls._all_accounts[serial_number]
                continue
        else:
            if is_deleted:
                # Unknown and already deleted: nothing to track.
                continue
            account = Account()
            account.serial_number = serial_number
            cls._all_accounts[serial_number] = account
    # Update the entry in our database with the details from XML.
        account.service_type = xml_account.get('Type')
        account.deleted = is_deleted
        account.username = xml_account.findtext('UN')
        # Not sure what 'MD' stands for. Metadata? May Delete?
        account.metadata = xml_account.findtext('MD')
        account.nickname = xml_account.findtext('NN')
        account.oa_device_id = xml_account.findtext('OADevID')
        account.key = xml_account.findtext('Key')
        result[serial_number] = account
    # There is always a TuneIn account, but it is handled separately by
    # Sonos and does not appear in the XML account data, so add it here.
    tunein = Account()
    tunein.service_type = '65031'  # Is this always the case?
    tunein.deleted = False
    tunein.username = ''
    tunein.metadata = ''
    tunein.nickname = ''
    tunein.oa_device_id = ''
    tunein.key = ''
    tunein.serial_number = '0'
    result['0'] = tunein
    return result
Get all accounts known to the Sonos system. Args: soco (`SoCo`, optional): a `SoCo` instance to query. If `None`, a random instance is used. Defaults to `None`. Returns: dict: A dict containing account instances. Each key is the account's serial number, and each value is the related Account instance. Accounts which have been marked as deleted are excluded. Note: Any existing Account instance will have its attributes updated to those currently stored on the Sonos system.
def do_reload(bot, target, cmdargs, server_send=None):
    """The reloading magic.

    - First, reload handler.py.
    - Then make copies of all the handler data we want to keep.
    - Create a new handler and restore all the data.
    """
    def send(msg):
        # Route output either to the control server or to the IRC target.
        if server_send is not None:
            server_send("%s\n" % msg)
        else:
            do_log(bot.connection, bot.get_target(target), msg)
    confdir = bot.handler.confdir
    if cmdargs == 'pull':
        # Permission checks: only the configured owner may trigger a pull
        # from IRC.
        if isinstance(target, irc.client.Event) and target.source.nick != bot.config['auth']['owner']:
            bot.connection.privmsg(bot.get_target(target), "Nope, not gonna do it.")
            return
        # Pull from the local checkout if it is a git repo, otherwise
        # from the configured GitHub repository.
        if exists(join(confdir, '.git')):
            send(misc.do_pull(srcdir=confdir))
        else:
            send(misc.do_pull(repo=bot.config['api']['githubrepo']))
    # Reload config
    importlib.reload(config)
    bot.config = config.load_config(join(confdir, 'config.cfg'), send)
    # Reimport helpers
    errored_helpers = modutils.scan_and_reimport('helpers')
    if errored_helpers:
        send("Failed to load some helpers.")
        for error in errored_helpers:
            send("%s: %s" % error)
        return False
    if not load_modules(bot.config, confdir, send):
        return False
    # Preserve handler data across the reload, then rebuild the handler.
    data = bot.handler.get_data()
    bot.shutdown_mp()
    bot.handler = handler.BotHandler(bot.config, bot.connection, bot.channels, confdir)
    bot.handler.set_data(data)
    bot.handler.connection = bot.connection
    bot.handler.channels = bot.channels
    return True
The reloading magic. - First, reload handler.py. - Then make copies of all the handler data we want to keep. - Create a new handler and restore all the data.
def _push_cm_exit(self, cm, cm_exit):
    """Helper to correctly register callbacks to __exit__ methods."""
    wrapper = self._create_exit_wrapper(cm, cm_exit)
    self._push_exit_callback(wrapper, True)
Helper to correctly register callbacks to __exit__ methods.
def run_from_command_line():
    """Run Firenado's management commands from a command line."""
    import importlib
    # Load every configured management command module.
    for commands_conf in firenado.conf.management['commands']:
        logger.debug("Loading %s commands from %s." % (
            commands_conf['name'], commands_conf['module']
        ))
        # import_module replaces the original exec('import %s' % ...),
        # which executed an import statement built from a config string.
        importlib.import_module(commands_conf['module'])
    # Skip leading option arguments to find where the command name starts.
    command_index = 1
    for arg in sys.argv[1:]:
        command_index += 1
        if arg[0] != "-":
            break
    parser = FirenadoArgumentParser(prog=os.path.split(sys.argv[0])[1],
                                    add_help=False)
    parser.add_argument("-h", "--help", default=argparse.SUPPRESS)
    parser.add_argument("command", default="help", help="Command to executed")
    try:
        namespace = parser.parse_args(sys.argv[1:command_index])
        if not command_exists(namespace.command):
            show_command_line_usage(parser)
        else:
            run_command(namespace.command, sys.argv[command_index-1:])
    except FirenadoArgumentError:
        show_command_line_usage(parser, True)
Run Firenado's management commands from a command line
def get_options(server):
    """Retrieve the available HTTP verbs"""
    try:
        response = requests.options(
            server, allow_redirects=False, verify=False, timeout=5)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.MissingSchema):
        return "Server {} is not available!".format(server)
    # The Allow header lists the verbs the server accepts.
    allowed = response.headers.get('Allow')
    if allowed is None:
        return "Unable to get HTTP methods"
    return {'allowed': allowed}
Retrieve the available HTTP verbs
def get_key(self, key_id):
    """Return a restclients.Key object for the given key ID.

    Raises DataFailureException if the key ID is not found or if there
    is an error communicating with the KWS.
    """
    resource = self._get_resource(ENCRYPTION_KEY_URL.format(key_id))
    return self._key_from_json(resource)
Returns a restclients.Key object for the given key ID. If the key ID isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown.
def regular(u):
    '''Equation matrix generation for the regular (cubic) lattice.

    The order of constants is as follows:

    .. math::
       C_{11}, C_{12}, C_{44}

    :param u: vector of deformations:
        [ :math:`u_{xx}, u_{yy}, u_{zz}, u_{yz}, u_{xz}, u_{xy}` ]
    :returns: Symmetry defined stress-strain equation matrix
    '''
    uxx = u[0]
    uyy = u[1]
    uzz = u[2]
    uyz = u[3]
    uxz = u[4]
    uxy = u[5]
    rows = [
        [uxx, uyy + uzz, 0],
        [uyy, uxx + uzz, 0],
        [uzz, uxx + uyy, 0],
        [0, 0, 2 * uyz],
        [0, 0, 2 * uxz],
        [0, 0, 2 * uxy],
    ]
    return array(rows)
Equation matrix generation for the regular (cubic) lattice. The order of constants is as follows: .. math:: C_{11}, C_{12}, C_{44} :param u: vector of deformations: [ :math:`u_{xx}, u_{yy}, u_{zz}, u_{yz}, u_{xz}, u_{xy}` ] :returns: Symmetry defined stress-strain equation matrix
def _update_mtime(self):
    """Update the cached modification time of the editor's file."""
    try:
        self._mtime = os.path.getmtime(self.editor.file.path)
    except OSError:
        # file_path does not exist any more; stop watching it.
        self._mtime = 0
        self._timer.stop()
    except (TypeError, AttributeError):
        # file path is none, this happens if you use setPlainText instead of
        # openFile. This is perfectly fine, we just do not have anything to
        # watch
        try:
            self._timer.stop()
        except AttributeError:
            # Timer may not exist yet -- TODO confirm object lifecycle.
            pass
Updates the file's modification time
def crop(self, lat, lon, var):
    """Crop a subset of the dataset for each var.

    Given lat and lon, it returns the smallest subset that still
    contains the requested coordinates inside it.  Handles special
    cases like a region around Greenwich and the international date
    line; accepts 0 to 360 and -180 to 180 longitude references.

    :returns: tuple of (dict mapping each var name to its cropped
        array, cropped dims)
    """
    dims, idx = cropIndices(self.dims, lat, lon)
    subset = {}
    for v in var:
        # BUG FIX: accumulate one entry per variable instead of rebinding
        # the whole dict each iteration (which kept only the last var).
        subset[v] = self.ncs[0][v][idx['yn'], idx['xn']]
    return subset, dims
Crop a subset of the dataset for each var Given doy, depth, lat and lon, it returns the smallest subset that still contains the requested coordinates inside it. It handles special cases like a region around Greenwich and the international date line. Accepts 0 to 360 and -180 to 180 longitude reference. It extends time and longitude coordinates, to simplify the use of series. For example, a ship track can be requested with a longitude sequence like [352, 358, 364, 369, 380].
def is_reseller(self):
    """is the user a reseller"""
    has_reseller_role = self.role == self.roles.reseller.value
    is_approved = self.state == State.approved
    return has_reseller_role and is_approved
is the user a reseller
def pydict2xmlstring(metadata_dict, **kwargs):
    """Create an XML string from a metadata dictionary.

    Keyword arguments: ordering, root_label, root_namespace,
    elements_namespace, namespace_map, root_attributes.
    """
    ordering = kwargs.get('ordering', UNTL_XML_ORDER)
    root_label = kwargs.get('root_label', 'metadata')
    root_namespace = kwargs.get('root_namespace', None)
    elements_namespace = kwargs.get('elements_namespace', None)
    namespace_map = kwargs.get('namespace_map', None)
    root_attributes = kwargs.get('root_attributes', None)
    # Set any root namespace and namespace map.
    if root_namespace and namespace_map:
        root = Element(root_namespace + root_label, nsmap=namespace_map)
    elif namespace_map:
        root = Element(root_label, nsmap=namespace_map)
    else:
        root = Element(root_label)
    # Set any root element attributes.
    if root_attributes:
        for key, value in root_attributes.items():
            root.attrib[key] = value
    # Create an XML structure from field list.
    for metadata_key in ordering:
        if metadata_key not in metadata_dict:
            continue
        for element in metadata_dict[metadata_key]:
            if 'content' not in element:
                continue
            # Deduplicated attribute handling; the first matching name
            # wins, preserving the original qualifier -> role -> scheme
            # precedence.
            attribs = None
            for attr_name in ('qualifier', 'role', 'scheme'):
                if attr_name in element:
                    attribs = {attr_name: element[attr_name]}
                    break
            if attribs is None:
                create_dict_subelement(
                    root,
                    metadata_key,
                    element['content'],
                    namespace=elements_namespace,
                )
            else:
                create_dict_subelement(
                    root,
                    metadata_key,
                    element['content'],
                    attribs=attribs,
                    namespace=elements_namespace,
                )
    # Create the XML tree.
    return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(
        root, pretty_print=True
    )
Create an XML string from a metadata dictionary.
def scroll_deck_x(self, decknum, scroll_x):
    """Move a deck left or right."""
    if decknum >= len(self.decks):
        raise IndexError("I have no deck at {}".format(decknum))
    offsets = self.deck_x_hint_offsets
    if decknum >= len(offsets):
        # Pad the offsets list with zeros so the index exists.
        padding = [0] * (decknum - len(offsets) + 1)
        self.deck_x_hint_offsets = list(offsets) + padding
    self.deck_x_hint_offsets[decknum] += scroll_x
    self._trigger_layout()
Move a deck left or right.
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    if isinstance(s, memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            # Already bytes in the target encoding: return unchanged.
            return s
        else:
            # Bytes are assumed to be UTF-8; re-encode to the target.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                              errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects.
def extract_symbols(self, raw_data_directory: str, destination_directory: str):
    """Extract symbols from the raw XML documents and matching images of
    the Audiveris OMR dataset into individual symbols.

    :param raw_data_directory: The directory, that contains the xml-files
        and matching images
    :param destination_directory: The directory, in which the symbols
        should be generated into. One sub-folder per symbol category will
        be generated automatically
    """
    print("Extracting Symbols from Audiveris OMR Dataset...")
    all_xml_files = [y for x in os.walk(raw_data_directory)
                     for y in glob(os.path.join(x[0], '*.xml'))]
    all_image_files = [y for x in os.walk(raw_data_directory)
                       for y in glob(os.path.join(x[0], '*.png'))]
    # NOTE(review): pairing relies on both walks yielding files in the
    # same order and in equal numbers -- confirm for this dataset layout.
    for xml_file, image_file in zip(all_xml_files, all_image_files):
        self.__extract_symbols(xml_file, image_file, destination_directory)
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into individual symbols :param raw_data_directory: The directory, that contains the xml-files and matching images :param destination_directory: The directory, in which the symbols should be generated into. One sub-folder per symbol category will be generated automatically
def dependents_of_addresses(self, addresses):
    """Given an iterable of addresses, yield all of those addresses dependents."""
    dependents = OrderedSet(addresses)
    for address in addresses:
        # Collect both explicit and implicit dependents of each address.
        for mapping in (self._dependent_address_map,
                        self._implicit_dependent_address_map):
            dependents.update(mapping[address])
    return dependents
Given an iterable of addresses, yield all of those addresses dependents.
def dump(self):
    '''Print the entire contents of this to debug log messages.

    This is really only intended for debugging.  It could produce
    a lot of data.
    '''
    with self.registry.lock(identifier=self.worker_id) as session:
        # .keys() works on both Python 2 and 3, unlike the removed
        # Py2-only .iterkeys().
        for work_spec_name in self.registry.pull(NICE_LEVELS).keys():
            def scan(sfx):
                # Returns the work-unit keys for this spec + state suffix.
                v = self.registry.pull(WORK_UNITS_ + work_spec_name + sfx)
                if v is None:
                    return []
                return v.keys()
            for key in scan(''):
                logger.debug('spec {0} unit {1} available or pending'
                             .format(work_spec_name, key))
            for key in scan(_BLOCKED):
                blocked_on = session.get(
                    WORK_UNITS_ + work_spec_name + _DEPENDS, key)
                logger.debug('spec {0} unit {1} blocked on {2!r}'
                             .format(work_spec_name, key, blocked_on))
            for key in scan(_FINISHED):
                logger.debug('spec {0} unit {1} finished'
                             .format(work_spec_name, key))
            for key in scan(_FAILED):
                logger.debug('spec {0} unit {1} failed'
                             .format(work_spec_name, key))
Print the entire contents of this to debug log messages. This is really only intended for debugging. It could produce a lot of data.
def remove_hairs_from_tags(dom):
    """
    Apply :func:`.remove_hairs` to the content of selected tags:

    - mods:title
    - mods:placeTerm
    """
    def _cleaned(tag):
        return remove_hairs(tag.getContent())

    transform_content(
        dom.match("mods:mods", "mods:titleInfo", "mods:title"),
        _cleaned,
    )
    transform_content(
        dom.match(
            "mods:originInfo",
            "mods:place",
            ["mods:placeTerm", {"type": "text"}],
        ),
        _cleaned,
    )
Use :func:`.remove_hairs` to some of the tags: - mods:title - mods:placeTerm
def start_hq(output_dir, config, topic, is_master=True, **kwargs):
    """Start a HQ"""
    hq_class = get_hq_class(config.get('hq_class'))
    hq = hq_class(output_dir, config, topic, **kwargs)
    hq.setup()
    if is_master:
        # The master waits for the minimum number of turrets to report in.
        hq.wait_turrets(config.get("min_turrets", 1))
    hq.run()
    hq.tear_down()
Start a HQ
def __deserialize_model(self, data, klass):
    """Deserializes list or dict to model.

    :param data: dict, list.
    :param klass: class literal.
    :return: model object.
    """
    instance = klass()
    if not instance.swagger_types:
        # Models without declared types pass the data through unchanged.
        return data
    # dict.items() works on both Python 2 and 3; this drops the
    # six.iteritems dependency.
    for attr, attr_type in instance.swagger_types.items():
        if data is not None \
                and instance.attribute_map[attr] in data \
                and isinstance(data, (list, dict)):
            value = data[instance.attribute_map[attr]]
            setattr(instance, '_' + attr,
                    self.__deserialize(value, attr_type))
    return instance
Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.
def phase_to_color_wheel(complex_number):
    """Map the phase of a complex number to a color in (r, g, b).

    The phase is first mapped to an angle in [0, 2*pi) and then onto a
    12-step color wheel with blue at zero phase.
    """
    phase = np.angle(complex_number)
    # Normalise to [0, 2*pi) and quantise to one of 12 sectors.
    sector = int(((phase + 2 * np.pi) % (2 * np.pi)) / np.pi * 6)
    wheel = (
        (0, 0, 1),
        (0.5, 0, 1),
        (1, 0, 1),
        (1, 0, 0.5),
        (1, 0, 0),
        (1, 0.5, 0),
        (1, 1, 0),
        (0.5, 1, 0),
        (0, 1, 0),
        (0, 1, 0.5),
        (0, 1, 1),
        (0, 0.5, 1),
    )
    return wheel[sector]
Map the phase of a complex number to a color in (r,g,b). The phase of complex_number is first mapped to an angle in the range [0, 2pi] and then to a color wheel with blue at zero phase.
def _parse_coroutine(self):
    """Parser state machine.

    Every 'yield' expression returns the next byte.
    """
    while True:
        d = yield
        if d == int2byte(0):
            pass  # NOP
        # Go to state escaped.
        elif d == IAC:
            d2 = yield
            if d2 == IAC:
                # Escaped 0xFF: deliver it as a literal data byte.
                self.received_data(d2)
            # Handle simple commands.
            elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
                self.command_received(d2, None)
            # Handle IAC-[DO/DONT/WILL/WONT] commands.
            elif d2 in (DO, DONT, WILL, WONT):
                d3 = yield
                self.command_received(d2, d3)
            # Subnegotiation
            elif d2 == SB:
                # Consume everything until next IAC-SE
                data = []
                while True:
                    d3 = yield
                    if d3 == IAC:
                        d4 = yield
                        if d4 == SE:
                            break
                        else:
                            # IAC followed by anything else inside a
                            # subnegotiation is kept as payload.
                            data.append(d4)
                    else:
                        data.append(d3)
                self.negotiate(b''.join(data))
        else:
            # Ordinary data byte.
            self.received_data(d)
Parser state machine. Every 'yield' expression returns the next byte.
def apply_tasks_to_issue(self, tasks, issue_number, issue_body):
    """Applies task numbers to an issue.

    Returns the (possibly updated) issue body; the issue is edited
    remotely only when there are task numbers to apply.
    """
    # Removed the dead no-op `issue_body = issue_body` assignment.
    task_numbers = format_task_numbers_with_links(tasks)
    if not task_numbers:
        return issue_body
    # Replace any existing Asana section, then append a fresh one.
    new_body = ASANA_SECTION_RE.sub('', issue_body)
    new_body = new_body + "\n## Asana Tasks:\n\n%s" % task_numbers
    put("issue_edit", issue_number=issue_number, body=new_body)
    return new_body
Applies task numbers to an issue.
def overloaded_build(type_, add_name=None):
    """Factory for constant transformers that apply to a given build
    instruction.

    Parameters
    ----------
    type_ : type
        The object type to overload the construction of. This must be one
        of the "buildable" types, or types with a "BUILD_*" instruction.
    add_name : str, optional
        The suffix of the instruction that adds elements to the collection.
        For example: 'add' or 'append'

    Returns
    -------
    transformer : subclass of CodeTransformer
        A new code transformer class that will overload the provided
        literal types.
    """
    typename = type_.__name__
    instrname = 'BUILD_' + typename.upper()
    dict_ = OrderedDict(
        __doc__=dedent(
            """
            A CodeTransformer for overloading {name} instructions.
            """.format(name=instrname)
        )
    )
    try:
        build_instr = getattr(instructions, instrname)
    except AttributeError:
        raise TypeError("type %s is not buildable" % typename)
    if add_name is not None:
        try:
            add_instr = getattr(
                instructions,
                '_'.join((typename, add_name)).upper(),
            )
        except AttributeError:
            # BUG FIX: the TypeError was created but never raised, so a
            # non-addable type silently fell through with add_instr unbound.
            raise TypeError("type %s is not addable" % typename)
        dict_['_start_comprehension'] = pattern(
            build_instr,
            matchany[var],
            add_instr,
        )(_start_comprehension)
        dict_['_return_value'] = pattern(
            instructions.RETURN_VALUE,
            startcodes=(IN_COMPREHENSION,),
        )(_return_value)
    else:
        add_instr = None
    # The plain build handler applies in both cases (literal displays are
    # still possible even when comprehensions are overloaded).
    dict_['_build'] = pattern(build_instr)(_build)
    if not typename.endswith('s'):
        typename = typename + 's'
    return type(
        'overloaded_' + typename,
        (overloaded_constants(type_),),
        dict_,
    )
Factory for constant transformers that apply to a given build instruction. Parameters ---------- type_ : type The object type to overload the construction of. This must be one of "buildable" types, or types with a "BUILD_*" instruction. add_name : str, optional The suffix of the instruction tha adds elements to the collection. For example: 'add' or 'append' Returns ------- transformer : subclass of CodeTransformer A new code transformer class that will overload the provided literal types.
def add_apt_key(filename=None, url=None, keyid=None, keyserver='subkeys.pgp.net', update=False):
    """
    Trust packages signed with this public key.

    Example::

        import burlap

        # Varnish signing key from URL and verify fingerprint)
        burlap.deb.add_apt_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt')

        # Nginx signing key from default key server (subkeys.pgp.net)
        burlap.deb.add_apt_key(keyid='7BD9BF62')

        # From custom key server
        burlap.deb.add_apt_key(keyid='7BD9BF62', keyserver='keyserver.ubuntu.com')

        # From a file
        burlap.deb.add_apt_key(keyid='7BD9BF62', filename='nginx.asc')
    """
    # NOTE: the shell commands below are formatted with `% locals()`, so the
    # local names (filename, url, keyid, tmp_key, keyserver_opt, keyserver)
    # are part of the template contract -- do not rename them.
    if keyid is None:
        # No key id: add the key without fingerprint verification.
        if filename is not None:
            run_as_root('apt-key add %(filename)s' % locals())
        elif url is not None:
            run_as_root('wget %(url)s -O - | apt-key add -' % locals())
        else:
            raise ValueError('Either filename, url or keyid must be provided as argument')
    else:
        if filename is not None:
            # Verify the key fingerprint before trusting it.
            _check_pgp_key(filename, keyid)
            run_as_root('apt-key add %(filename)s' % locals())
        elif url is not None:
            # Download to a temp file so the fingerprint can be checked.
            tmp_key = '/tmp/tmp.burlap.key.%(keyid)s.key' % locals()
            run_as_root('wget %(url)s -O %(tmp_key)s' % locals())
            _check_pgp_key(tmp_key, keyid)
            run_as_root('apt-key add %(tmp_key)s' % locals())
        else:
            # Fetch the key directly from a key server.
            keyserver_opt = '--keyserver %(keyserver)s' % locals() if keyserver is not None else ''
            run_as_root('apt-key adv %(keyserver_opt)s --recv-keys %(keyid)s' % locals())
    if update:
        update_index()
Trust packages signed with this public key. Example:: import burlap # Varnish signing key from URL and verify fingerprint) burlap.deb.add_apt_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt') # Nginx signing key from default key server (subkeys.pgp.net) burlap.deb.add_apt_key(keyid='7BD9BF62') # From custom key server burlap.deb.add_apt_key(keyid='7BD9BF62', keyserver='keyserver.ubuntu.com') # From a file burlap.deb.add_apt_key(keyid='7BD9BF62', filename='nginx.asc'
def get_flashed_messages(
        with_categories: bool=False,
        category_filter: List[str]=(),  # BUG FIX: was a mutable default ([])
) -> Union[List[str], List[Tuple[str, str]]]:
    """Retrieve the flashed messages stored in the session.

    This is mostly useful in templates where it is exposed as a global
    function, for example

    .. code-block:: html+jinja

        <ul>
        {% for message in get_flashed_messages() %}
          <li>{{ message }}</li>
        {% endfor %}
        </ul>

    Note that caution is required for usage of ``category_filter`` as all
    messages will be popped, but only those matching the filter returned.
    See :func:`~quart.helpers.flash` for message creation.

    Arguments:
        with_categories: If true, return ``(category, message)`` pairs
            instead of bare messages.
        category_filter: Only return messages whose category is in this
            collection (all messages are still popped from the session).
    """
    # Pop unconditionally: flashes are one-shot regardless of the filter.
    flashes = session.pop('_flashes') if '_flashes' in session else []
    if category_filter:
        flashes = [flash for flash in flashes if flash[0] in category_filter]
    if not with_categories:
        flashes = [flash[1] for flash in flashes]
    return flashes
Retrieve the flashed messages stored in the session. This is mostly useful in templates where it is exposed as a global function, for example .. code-block:: html+jinja <ul> {% for message in get_flashed_messages() %} <li>{{ message }}</li> {% endfor %} </ul> Note that caution is required for usage of ``category_filter`` as all messages will be popped, but only those matching the filter returned. See :func:`~quart.helpers.flash` for message creation.
def authenticate(self, request):
    """Authenticate the user, requiring a logged-in account and CSRF.

    Mirrors DRF's ``SessionAuthentication`` but without the
    ``user.is_active`` check; anonymous users are still rejected.

    Args:
        request (HttpRequest)

    Returns:
        Tuple of `(user, token)`, or ``None`` when nobody is logged in.

    Raises:
        PermissionDenied: The CSRF token check failed.
    """
    # Work on the underlying Django HttpRequest object.
    http_request = request._request  # pylint: disable=protected-access
    current_user = getattr(http_request, 'user', None)

    # Reject missing or anonymous users.  Note the deliberate absence of
    # the `user.is_active` check that SessionAuthentication performs.
    if not current_user or current_user.is_anonymous:
        return None

    # Session authentication still requires CSRF validation.
    self.enforce_csrf(http_request)

    return (current_user, None)
Authenticate the user, requiring a logged-in account and CSRF. This is exactly the same as the `SessionAuthentication` implementation, with the `user.is_active` check removed. Args: request (HttpRequest) Returns: Tuple of `(user, token)` Raises: PermissionDenied: The CSRF token check failed.
def _compute_quantile(data, dims, cutoffs):
    """Compute the lower/upper cut values used by stretch_linear.

    Kept as a module-level function (not a closure) so that it stays
    serializable for dask.delayed on a multi-process scheduler.  Quantile
    requires the data to be loaded since it is not supported on dask
    arrays yet.
    """
    # Use xarray's quantile for backwards compatibility
    # (numpy only gained a 'quantile' function in 1.15).
    arr = xr.DataArray(data, dims=dims)
    # `delayed` hands us a fully computed xarray backed by an ndarray.
    left, right = arr.quantile([cutoffs[0], 1. - cutoffs[1]], dim=['x', 'y'])
    logger.debug("Interval: left=%s, right=%s", str(left), str(right))
    return left.data, right.data
Helper method for stretch_linear. Dask delayed functions need to be non-internal functions (created inside a function) to be serializable on a multi-process scheduler. Quantile requires the data to be loaded since it not supported on dask arrays yet.
def stop_and_persist(self, symbol=' ', text=None):
    """Stops the spinner and persists the final frame to be shown.

    Parameters
    ----------
    symbol : str, optional
        Symbol to be shown in final frame
    text: str, optional
        Text to be shown in final frame

    Returns
    -------
    self
    """
    if not self._enabled:
        return self

    symbol = decode_utf_8_text(symbol)

    if text is not None:
        text = decode_utf_8_text(text)
    else:
        text = self._text['original']

    text = text.strip()

    if self._text_color:
        text = colored_frame(text, self._text_color)

    self.stop()

    # Respect the configured placement of the symbol relative to the text
    # (clearer than the original single-element-list indexing trick).
    if self._placement == 'right':
        first, second = text, symbol
    else:
        first, second = symbol, text
    output = '\r{0} {1}\n'.format(first, second)

    with self.output:
        self.output.outputs = self._output(output)

    # BUG FIX: the docstring promises `self` (for chaining), but the
    # original only returned it on the disabled early-exit path.
    return self
Stops the spinner and persists the final frame to be shown. Parameters ---------- symbol : str, optional Symbol to be shown in final frame text: str, optional Text to be shown in final frame Returns ------- self
def evaluate(self, system_id=1, rouge_args=None):
    """
    Run ROUGE to evaluate the system summaries in system_dir against
    the model summaries in model_dir. The summaries are assumed to
    be in the one-sentence-per-line HTML format ROUGE understands.

    system_id: Optional system ID which will be printed in
               ROUGE's output.

    Returns: Rouge output as string.
    """
    self.write_config(system_id=system_id)
    cmd = [self._bin_path] + self.__get_options(rouge_args)

    # Point ROUGE at its data directory when a home dir is configured.
    env = None
    if getattr(self, "_home_dir", None):
        env = {'ROUGE_EVAL_HOME': self._home_dir}

    self.log.info(
        "Running ROUGE with command {}".format(" ".join(cmd)))
    return check_output(cmd, env=env).decode("UTF-8")
Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string.
def join(self, other, how='left', lsuffix='', rsuffix=''):
    """
    Join items with other Panel either on major and minor axes column.

    Parameters
    ----------
    other : Panel or list of Panels
        Index should be similar to one of the columns in this one
    how : {'left', 'right', 'outer', 'inner'}
        How to handle indexes of the two objects. Default: 'left'
        for joining on index, None otherwise
        * left: use calling frame's index
        * right: use input frame's index
        * outer: form union of indexes
        * inner: use intersection of indexes
    lsuffix : string
        Suffix to use from left frame's overlapping columns
    rsuffix : string
        Suffix to use from right frame's overlapping columns

    Returns
    -------
    joined : Panel
    """
    from pandas.core.reshape.concat import concat

    if isinstance(other, Panel):
        # Single-panel join: align both panels on the joined major/minor
        # axes, then merge the item blocks (suffixing overlapping items).
        join_major, join_minor = self._get_join_index(other, how)
        this = self.reindex(major=join_major, minor=join_minor)
        other = other.reindex(major=join_major, minor=join_minor)
        merged_data = this._data.merge(other._data, lsuffix, rsuffix)
        return self._constructor(merged_data)
    else:
        # Multi-panel join path: suffixes cannot disambiguate >2 panels.
        if lsuffix or rsuffix:
            raise ValueError('Suffixes not supported when passing '
                             'multiple panels')

        if how == 'left':
            # 'left' means "keep this panel's axes": emulate it via an
            # outer concat constrained to self's major/minor axes.
            how = 'outer'
            join_axes = [self.major_axis, self.minor_axis]
        elif how == 'right':
            raise ValueError('Right join not supported with multiple '
                             'panels')
        else:
            join_axes = None

        return concat([self] + list(other), axis=0, join=how,
                      join_axes=join_axes, verify_integrity=True)
Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel
def parse(cls, fptr, offset, length):
    """Parse JPEG 2000 color specification box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    ColourSpecificationBox
        Instance of the current colour specification box.
    """
    # Read the remainder of the box payload in one shot.
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    # First three single bytes: method, precedence, approximation.
    lst = struct.unpack_from('>BBB', read_buffer, offset=0)
    method, precedence, approximation = lst

    if method == 1:
        # enumerated colour space: a big-endian uint32 follows the header
        colorspace, = struct.unpack_from('>I', read_buffer, offset=3)
        if colorspace not in _COLORSPACE_MAP_DISPLAY.keys():
            msg = "Unrecognized colorspace ({colorspace})."
            msg = msg.format(colorspace=colorspace)
            warnings.warn(msg, UserWarning)
        icc_profile = None
    else:
        # ICC profile: everything after the 3 header bytes is the profile
        colorspace = None
        if (num_bytes - 3) < 128:
            # An ICC profile header is 128 bytes; shorter data is corrupt.
            msg = ("ICC profile header is corrupt, length is "
                   "only {length} when it should be at least 128.")
            warnings.warn(msg.format(length=num_bytes - 3), UserWarning)
            icc_profile = None
        else:
            profile = _ICCProfile(read_buffer[3:])
            icc_profile = profile.header

    return cls(method=method, precedence=precedence,
               approximation=approximation, colorspace=colorspace,
               icc_profile=icc_profile, length=length, offset=offset)
Parse JPEG 2000 color specification box. Parameters ---------- fptr : file Open file object. offset : int Start position of box in bytes. length : int Length of the box in bytes. Returns ------- ColourSpecificationBox Instance of the current colour specification box.
def read_folder(folder, ext='*', uppercase=False, replace_dot='.', parent=''):
    """
    Read every file in the folder (recursively) with the given extension.

    :param folder: str of the folder name
    :param ext: str of the extension, or ``'*'`` for all files
    :param uppercase: bool if True will uppercase all the file names
    :param replace_dot: str that replaces "." in the filename keys
    :param parent: str of the parent folder (prefix used during recursion)
    :return: dict of basename (with parent prefix) to the text in the file
    """
    ret = {}
    if not os.path.exists(folder):
        return ret

    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if os.path.isdir(path):
            # Recurse, prefixing keys with the child folder name.
            ret.update(read_folder(path, ext, uppercase, replace_dot,
                                   parent=parent + entry + '/'))
        elif ext == '*' or entry.endswith(ext):
            key = entry.replace('.', replace_dot)
            # Conditional expression instead of the fragile `and/or` trick.
            key = key.upper() if uppercase else key
            ret[parent + key] = read_file(path)
    return ret
This will read all of the files in the folder with the extension equal to ext :param folder: str of the folder name :param ext: str of the extension :param uppercase: bool if True will uppercase all the file names :param replace_dot: str will replace "." in the filename :param parent: str of the parent folder :return: dict of basename with the value of the text in the file
def stderr_output(cmd):
    """Run *cmd* via check_output, hiding stderr unless in debug mode.

    :param cmd: argv list for the GPG invocation
    :return: the command's decoded output
    :raises CryptoritoError: if the command exits non-zero
    """
    handle, gpg_stderr = stderr_handle()
    try:
        output = subprocess.check_output(cmd, stderr=gpg_stderr)  # nosec
        return str(polite_string(output))
    except subprocess.CalledProcessError as exception:
        LOGGER.debug("GPG Command %s", ' '.join(exception.cmd))
        LOGGER.debug("GPG Output %s", exception.output)
        raise CryptoritoError('GPG Execution')
    finally:
        # BUG FIX: the handle previously leaked when check_output raised;
        # close it on every exit path.
        if handle:
            handle.close()
Wraps the execution of check_output in a way that ignores stderr when not in debug mode
def _apply_worksheet_template_reference_analyses(self, wst, type='all'): """ Add reference analyses to worksheet according to the worksheet template layout passed in. Does not overwrite slots that are already filled. :param wst: worksheet template used as the layout """ if type == 'all': self._apply_worksheet_template_reference_analyses(wst, 'b') self._apply_worksheet_template_reference_analyses(wst, 'c') return if type not in ['b', 'c']: return references = self._resolve_reference_samples(wst, type) for reference in references: slot = reference['slot'] sample = reference['sample'] services = reference['supported_services'] self.addReferenceAnalyses(sample, services, slot)
Add reference analyses to worksheet according to the worksheet template layout passed in. Does not overwrite slots that are already filled. :param wst: worksheet template used as the layout
def _adb(self, commands):
    """Call the adb executable from the SDK, passing the given commands
    as arguments.

    Streams adb's combined stdout/stderr to this process's stdout.
    """
    ctx = self.ctx
    # Resolve SDK/NDK paths and API levels before locating the adb binary.
    ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir,
                                  user_ndk_dir=self.ndk_dir,
                                  user_android_api=self.android_api,
                                  user_ndk_api=self.ndk_api)
    if platform in ('win32', 'cygwin'):
        adb = sh.Command(join(ctx.sdk_dir, 'platform-tools', 'adb.exe'))
    else:
        adb = sh.Command(join(ctx.sdk_dir, 'platform-tools', 'adb'))
    info_notify('Starting adb...')
    # _iter/_out_bufsize=1 stream output line by line; _err_to_out folds
    # stderr into the same stream.
    output = adb(*commands, _iter=True, _out_bufsize=1, _err_to_out=True)
    for line in output:
        sys.stdout.write(line)
        sys.stdout.flush()
Call the adb executable from the SDK, passing the given commands as arguments.
def hrmint(xvals, yvals, x):
    """
    Evaluate a Hermite interpolating polynomial at a specified
    abscissa value.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/hrmint_c.html

    :param xvals: Abscissa values.
    :type xvals: Array of floats
    :param yvals: Ordinate and derivative values.
    :type yvals: Array of floats
    :param x: Point at which to interpolate the polynomial.
    :type x: float
    :return: Interpolated function value at x and the Interpolated
             function's derivative at x
    :rtype: tuple
    """
    # Scratch space required by hrmint_c, sized from yvals (which holds
    # interleaved ordinates and derivatives).
    work = stypes.emptyDoubleVector(int(2*len(yvals)+1))
    n = ctypes.c_int(len(xvals))
    xvals = stypes.toDoubleVector(xvals)
    yvals = stypes.toDoubleVector(yvals)
    x = ctypes.c_double(x)
    # Output parameters filled in by the CSPICE routine.
    f = ctypes.c_double(0)
    df = ctypes.c_double(0)
    libspice.hrmint_c(n, xvals, yvals, x, work, f, df)
    return f.value, df.value
Evaluate a Hermite interpolating polynomial at a specified abscissa value. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/hrmint_c.html :param xvals: Abscissa values. :type xvals: Array of floats :param yvals: Ordinate and derivative values. :type yvals: Array of floats :param x: Point at which to interpolate the polynomial. :type x: int :return: Interpolated function value at x and the Interpolated function's derivative at x :rtype: tuple
def min_time(self):
    """
    Get the `~astropy.time.Time` time of the tmoc first observation

    Returns
    -------
    min_time : `astropy.time.Time`
        time of the first observation
    """
    # The interval set stores times in microseconds of a day; convert the
    # minimum back to a Julian date in the TDB scale.
    jd_min = self._interval_set.min / TimeMOC.DAY_MICRO_SEC
    return Time(jd_min, format='jd', scale='tdb')
Get the `~astropy.time.Time` time of the tmoc first observation Returns ------- min_time : `astropy.time.Time` time of the first observation
def master_primary_name(self) -> Optional[str]:
    """
    Return the name of the primary node of the master instance,
    or None when no primary is known yet.
    """
    primary = self.master_replica.primaryName
    return self.master_replica.getNodeName(primary) if primary else None
Return the name of the primary node of the master instance
def get_callproc_signature(self, name, param_types):
    """Returns a procedure's signature from the name and list of types.

    :name: the name of the procedure
    :params: can be either strings, or 2-tuples. 2-tuples must be of
             the form (name, db_type).
    :return: the procedure's signature
    """
    # 2-tuple specs carry (name, db_type); plain specs are just the type.
    if isinstance(param_types[0], (list, tuple)):
        placeholders = [self.sql_writer.to_placeholder(*spec)
                        for spec in param_types]
    else:
        placeholders = [self.sql_writer.to_placeholder(None, spec)
                        for spec in param_types]
    return name + self.sql_writer.to_tuple(placeholders)
Returns a procedure's signature from the name and list of types. :name: the name of the procedure :params: can be either strings, or 2-tuples. 2-tuples must be of the form (name, db_type). :return: the procedure's signature
def main(argv=None): """to install and/or test from the command line use:: python cma.py [options | func dim sig0 [optkey optval][optkey optval]...] with options being ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity. ``install`` to install cma.py (uses setup from distutils.core). ``--doc`` for more infos. Or start Python or (even better) ``ipython`` and:: import cma cma.main('--test') help(cma) help(cma.fmin) res = fmin(cma.fcts.rosen, 10 * [0], 1) cma.plot() Examples ======== Testing with the local python distribution from a command line in a folder where ``cma.py`` can be found:: python cma.py --test And a single run on the Rosenbrock function:: python cma.py rosen 10 1 # dimension initial_sigma python cma.py plot In the python shell:: import cma cma.main('--test') """ if argv is None: argv = sys.argv # should have better been sys.argv[1:] else: if isinstance(argv, list): argv = ['python'] + argv # see above else: argv = ['python'] + [argv] # uncomment for unit test # _test() # handle input arguments, getopt might be helpful ;-) if len(argv) >= 1: # function and help if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'): print(main.__doc__) fun = None elif argv[1].startswith('-t') or argv[1].startswith('--test'): import doctest if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')): # verbose print('doctest for cma.py: due to different platforms and python versions') print('and in some cases due to a missing unique random seed') print('many examples will "fail". This is OK, if they give a similar') print('to the expected result and if no exception occurs. ') # if argv[1][2] == 'v': doctest.testmod(sys.modules[__name__], report=True) # this is quite cool! 
else: # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')): print('doctest for cma.py: launching...') # not anymore: (it might be necessary to close the pop up window to finish) fn = '_cma_doctest_.txt' stdout = sys.stdout try: with open(fn, 'w') as f: sys.stdout = f clock = ElapsedTime() doctest.testmod(sys.modules[__name__], report=True) # this is quite cool! t_elapsed = clock() finally: sys.stdout = stdout process_doctest_output(fn) # clean up try: import os for name in os.listdir('.'): if (name.startswith('bound_method_FitnessFunctions.rosen_of_cma.FitnessFunctions_object_at_') and name.endswith('.pkl')): os.remove(name) except: pass print('doctest for cma.py: finished (no other output should be seen after launching, more in file _cma_doctest_.txt)') print(' elapsed time [s]:', t_elapsed) return elif argv[1] == '--doc': print(__doc__) print(CMAEvolutionStrategy.__doc__) print(fmin.__doc__) fun = None elif argv[1] == '--fcts': print('List of valid function names:') print([d for d in dir(fcts) if not d.startswith('_')]) fun = None elif argv[1] in ('install', '--install'): from distutils.core import setup setup(name="cma", long_description=__doc__, version=__version__.split()[0], description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python", author="Nikolaus Hansen", author_email="hansen at lri.fr", maintainer="Nikolaus Hansen", maintainer_email="hansen at lri.fr", url="https://www.lri.fr/~hansen/cmaes_inmatlab.html#python", license="BSD", classifiers = [ "Intended Audience :: Science/Research", "Intended Audience :: Education", "Intended Audience :: Other Audience", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", 
"Development Status :: 4 - Beta", "Environment :: Console", "License :: OSI Approved :: BSD License", # "License :: OSI Approved :: MIT License", ], keywords=["optimization", "CMA-ES", "cmaes"], py_modules=["cma"], requires=["numpy"], ) fun = None elif argv[1] in ('plot',): plot(name=argv[2] if len(argv) > 2 else None) raw_input('press return') fun = None elif len(argv) > 3: fun = eval('fcts.' + argv[1]) else: print('try -h option') fun = None if fun is not None: if len(argv) > 2: # dimension x0 = np.ones(eval(argv[2])) if len(argv) > 3: # sigma sig0 = eval(argv[3]) opts = {} for i in xrange(5, len(argv), 2): opts[argv[i - 1]] = eval(argv[i]) # run fmin if fun is not None: tic = time.time() fmin(fun, x0, sig0, opts) # ftarget=1e-9, tolfacupx=1e9, verb_log=10) # plot() # print ' best function value ', res[2]['es'].best[1] print('elapsed time [s]: + %.2f', round(time.time() - tic, 2)) elif not len(argv): fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {'ftarget':1e-9})
to install and/or test from the command line use:: python cma.py [options | func dim sig0 [optkey optval][optkey optval]...] with options being ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity. ``install`` to install cma.py (uses setup from distutils.core). ``--doc`` for more infos. Or start Python or (even better) ``ipython`` and:: import cma cma.main('--test') help(cma) help(cma.fmin) res = fmin(cma.fcts.rosen, 10 * [0], 1) cma.plot() Examples ======== Testing with the local python distribution from a command line in a folder where ``cma.py`` can be found:: python cma.py --test And a single run on the Rosenbrock function:: python cma.py rosen 10 1 # dimension initial_sigma python cma.py plot In the python shell:: import cma cma.main('--test')
def mv(hdfs_src, hdfs_dst):
    """Move a file on hdfs

    :param hdfs_src: Source (str)
    :param hdfs_dst: Destination (str)
    :raises: IOError: If unsuccessful
    """
    # Delegate to the hadoop CLI; the helper raises on a non-zero exit.
    rcode, stdout, stderr = _checked_hadoop_fs_command(
        "hadoop fs -mv %s %s" % (hdfs_src, hdfs_dst))
Move a file on hdfs :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful
def _generate_initial_score(self): """Runs the evaluation function for the initial pose.""" self.current_energy = self.eval_fn(self.polypeptide, *self.eval_args) self.best_energy = copy.deepcopy(self.current_energy) self.best_model = copy.deepcopy(self.polypeptide) return
Runs the evaluation function for the initial pose.
def filename(self):
    """Filename of the attachment, without the full 'attachment' path."""
    if not self.value:
        return None
    stored_path = self._json_data.get('value')
    if not stored_path:
        return None
    # Keep only the last path component.
    return stored_path.split('/')[-1]
Filename of the attachment, without the full 'attachment' path.
def _check_auth(self, must_admin, redir_login=True):
    """ check if a user is authenticated and, optionally, an administrator

    if user not authenticated -> redirect to login page (with escaped URL
    of the originally requested page (redirection after login)

    if user authenticated, not admin and must_admin enabled -> 403 error

    @boolean must_admin: flag "user must be an administrator to access
    this page"
    @boolean redir_login: if True, redirect unauthenticated users to the
    sign-in page instead of raising 403
    @rtype str: login of the user
    """
    if self.auth_mode == 'none':
        return 'anonymous'
    username = self._check_session()

    # Preserve the query string so the post-login redirect is exact.
    if cherrypy.request.query_string == '':
        qs = ''
    else:
        qs = '?' + cherrypy.request.query_string
    # Escaped version of the requested URL
    quoted_requrl = quote_plus(cherrypy.url() + qs)
    if not username:
        # return to login page (with quoted url in query string)
        if redir_login:
            raise cherrypy.HTTPRedirect(
                "/signin?url=%(url)s" % {'url': quoted_requrl},
            )
        else:
            raise cherrypy.HTTPError(
                "403 Forbidden",
                "You must be logged in to access this ressource.",
            )
    if 'connected' not in cherrypy.session \
            or not cherrypy.session['connected']:
        # Session exists but is not marked connected: same treatment as
        # an unauthenticated user.
        if redir_login:
            raise cherrypy.HTTPRedirect(
                "/signin?url=%(url)s" % {'url': quoted_requrl},
            )
        else:
            raise cherrypy.HTTPError(
                "403 Forbidden",
                "You must be logged in to access this ressource.",
            )

    if cherrypy.session['connected'] and \
            not cherrypy.session['isadmin']:
        if must_admin:
            # user is not an administrator, so he gets 403 Forbidden
            raise cherrypy.HTTPError(
                "403 Forbidden",
                "You are not allowed to access this resource.",
            )
        else:
            return username

    if cherrypy.session['connected'] and \
            cherrypy.session['isadmin']:
        return username
    else:
        # Defensive fallback; with the checks above this branch should be
        # unreachable, but it keeps the fail-closed behavior.
        if redir_login:
            raise cherrypy.HTTPRedirect(
                "/signin?url=%(url)s" % {'url': quoted_requrl},
            )
        else:
            raise cherrypy.HTTPError(
                "403 Forbidden",
                "You must be logged in to access this ressource.",
            )
check if a user is authenticated and, optionally, an administrator if user not authenticated -> redirect to login page (with escaped URL of the originally requested page (redirection after login) if user authenticated, not admin and must_admin enabled -> 403 error @boolean must_admin: flag "user must be an administrator to access this page" @rtype str: login of the user
def _get_simple(self, name):
    """
    Query the stack for a non-dotted name.

    """
    # Search from the most recently pushed context downward.
    for context in reversed(self._stack):
        value = _get_value(context, name)
        if value is not _NOT_FOUND:
            return value
    raise KeyNotFoundError(name, "part missing")
Query the stack for a non-dotted name.
def get_next_non_summer_term(term):
    """
    Return the Term object for the quarter after as the given term
    (skip the summer quarter)
    """
    candidate = get_term_after(term)
    # Summer is skipped: jump straight to autumn instead.
    if candidate.is_summer_quarter():
        return get_next_autumn_term(candidate)
    return candidate
Return the Term object for the quarter after as the given term (skip the summer quarter)
def list_():
    '''
    Get a list of automatically running programs

    CLI Example:

    .. code-block:: bash

        salt '*' autoruns.list
    '''
    autoruns = {}

    # Find autoruns in registry (machine-wide, 64-bit view, and per-user)
    keys = ['HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
            'HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run /reg:64',
            'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
            ]
    for key in keys:
        autoruns[key] = []
        cmd = ['reg', 'query', key]
        for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
            if line and line[0:4] != "HKEY" and line[0:5] != "ERROR":  # Remove junk lines
                autoruns[key].append(line)

    # Find autoruns in user's startup folder; try the pre-Vista layout
    # first, then fall back to the modern one.
    user_dir = 'C:\\Documents and Settings\\'
    startup_dir = '\\Start Menu\\Programs\\Startup'
    full_dirs = _get_dirs(user_dir, startup_dir)
    if not full_dirs:
        user_dir = 'C:\\Users\\'
        startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
        full_dirs = _get_dirs(user_dir, startup_dir)
    for full_dir in full_dirs:
        files = os.listdir(full_dir)
        autoruns[full_dir] = []
        for single_file in files:
            autoruns[full_dir].append(single_file)

    return autoruns
Get a list of automatically running programs CLI Example: .. code-block:: bash salt '*' autoruns.list
def accuracy(self):
    """
    Fraction of model predictions matching the observed labels:
    (TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
    """
    observed_labels = np.array(
        [self.observed.metadata[idx] for idx in self.observed.arr])
    matches = (self.model_predictions() == observed_labels).sum()
    return float(matches) / self.data_size
Calculates the accuracy of the tree by comparing the model predictions to the dataset (TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
def _invalid_frame(fobj):
    """Return True if the stack frame should be skipped.

    A frame is invalid when its source comes from one of the excluded
    modules, or when its filename is not a regular file (e.g. interactive
    input or compiled-in code).

    DOC FIX: the previous docstring said "Select valid stack frame",
    which is the opposite of what the predicate returns.
    """
    fin = fobj.f_code.co_filename
    # Generator expression: no intermediate list needed for any().
    invalid_module = any(fin.endswith(item) for item in _INVALID_MODULES_LIST)
    return invalid_module or (not os.path.isfile(fin))
Return True if the stack frame is invalid and should be skipped during processing.
def get_gravatar(email, size=80, default='identicon'):
    """ Gets a Gravatar for an email address.

    :param email:
        The email address to look up; hashed (MD5 of the lower-cased
        address) before being sent to Gravatar.

    :param size:
        The size in pixels of one side of the Gravatar's square image.
        Optional, if not supplied will default to ``80``.

    :param default:
        Defines what should be displayed if no image is found for this user.
        Optional argument which defaults to ``identicon``. The argument
        can be a URI to an image or one of the following options:

            ``404``
                Do not load any image if none is associated with the email
                hash, instead return an HTTP 404 (File Not Found) response.

            ``mm``
                Mystery-man, a simple, cartoon-style silhouetted outline of
                a person (does not vary by email hash).

            ``identicon``
                A geometric pattern based on an email hash.

            ``monsterid``
                A generated 'monster' with different colors, faces, etc.

            ``wavatar``
                Generated faces with differing features and backgrounds

    :return: The URI pointing to the Gravatar.
    """
    if userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE:
        base_url = 'https://secure.gravatar.com/avatar/'
    else:
        base_url = '//www.gravatar.com/avatar/'

    # Gravatar identifies accounts by the MD5 of the lower-cased address.
    gravatar_id = md5(email.lower().encode('utf-8')).hexdigest()
    gravatar_url = '{0}{1}?'.format(base_url, gravatar_id)

    gravatar_url += urlencode({
        's': str(size),
        'd': default
    })
    return gravatar_url
Gets a Gravatar for an email address. :param size: The size in pixels of one side of the Gravatar's square image. Optional, if not supplied will default to ``80``. :param default: Defines what should be displayed if no image is found for this user. Optional argument which defaults to ``identicon``. The argument can be a URI to an image or one of the following options: ``404`` Do not load any image if none is associated with the email hash, instead return an HTTP 404 (File Not Found) response. ``mm`` Mystery-man, a simple, cartoon-style silhouetted outline of a person (does not vary by email hash). ``identicon`` A geometric pattern based on an email hash. ``monsterid`` A generated 'monster' with different colors, faces, etc. ``wavatar`` Generated faces with differing features and backgrounds :return: The URI pointing to the Gravatar.
def get_main_version(version):
    "Returns main version (X.Y[.Z]) from VERSION."
    # Drop the micro component when it is zero.
    significant = version[:2] if version[2] == 0 else version[:3]
    return '.'.join(str(part) for part in significant)
Returns main version (X.Y[.Z]) from VERSION.
def sequence_id_factory(value, datatype_cls, validation_level=None):
    """
    Creates a :class:`SI <hl7apy.base_datatypes.SI>` object

    The value in input can be a string representing an integer number or
    an ``int`` (i.e. anything valid for ``int()``); otherwise a
    :exc:`ValueError` is raised. An empty string or ``None`` are also
    allowed.

    :type value: ``str`` or ``None``
    :param value: the value to assign the date object

    :type datatype_cls: `class`
    :param datatype_cls: the SI class to use. It has to be loaded from one
        implementation of the different version modules

    :type validation_level: ``int``
    :param validation_level: a value from
        :class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or
        ``None`` to use the default value

    :rtype: :class:`SI <hl7apy.base_datatypes.SI>`
    """
    # Empty string or None yields an empty SI instance.
    if not value:
        return datatype_cls(validation_level=validation_level)
    try:
        return datatype_cls(int(value), validation_level=validation_level)
    except ValueError:
        # Not parseable as an integer (or rejected by the datatype).
        raise ValueError('{0} is not an HL7 valid SI value'.format(value))
Creates a :class:`SI <hl7apy.base_datatypes.SI>` object The value in input can be a string representing an integer number or an ``int``. (i.e. a string valid for ``int()`` ). If it's not, a :exc:`ValueError` is raised Also an empty string or ``None`` are allowed :type value: ``str`` or ``None`` :param value: the value to assign the date object :type datatype_cls: `class` :param value: the SI class to use. It has to be loaded from one implementation of the different version modules :type validation_level: ``int`` :param validation_level: It must be a value from class :class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value :rtype: :class:`SI <hl7apy.base_datatypes.SI>`
def add_report_data(list_all=None, module_name="TestModule", **kwargs):
    ''' add report data to a list
    @param list_all: a list which saves the report data; a fresh list is
        created when omitted (BUG FIX: was a mutable default argument,
        silently shared between calls)
    @param module_name: test set name or test module name
    @param kwargs: such as
        case_name: testcase name
        raw_case_name: original testcase name (used for de-duplication)
        status: test result, Pass or Fail
        resp_tester: responsible tester who write this case
        tester: tester who execute the test
        start_at: tester run this case at time
        end_at: tester stop this case at time
    @return: the updated list
    '''
    if list_all is None:
        list_all = []

    start_at = kwargs.get("start_at")
    case_name = kwargs.get("case_name", "TestCase")
    raw_case_name = kwargs.get("raw_case_name", "TestCase")
    exec_date_time = time.localtime(start_at)

    _case_report = {
        'resp_tester': kwargs.get("resp_tester", "administrator"),
        'tester': kwargs.get("tester", "administrator"),
        'case_name': case_name,
        'raw_case_name': raw_case_name,
        'status': kwargs.get("status", "Pass"),
        'exec_date': time.strftime("%Y-%m-%d", exec_date_time),
        'exec_time': time.strftime("%H:%M:%S", exec_date_time),
        'start_at': start_at,
        'end_at': kwargs.get("end_at"),
    }

    for module in list_all:
        if module_name != module["Name"]:
            continue
        # Module already present: update an existing case in place, or
        # append the new case to it.
        for case in module["TestCases"]:
            if raw_case_name == case["raw_case_name"]:
                case.update(_case_report)
                return list_all
        module["TestCases"].append(_case_report)
        return list_all

    # First case for this module.
    list_all.append({"Name": module_name, "TestCases": [_case_report]})
    return list_all
add report data to a list @param list_all: a list which save the report data @param module_name: test set name or test module name @param kwargs: such as case_name: testcase name status: test result, Pass or Fail resp_tester: responsible tester who write this case tester: tester who execute the test start_at: tester run this case at time end_at: tester stop this case at time
def set_values(self, choice_ids):
    """Reorder the stored choices to match ``choice_ids``.

    ``choice_ids`` is a list of choiceIds, like
    ["57978959cdfc5c42eefb36d1", "57978959cdfc5c42eefb36d0",
     "57978959cdfc5c42eefb36cf", "57978959cdfc5c42eefb36ce"]
    """
    # For each requested id, pull the matching choice object out of the
    # original ordering; an unknown id raises IndexError, as before.
    self.my_osid_object._my_map['choices'] = [
        [choice for choice in self._original_choice_order
         if choice['id'] == requested_id][0]
        for requested_id in choice_ids
    ]
assume choice_ids is a list of choiceIds, like ["57978959cdfc5c42eefb36d1", "57978959cdfc5c42eefb36d0", "57978959cdfc5c42eefb36cf", "57978959cdfc5c42eefb36ce"]
def kill_all(self):
    """kill all slaves and reap the monitor"""
    for child_pid in self.children:
        try:
            os.kill(child_pid, signal.SIGTRAP)
        except OSError:
            # Child already exited (or cannot be signalled); skip it.
            pass
    self.join()
kill all slaves and reap the monitor
def clear_data(self):
    """Removes the content data.

    Deletes the stored blob from GridFS (when one exists) and resets the
    ``data`` entry of the backing map to its default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # A read-only or required data element may not be cleared.
    if (self.get_data_metadata().is_read_only() or
            self.get_data_metadata().is_required()):
        raise errors.NoAccess()
    if self._my_map['data'] == self._data_default:
        # Already at the default; nothing stored in GridFS to delete.
        return
    dbase = JSONClientValidated('repository',
                                runtime=self._runtime).raw()
    filesys = gridfs.GridFS(dbase)
    # _my_map['data'] holds the GridFS file id of the stored content.
    filesys.delete(self._my_map['data'])
    self._my_map['data'] = self._data_default
    # Drop the cached base64 representation so it cannot go stale.
    del self._my_map['base64']
Removes the content data. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
def iter_components(self):
    """Iterate over all defined components yielding IOTile objects."""
    for component_name in self.list_components():
        yield self.get_component(component_name)
Iterate over all defined components yielding IOTile objects.
def channels_rename(self, room_id, name, **kwargs):
    """Changes the name of the channel."""
    payload = dict(roomId=room_id, name=name, kwargs=kwargs)
    return self.__call_api_post('channels.rename', **payload)
Changes the name of the channel.
def place(vertices_resources, nets, machine, constraints, random=default_random):
    """A random placer.

    This algorithm performs uniform-random placement of vertices (completely
    ignoring connectivty) and thus in the general case is likely to produce
    very poor quality placements. It exists primarily as a baseline
    comparison for placement quality and is probably of little value to most
    users.

    Parameters
    ----------
    vertices_resources : {vertex: resources, ...}
        Resources consumed by each vertex; used to check a vertex fits on a
        candidate chip.
    nets : list
        Passed through constraint preprocessing only; placement itself
        ignores connectivity.
    machine : mapping of chip location -> available resources
        A copy is taken and mutated to track remaining resources.
    constraints : list
        Location, reserve-resource and same-chip constraints are honoured.
    random : :py:class:`random.Random`
        Defaults to ``import random`` but can be set to your own instance of
        :py:class:`random.Random` to allow you to control the seed and
        produce deterministic results. For results to be deterministic,
        vertices_resources must be supplied as an
        :py:class:`collections.OrderedDict`.

    Returns
    -------
    {vertex: location, ...}
        The chosen location of every vertex.
    """
    # Within the algorithm we modify the resource availability values in the
    # machine to account for the effects of the current placement. As a result,
    # an internal copy of the structure must be made.
    machine = machine.copy()

    # {vertex: (x, y), ...} gives the location of all vertices, updated
    # throughout the function.
    placements = {}

    # Handle constraints
    vertices_resources, nets, constraints, substitutions = \
        apply_same_chip_constraints(vertices_resources, nets, constraints)
    for constraint in constraints:
        if isinstance(constraint, LocationConstraint):
            # Location constraints are handled by recording the set of fixed
            # vertex locations and subtracting their resources from the chips
            # they're allocated to.
            location = constraint.location
            if location not in machine:
                # NOTE(review): this interpolates the whole *machine* into the
                # error message; it looks like the constraint (or location)
                # was intended here -- confirm upstream before changing.
                raise InvalidConstraintError(
                    "Chip requested by {} unavailable".format(machine))
            vertex = constraint.vertex

            # Record the constrained vertex's location
            placements[vertex] = location

            # Make sure the vertex fits at the requested location (updating the
            # resource availability after placement)
            resources = vertices_resources[vertex]
            machine[location] = subtract_resources(machine[location],
                                                   resources)
            if overallocated(machine[location]):
                raise InsufficientResourceError(
                    "Cannot meet {}".format(constraint))
        elif isinstance(constraint,  # pragma: no branch
                        ReserveResourceConstraint):
            apply_reserve_resource_constraint(machine, constraint)

    # The set of vertices which have not been constrained.
    movable_vertices = [v for v in vertices_resources
                        if v not in placements]

    # Chips which may still have room for more vertices.
    locations = set(machine)

    for vertex in movable_vertices:
        # Keep choosing random chips until we find one where the vertex fits.
        while True:
            if len(locations) == 0:
                raise InsufficientResourceError(
                    "Ran out of chips while attempting to place vertex "
                    "{}".format(vertex))
            location = random.sample(locations, 1)[0]
            resources_if_placed = subtract_resources(
                machine[location], vertices_resources[vertex])
            if overallocated(resources_if_placed):
                # The vertex won't fit on this chip, we'll assume it is full
                # and not try it in the future.
                locations.remove(location)
            else:
                # The vertex fits: record the resources consumed and move on to
                # the next vertex.
                placements[vertex] = location
                machine[location] = resources_if_placed
                break

    # Undo the same-chip-constraint substitutions made at the start.
    finalise_same_chip_constraints(substitutions, placements)

    return placements
A random placer. This algorithm performs uniform-random placement of vertices (completely ignoring connectivty) and thus in the general case is likely to produce very poor quality placements. It exists primarily as a baseline comparison for placement quality and is probably of little value to most users. Parameters ---------- random : :py:class:`random.Random` Defaults to ``import random`` but can be set to your own instance of :py:class:`random.Random` to allow you to control the seed and produce deterministic results. For results to be deterministic, vertices_resources must be supplied as an :py:class:`collections.OrderedDict`.
def _sra_download_worker(*args): """A worker to download SRA files. To be used with multiprocessing. """ gsm = args[0][0] email = args[0][1] dirpath = args[0][2] kwargs = args[0][3] return (gsm.get_accession(), gsm.download_SRA(email, dirpath, **kwargs))
A worker to download SRA files. To be used with multiprocessing.
def load_config():
    """
    Load settings from default config and optionally overwrite with config
    file and commandline parameters (in that order).
    """
    # Layering order: defaults -> config file -> commandline.
    config = flatten(default_config.DEFAULT_CONFIG)
    cli_config = flatten(parse_args())

    if "configfile" in cli_config:
        logging.info("Reading config file {}".format(cli_config['configfile']))
        file_config = parse_configfile(cli_config['configfile'])
        config = overwrite_config(config, file_config)

    # Parameters from commandline take precedence over all others.
    config = overwrite_config(config, cli_config)

    # Raise the logging verbosity when requested.
    if 'verbose' in config:
        verbosity = config['verbose']
        if verbosity == 1:
            logging.getLogger().setLevel(logging.INFO)
        elif verbosity > 1:
            logging.getLogger().setLevel(logging.DEBUG)

    return ObjectView(config)
Load settings from default config and optionally overwrite with config file and commandline parameters (in that order).
def webhooks(self):
    """
    Access the webhooks

    :returns: twilio.rest.messaging.v1.session.webhook.WebhookList
    :rtype: twilio.rest.messaging.v1.session.webhook.WebhookList
    """
    # Build the list resource lazily and cache it on the instance.
    if self._webhooks is None:
        session_sid = self._solution['sid']
        self._webhooks = WebhookList(self._version, session_sid=session_sid)
    return self._webhooks
Access the webhooks :returns: twilio.rest.messaging.v1.session.webhook.WebhookList :rtype: twilio.rest.messaging.v1.session.webhook.WebhookList
def getrdfdata():
    """Downloads Project Gutenberg RDF catalog.

    Yields:
        xml.etree.ElementTree.Element: An etext meta-data definition.
    """
    # Only download the catalog once; reuse a previously fetched archive.
    if not os.path.exists(RDFFILES):
        urllib.urlretrieve(RDFURL, RDFFILES)
    with tarfile.open(RDFFILES) as archive:
        for tarinfo in archive:
            yield ElementTree.parse(archive.extractfile(tarinfo))
Downloads Project Gutenberg RDF catalog. Yields: xml.etree.ElementTree.Element: An etext meta-data definition.
def get_instance(self, contract_name: str):
    """
    Return a web3 contract instance belonging to this deployment after
    validating the contract name.

    The deployment's ``contract_type`` is used to look up the contract
    factory, which covers deployments that use a contract alias.

    :param contract_name: name of the deployed contract; validated (with its
        references) before use.
    :return: a ``w3.eth.contract`` instance bound to the deployed address.

    NOTE: the original signature was annotated ``-> None`` although the
    method clearly returns the contract instance; the annotation was wrong
    and has been removed.
    """
    self._validate_name_and_references(contract_name)

    deployment = self.deployment_data[contract_name]
    factory = self.contract_factories[deployment["contract_type"]]
    address = to_canonical_address(deployment["address"])
    contract_kwargs = {
        "abi": factory.abi,
        "bytecode": factory.bytecode,
        "bytecode_runtime": factory.bytecode_runtime,
    }
    return self.w3.eth.contract(address=address, **contract_kwargs)
Fetches a contract instance belonging to deployment after validating contract name.
def load(self, prof_name):
    """
    Load the profile with the given name.

    :param str prof_name: Profile name.
    :rtype: ProfileStub
    :return: A stub to the loaded profile.
    :raises Exception: If the profile does not exist.
    """
    prof_dir = self.__profile_dir(prof_name)
    prof_ini_path = self.__profile_ini_path(prof_dir)
    if not os.path.exists(prof_ini_path):
        msg = "Profile '{}' does not exist"
        raise Exception(msg.format(prof_name))

    # Load profile. The context manager guarantees the file is closed even
    # if parsing raises (the original leaked the handle on error).
    prof_ini = configparser.ConfigParser()
    with open(prof_ini_path, "r") as prof_ini_file:
        prof_ini.read_file(prof_ini_file)

    # Prepare profile
    prof_type = prof_ini["profile"]["type"]
    prof_stub = self.__profile_stub(prof_name, prof_type, prof_dir)
    prof_stub.prepare(prof_ini["properties"])

    return prof_stub
Load the profile with the given name. :param str prof_name: Profile name. :rtype: ProfileStub :return: An stub to loaded profile.
def replace(self, src):
    """Given some source html, substitute and annotate as applicable.

    Returns the annotation string (``self.annotation`` interpolated with
    the substitution's note) followed by the replacement html when ``src``
    has a registered substitution; otherwise returns ``src`` unchanged.
    """
    # Direct dict membership instead of the original O(n) scan over keys.
    if src in self.substitutions:
        replacement, note = self.substitutions[src]
        annotation = self.annotation % note
        return annotation + replacement
    return src
Given some source html substitute and annotated as applicable
def get_user(self, user_id=None, username=None, email=None):
    """
    Returns the user specified by either ID, username or email.

    Since more than one user can have the same email address, searching by
    that term will return a list of 1 or more User objects. Searching by
    username or ID will return a single User.

    If a user_id that doesn't belong to the current account is searched
    for, a Forbidden exception is raised. When searching by username or
    email, a NotFound exception is raised if there is no matching user.
    """
    # Exactly one lookup key is honoured, in priority order.
    if user_id:
        uri = "/users/%s" % user_id
    elif username:
        uri = "/users?name=%s" % username
    elif email:
        uri = "/users?email=%s" % email
    else:
        raise ValueError("You must include one of 'user_id', "
                "'username', or 'email' when calling get_user().")

    resp, resp_body = self.method_get(uri)
    if resp.status_code == 404:
        raise exc.NotFound("No such user exists.")

    users = resp_body.get("users", [])
    if users:
        return [User(self, user) for user in users]
    user = resp_body.get("user", {})
    if not user:
        raise exc.NotFound("No such user exists.")
    return User(self, user)
Returns the user specified by either ID, username or email. Since more than user can have the same email address, searching by that term will return a list of 1 or more User objects. Searching by username or ID will return a single User. If a user_id that doesn't belong to the current account is searched for, a Forbidden exception is raised. When searching by username or email, a NotFound exception is raised if there is no matching user.
def assertFileExists(self, filename, msg=None):
    '''Fail if ``filename`` does not exist as determined by
    ``os.path.isfile(filename)``.

    Parameters
    ----------
    filename : str, bytes
    msg : str
        If not provided, the :mod:`marbles.mixins` or :mod:`unittest`
        standard message will be used.
    '''
    # Guard-clause form: bail out early on success.
    if os.path.isfile(filename):
        return
    standardMsg = '%s does not exist' % filename
    self.fail(self._formatMessage(msg, standardMsg))
Fail if ``filename`` does not exist as determined by ``os.path.isfile(filename)``. Parameters ---------- filename : str, bytes msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used.
def _load_yaml_config(path=None): """Open and return the yaml contents.""" furious_yaml_path = path or find_furious_yaml() if furious_yaml_path is None: logging.debug("furious.yaml not found.") return None with open(furious_yaml_path) as yaml_file: return yaml_file.read()
Open and return the yaml contents.
def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):
    """Read obo file. Store results.

    :param obo_file: path to the .obo ontology file, parsed via OBOReader.
    :param optional_attrs: extra term attributes for OBOReader to collect.
    :param load_obsolete: when True, obsolete terms are stored as well.
    :param prt: writable stream for the summary line, or None for silence.
    :return: one-line description of what was loaded.
    """
    reader = OBOReader(obo_file, optional_attrs)

    # Save alt_ids and their corresponding main GO ID. Add to GODag after populating GO Terms
    alt2rec = {}
    for rec in reader:
        # Save record if:
        #   1) Argument load_obsolete is True OR
        #   2) Argument load_obsolete is False and the GO term is "live" (not obsolete)
        if load_obsolete or not rec.is_obsolete:
            self[rec.item_id] = rec
            for alt in rec.alt_ids:
                alt2rec[alt] = rec

    # Save the typedefs and parsed optional_attrs
    # self.optobj = reader.optobj
    self.typedefs = reader.typedefs

    self._populate_terms(reader.optobj)
    self._set_level_depth(reader.optobj)

    # Add alt_ids to go2obj: each alternate id maps to the same record
    # object as its primary id.
    for goid_alt, rec in alt2rec.items():
        self[goid_alt] = rec
    desc = self._str_desc(reader)
    if prt is not None:
        prt.write("{DESC}\n".format(DESC=desc))
    return desc
Read obo file. Store results.
def _load_zp_mappings(self, file):
    """
    Given a file that defines the mapping between ZFIN-specific EQ
    definitions and the automatically derived ZP ids, create a mapping
    here. This may be deprecated in the future.

    :param file: path to the tab-separated ZP mapping file (8 columns:
        zp_id, zp_label, superterm1, subterm1, quality, modifier,
        superterm2, subterm2).
    :return: dict keyed by the composite key from ``self._make_zpkey``;
        each value carries the ZP id/label plus the raw EQ components.
    """
    zp_map = {}
    LOG.info("Loading ZP-to-EQ mappings")
    with open(file, 'r', encoding="utf-8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            (zp_id, zp_label, superterm1_id, subterm1_id,
             quality_id, modifier, superterm2_id, subterm2_id) = row
            key = self._make_zpkey(
                superterm1_id, subterm1_id, quality_id,
                superterm2_id, subterm2_id, modifier)
            zp_map[key] = {
                'zp_id': zp_id,
                'label': zp_label,
                'superterm1_id': superterm1_id,
                'subterm1_id': subterm1_id,
                'quality_id': quality_id,
                'modifier': modifier,
                'superterm2_id': superterm2_id,
                'subterm2_id': subterm2_id,
            }
    # len() instead of the dunder call; the unused line_counter was dropped.
    LOG.info("Loaded %s zp terms", len(zp_map))

    return zp_map
Given a file that defines the mapping between ZFIN-specific EQ definitions and the automatically derived ZP ids, create a mapping here. This may be deprecated in the future :return:
def _step(self, theme, direction): """ Traverse the list in the given direction and return the next theme """ if not self.themes: self.reload() # Try to find the starting index key = (theme.source, theme.name) for i, val in enumerate(self.themes): if (val.source, val.name) == key: index = i break else: # If the theme was set from a custom source it might # not be a part of the list returned by list_themes(). self.themes.insert(0, theme) index = 0 index = (index + direction) % len(self.themes) new_theme = self.themes[index] return new_theme
Traverse the list in the given direction and return the next theme
def p_try_statement_1(self, p):
    # NOTE: in PLY the docstring below *is* the grammar production for this
    # parser rule -- it is consumed by yacc and must not be edited as prose.
    """try_statement : TRY block catch"""
    # p[2] is the protected block's statements, p[3] the catch clause.
    p[0] = ast.Try(statements=p[2], catch=p[3])
try_statement : TRY block catch
def refresh_balance(self):
    """
    Recalculate self.balance and self.depth based on child node values.
    """
    # A missing child contributes depth 0.
    depth_left = 0
    if self.left_node:
        depth_left = self.left_node.depth
    depth_right = 0
    if self.right_node:
        depth_right = self.right_node.depth

    self.depth = max(depth_left, depth_right) + 1
    # Positive balance means the right subtree is deeper.
    self.balance = depth_right - depth_left
Recalculate self.balance and self.depth based on child node values.
def activate_right(self, token):
    """Make a copy of the received token and call `_activate_right`."""
    watchers.MATCHER.debug(
        "Node <%s> activated right with token %r", self, token)
    duplicate = token.copy()
    return self._activate_right(duplicate)
Make a copy of the received token and call `_activate_right`.
def _validate_plan(plan):
    """Validate if given plan is valid based on kafka-cluster-assignment protocols.

    Validate following parameters:
    - Correct format of plan
    - Partition-list should be unique
    - Every partition of a topic should have same replication-factor
    - Replicas of a partition should have unique broker-set

    :param plan: dict with a 'partitions' list whose entries hold 'topic',
        'partition' and 'replicas'.
    :returns: True when every check passes, False otherwise (the failure is
        logged).
    """
    # Validate format of plan
    if not _validate_format(plan):
        return False

    # Verify no duplicate partitions
    partition_names = [
        (p_data['topic'], p_data['partition'])
        for p_data in plan['partitions']
    ]
    duplicate_partitions = [
        partition for partition, count in six.iteritems(Counter(partition_names))
        if count > 1
    ]
    if duplicate_partitions:
        _log.error(
            'Duplicate partitions in plan {p_list}'
            .format(p_list=duplicate_partitions),
        )
        return False

    # Verify no duplicate brokers in partition-replicas
    for p_data in plan['partitions']:
        dup_replica_brokers = [
            broker
            for broker, count in Counter(p_data['replicas']).items()
            if count > 1
        ]
        if dup_replica_brokers:
            _log.error(
                'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
                .format(
                    topic=p_data['topic'],
                    p_id=p_data['partition'],
                    replicas=p_data['replicas'],
                )
            )
            return False

    # Verify same replication-factor for every topic
    topic_replication_factor = {}
    for partition_info in plan['partitions']:
        topic = partition_info['topic']
        replication_factor = len(partition_info['replicas'])
        # Membership test directly on the dict; the original materialized
        # list(dict.keys()) on every iteration.
        if topic in topic_replication_factor:
            if topic_replication_factor[topic] != replication_factor:
                _log.error(
                    'Mismatch in replication-factor of partitions for topic '
                    '{topic}'.format(topic=topic),
                )
                return False
        else:
            topic_replication_factor[topic] = replication_factor
    return True
Validate if given plan is valid based on kafka-cluster-assignment protocols. Validate following parameters: - Correct format of plan - Partition-list should be unique - Every partition of a topic should have same replication-factor - Replicas of a partition should have unique broker-set
def _config_from_url(self):
    """ Manage block configuration from requests.args (url params) """
    options = {}
    for key, value in six.iteritems(request.args):
        # A one-element list (repeated/parsed query param -- presumably from
        # the web framework's arg container; confirm) collapses to its value.
        if isinstance(value, list) and len(value) == 1:
            options[key] = value[0]
        else:
            options[key] = value
    return {"name": self._blk.name, "options": options}
Manage block configuration from requests.args (url params)
def set_attributes(self, doc, fields, parent_type=None):
    """
    Fields are specified as a list so that order is preserved for display
    purposes only. (Might be used for certain serialization formats...)

    :param str doc: Description of type.
    :param list(Field) fields: Ordered list of fields for type.
    :param Optional[Composite] parent_type: The type this type inherits from.
    :raises InvalidSpec: if a field name is duplicated within this type or
        collides with a field of any ancestor type.
    """
    self.raw_doc = doc
    self.doc = doc_unwrap(doc)
    self.fields = fields
    self.parent_type = parent_type
    self._raw_examples = OrderedDict()
    self._examples = OrderedDict()
    self._fields_by_name = {}  # Dict[str, Field]

    # Check that no two fields share the same name.
    for field in self.fields:
        if field.name in self._fields_by_name:
            orig_lineno = self._fields_by_name[field.name]._ast_node.lineno
            raise InvalidSpec("Field '%s' already defined on line %s." %
                              (field.name, orig_lineno),
                              field._ast_node.lineno)
        self._fields_by_name[field.name] = field

    # Check that the fields for this type do not match any of the fields of
    # its parents.
    cur_type = self.parent_type
    while cur_type:
        for field in self.fields:
            if field.name in cur_type._fields_by_name:
                lineno = cur_type._fields_by_name[field.name]._ast_node.lineno
                raise InvalidSpec(
                    "Field '%s' already defined in parent '%s' on line %d."
                    % (field.name, cur_type.name, lineno),
                    field._ast_node.lineno)
        cur_type = cur_type.parent_type

    # Import namespaces containing any custom annotations
    # Note: we don't need to do this for builtin annotations because
    # they are treated as globals at the IR level
    for field in self.fields:
        for annotation in field.custom_annotations:
            # first, check the annotation *type*
            if annotation.annotation_type.namespace.name != self.namespace.name:
                self.namespace.add_imported_namespace(
                    annotation.annotation_type.namespace,
                    imported_annotation_type=True)
            # second, check if we need to import the annotation itself
            # the annotation namespace is currently not actually used in the
            # backends, which reconstruct the annotation from the annotation
            # type directly. This could be changed in the future, and at
            # the IR level it makes sense to include the dependency
            if annotation.namespace.name != self.namespace.name:
                self.namespace.add_imported_namespace(
                    annotation.namespace,
                    imported_annotation=True)

    # Indicate that the attributes of the type have been populated.
    self._is_forward_ref = False
Fields are specified as a list so that order is preserved for display purposes only. (Might be used for certain serialization formats...) :param str doc: Description of type. :param list(Field) fields: Ordered list of fields for type. :param Optional[Composite] parent_type: The type this type inherits from.
def get_package_data(filename, mode='rb'):
    '''Return the contents of a real file or a zip file.

    ``filename`` may be a plain path, or a path routed *through* a ``.zip``
    archive (e.g. ``bundle.zip/dir/member.txt``), in which case the member
    is read from the archive.

    :param filename: path to the file, possibly through a .zip archive.
    :param mode: open mode for plain files ('rb' by default); zip members
        are always returned as bytes.
    :raises FileNotFoundError: when the path neither exists on disk nor
        contains a ``.zip`` component. (The original fell through with
        ``zip_path`` unbound and raised an accidental ``NameError``.)
    '''
    if os.path.exists(filename):
        with open(filename, mode=mode) as in_file:
            return in_file.read()

    parts = os.path.normpath(filename).split(os.sep)
    for index, part in enumerate(parts):
        if part.endswith('.zip'):
            zip_path = os.sep.join(parts[:index + 1])
            member_path = os.sep.join(parts[index + 1:])
            break
    else:
        raise FileNotFoundError(filename)

    if platform.system() == 'Windows':
        # Zip archives always use forward slashes internally.
        member_path = member_path.replace('\\', '/')
    with zipfile.ZipFile(zip_path) as zip_file:
        return zip_file.read(member_path)
Return the contents of a real file or a zip file.
def render(self, tag, single, between, kwargs):
    """Append the actual tags to content."""
    pieces = ["<%s" % tag]
    for key, value in list(kwargs.items()):
        if value is not None:
            # strip trailing/leading '_' so class_ means class, etc.
            key = key.strip('_')
            if key == 'http_equiv':   # special cases, maybe change _ to - overall?
                key = 'http-equiv'
            elif key == 'accept_charset':
                key = 'accept-charset'
            pieces.append(" %s=\"%s\"" % (key, escape(value)))
        else:
            # value None means a bare attribute, e.g. <... checked>
            pieces.append(" %s" % key)
    out = "".join(pieces)

    if between is not None:
        out = "%s>%s</%s>" % (out, between, tag)
    elif single:
        out = "%s />" % out
    else:
        out = "%s>" % out

    if self.parent is not None:
        self.parent.content.append(out)
        return None
    return out
Append the actual tags to content.
def _get_hd(self, hdr_info):
    """Open the file, read and get the basic file header info and set
    the mda dictionary

    :param hdr_info: ``(hdr_map, variable_length_headers, text_headers)``
        tuple: a mapping from header-record id to numpy dtype, plus the
        dtypes that should be stored as variable-length arrays or as
        fixed-width text under named keys in ``self.mda``.
    """
    hdr_map, variable_length_headers, text_headers = hdr_info

    with open(self.filename) as fp:

        # Read headers until the declared total header length is reached;
        # the initial 16 is a bootstrap value, replaced once the first
        # record supplies 'total_header_length'.
        total_header_length = 16
        while fp.tell() < total_header_length:
            hdr_id = np.fromfile(fp, dtype=common_hdr, count=1)[0]
            the_type = hdr_map[hdr_id['hdr_id']]
            if the_type in variable_length_headers:
                # record_length - 3 presumably excludes the common
                # id/length prefix -- TODO confirm against the format spec.
                field_length = int((hdr_id['record_length'] - 3) /
                                   the_type.itemsize)
                current_hdr = np.fromfile(fp,
                                          dtype=the_type,
                                          count=field_length)
                key = variable_length_headers[the_type]
                if key in self.mda:
                    # Repeated headers accumulate into a list.
                    if not isinstance(self.mda[key], list):
                        self.mda[key] = [self.mda[key]]
                    self.mda[key].append(current_hdr)
                else:
                    self.mda[key] = current_hdr
            elif the_type in text_headers:
                field_length = int((hdr_id['record_length'] - 3) /
                                   the_type.itemsize)
                # Build a string dtype exactly as wide as the record payload.
                char = list(the_type.fields.values())[0][0].char
                new_type = np.dtype(char + str(field_length))
                current_hdr = np.fromfile(fp, dtype=new_type, count=1)[0]
                self.mda[text_headers[the_type]] = current_hdr
            else:
                # Plain fixed-layout record: merge its named fields into mda.
                current_hdr = np.fromfile(fp, dtype=the_type, count=1)[0]
                self.mda.update(
                    dict(zip(current_hdr.dtype.names, current_hdr)))

            total_header_length = self.mda['total_header_length']

    self.mda.setdefault('number_of_bits_per_pixel', 10)

    self.mda['projection_parameters'] = {'a': 6378169.00,
                                         'b': 6356583.80,
                                         'h': 35785831.00,
                                         # FIXME: find a reasonable SSP
                                         'SSP_longitude': 0.0}
    self.mda['navigation_parameters'] = {}
Open the file, read and get the basic file header info and set the mda dictionary
def warp_vrt(directory, delete_extra=False, use_band_map=False, overwrite=False,
             remove_bqa=True, return_profile=False):
    """ Read in image geometry, resample subsequent images to same grid.

    The purpose of this function is to snap many Landsat images to one
    geometry. Use Landsat578 to download and unzip them, then run them
    through this to get identical geometries for analysis.

    :param directory: A directory containing sub-directories of Landsat images.
    :param delete_extra: remove files in each scene dir that were not warped.
    :param use_band_map: restrict warping to the bands in BandMap().selected.
    :param overwrite: re-warp even if resample_meta.txt already exists.
    :param remove_bqa: delete BQA.TIF quality files instead of warping them.
    :param return_profile: return the last rasterio dataset handle.
    :return: None (or the last opened dataset when return_profile is True)
    """
    if 'resample_meta.txt' in os.listdir(directory) and not overwrite:
        print('{} has already had component images warped'.format(directory))
        return None

    mapping = {'LC8': Landsat8, 'LE7': Landsat7, 'LT5': Landsat5}

    vrt_options = {}
    # Scene sub-directories are recognised by their satellite prefix.
    list_dir = [x[0] for x in os.walk(directory) if
                os.path.basename(x[0])[:3] in mapping.keys()]
    extras = [os.path.join(directory, x) for x in os.listdir(directory) if
              x.endswith('.tif')]

    first = True

    for d in list_dir:
        sat = LandsatImage(d).satellite
        # NOTE(review): `paths = extras` aliases the shared list, so every
        # scene iteration appends into the same list -- looks unintended;
        # confirm before relying on per-scene path sets.
        paths = extras
        root = os.path.join(directory, d)
        if os.path.isdir(root):
            for x in os.listdir(root):

                if remove_bqa and x.endswith('BQA.TIF'):
                    try:
                        # NOTE(review): removes by bare filename relative to
                        # the CWD, not the scene directory -- confirm.
                        os.remove(x)
                    except FileNotFoundError:
                        pass

                elif use_band_map:
                    bands = BandMap().selected
                    for y in bands[sat]:
                        if x.endswith('B{}.TIF'.format(y)):
                            paths.append(os.path.join(directory, d, x))
                else:
                    if x.endswith('.TIF') or x.endswith('.tif'):
                        paths.append(os.path.join(directory, d, x))

                if x.endswith('MTL.txt'):
                    # NOTE(review): mtl is assigned but never used.
                    mtl = os.path.join(directory, d, x)

        if first:
            # The first scene becomes the master grid all others snap to.
            landsat = mapping[sat](os.path.join(directory, d))
            dst = landsat.rasterio_geometry

            vrt_options = {'resampling': Resampling.nearest,
                           'dst_crs': dst['crs'],
                           'dst_transform': dst['transform'],
                           'dst_height': dst['height'],
                           'dst_width': dst['width']}

            message = """
            This directory has been resampled to same grid.
            Master grid is {}.
            {}
            """.format(d, datetime.now())
            with open(os.path.join(directory, 'resample_meta.txt'), 'w') as f:
                f.write(message)
            first = False

        for tif_path in paths:
            print('warping {}'.format(os.path.basename(tif_path)))
            with rasopen(tif_path, 'r') as src:
                with WarpedVRT(src, **vrt_options) as vrt:
                    data = vrt.read()
                    dst_dir, name = os.path.split(tif_path)
                    outfile = os.path.join(dst_dir, name)
                    meta = vrt.meta.copy()
                    meta['driver'] = 'GTiff'
                    with rasopen(outfile, 'w', **meta) as dst:
                        dst.write(data)

        if delete_extra:
            for x in os.listdir(os.path.join(directory, d)):
                x_file = os.path.join(directory, d, x)
                if x_file not in paths:
                    # Keep mask and metadata files regardless.
                    if x[-7:] not in ['ask.tif', 'MTL.txt']:
                        print('removing {}'.format(x_file))
                        os.remove(x_file)

    if return_profile:
        return dst
Read in image geometry, resample subsequent images to the same grid.

The purpose of this function is to snap many Landsat images to one geometry.
Use Landsat578 to download and unzip them, then run the files through this
function to get identical geometries for analysis.

:param directory: A directory containing sub-directories of Landsat images.
:param delete_extra: remove files in each scene directory that were not warped
:param use_band_map: restrict warping to the bands listed by BandMap
:return: None
def to_fp32(learn:Learner):
    "Put `learn` back to FP32 precision mode."
    learn.data.remove_tfm(batch_to_half)
    # Iterate over a snapshot: removing from a list while iterating it makes
    # the loop skip the element that follows each removal, so the original
    # could leave a MixedPrecision callback behind if two were adjacent.
    for cb in list(learn.callbacks):
        if isinstance(cb, MixedPrecision):
            learn.callbacks.remove(cb)
    learn.model = learn.model.float()
    return learn
Put `learn` back to FP32 precision mode.
def _setTaskParsObj(self, theTask): """ Overridden version for ConfigObj. theTask can be either a .cfg file name or a ConfigObjPars object. """ # Create the ConfigObjPars obj self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask, self._strict, False) # Tell it that we can be used for catching debug lines self._taskParsObj.setDebugLogger(self) # Immediately make a copy of it's un-tampered internal dict. # The dict() method returns a deep-copy dict of the keyvals. self._lastSavedState = self._taskParsObj.dict()
Overridden version for ConfigObj. theTask can be either a .cfg file name or a ConfigObjPars object.
def main_help_text(self, commands_only=False):
    """
    Returns the script's main help text, as a string.
    """
    if commands_only:
        # Bare, sorted command names only.
        return '\n'.join(sorted(get_commands().keys()))

    usage = [
        "",
        "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
        "",
        "Available subcommands:",
    ]
    # Group command names by the app that provides them.
    commands_dict = collections.defaultdict(lambda: [])
    for name, app in six.iteritems(get_commands()):
        if app == 'pug.crawlnmine':
            app = 'pug'
        else:
            app = app.rpartition('.')[-1]
        commands_dict[app].append(name)
    style = color_style()
    for app in sorted(commands_dict.keys()):
        usage.append("")
        usage.append(style.NOTICE("[%s]" % app))
        for name in sorted(commands_dict[app]):
            usage.append(" %s" % name)
    # Output an extra note if settings are not properly configured
    if self.settings_exception is not None:
        usage.append(style.NOTICE(
            "Note that only Django core commands are listed "
            "as settings are not properly configured (error: %s)."
            % self.settings_exception))
    return '\n'.join(usage)
Returns the script's main help text, as a string.
def sendfrom(self, user_id, dest_address, amount, minconf=1):
    """
    Send coins from user's account.

    Args:
        user_id (str): this user's unique identifier
        dest_address (str): address which is to receive coins
        amount (str or Decimal): amount to send (eight decimal points)
        minconf (int): ensure the account has a valid balance using this
                       many confirmations (default=1)

    Returns:
        str: transaction ID
    """
    # Normalise to the coin's quantum with banker's rounding, then hand a
    # float to the RPC layer.
    quantized = Decimal(amount).quantize(self.quantum,
                                         rounding=ROUND_HALF_EVEN)
    txhash = self.rpc.call("sendfrom",
                           user_id, dest_address, float(str(quantized)),
                           minconf
                           )
    self.logger.debug("Send %s %s from %s to %s" % (str(quantized),
                                                    self.coin,
                                                    str(user_id),
                                                    dest_address))
    self.logger.debug("Transaction hash: %s" % txhash)
    return txhash
Send coins from user's account. Args: user_id (str): this user's unique identifier dest_address (str): address which is to receive coins amount (str or Decimal): amount to send (eight decimal points) minconf (int): ensure the account has a valid balance using this many confirmations (default=1) Returns: str: transaction ID
def _has_name(soup_obj): """checks if soup_obj is really a soup object or just a string If it has a name it is a soup object""" try: name = soup_obj.name if name == None: return False return True except AttributeError: return False
checks if soup_obj is really a soup object or just a string If it has a name it is a soup object
def do_init_fields(self, flist):
    """
    Initialize each fields of the fields_desc dict
    """
    for field in flist:
        # Deep-copy the default so per-packet mutation cannot leak back
        # into the field definition.
        self.default_fields[field.name] = copy.deepcopy(field.default)
        self.fieldtype[field.name] = field
        if field.holds_packets:
            self.packetfields.append(field)
Initialize each fields of the fields_desc dict
def get_rank():
    """
    Gets distributed rank or returns zero if distributed is not initialized.
    """
    dist = torch.distributed
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
Gets distributed rank or returns zero if distributed is not initialized.
def make_child_of(self, chunk):
    """
    Link one YAML chunk to another.

    Recursively re-parents every key/value (for mappings) or item (for
    sequences) of this chunk, and for scalars this chunk's own pointer,
    so the subtree is recorded as a child of ``chunk``.

    Used when inserting a chunk of YAML into another chunk.
    """
    if self.is_mapping():
        # Only the keys are needed; the original iterated .items() and
        # discarded the values.
        for key in self.contents:
            self.key(key, key).pointer.make_child_of(chunk.pointer)
            self.val(key).make_child_of(chunk)
    elif self.is_sequence():
        for index in range(len(self.contents)):
            self.index(index).make_child_of(chunk)
    else:
        self.pointer.make_child_of(chunk.pointer)
Link one YAML chunk to another. Used when inserting a chunk of YAML into another chunk.
def _FormatSubjectOrProcessToken(self, token_data): """Formats a subject or process token as a dictionary of values. Args: token_data (bsm_token_data_subject32|bsm_token_data_subject64): AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token data. Returns: dict[str, str]: token values. """ ip_address = self._FormatPackedIPv4Address(token_data.ip_address) return { 'aid': token_data.audit_user_identifier, 'euid': token_data.effective_user_identifier, 'egid': token_data.effective_group_identifier, 'uid': token_data.real_user_identifier, 'gid': token_data.real_group_identifier, 'pid': token_data.process_identifier, 'session_id': token_data.session_identifier, 'terminal_port': token_data.terminal_port, 'terminal_ip': ip_address}
Formats a subject or process token as a dictionary of values. Args: token_data (bsm_token_data_subject32|bsm_token_data_subject64): AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token data. Returns: dict[str, str]: token values.
def get_agent_queue(self, queue_id, project=None, action_filter=None):
    """GetAgentQueue.
    [Preview API] Get information about an agent queue.
    :param int queue_id: The agent queue to get information about
    :param str project: Project ID or project name
    :param str action_filter: Filter by whether the calling user has use or manage permissions
    :rtype: :class:`<TaskAgentQueue> <azure.devops.v5_1.task-agent.models.TaskAgentQueue>`
    """
    # Serialize only the route parameters that were actually supplied.
    url_specs = (
        ('project', 'project', project, 'str'),
        ('queueId', 'queue_id', queue_id, 'int'),
    )
    route_values = {
        route_key: self._serialize.url(arg_name, value, type_name)
        for route_key, arg_name, value, type_name in url_specs
        if value is not None
    }
    query_parameters = {}
    if action_filter is not None:
        query_parameters['actionFilter'] = self._serialize.query(
            'action_filter', action_filter, 'str')
    response = self._send(
        http_method='GET',
        location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
        version='5.1-preview.1',
        route_values=route_values,
        query_parameters=query_parameters)
    return self._deserialize('TaskAgentQueue', response)
GetAgentQueue. [Preview API] Get information about an agent queue. :param int queue_id: The agent queue to get information about :param str project: Project ID or project name :param str action_filter: Filter by whether the calling user has use or manage permissions :rtype: :class:`<TaskAgentQueue> <azure.devops.v5_1.task-agent.models.TaskAgentQueue>`
def sort_index(
    self,
    axis=0,
    level=None,
    ascending=True,
    inplace=False,
    kind="quicksort",
    na_position="last",
    sort_remaining=True,
    by=None,
):
    """Sort a DataFrame by one of the indices (columns or index).

    Args:
        axis: The axis to sort over.
        level: The MultiIndex level to sort over.
        ascending: Ascending or descending.
        inplace: Whether or not to update this DataFrame inplace.
        kind: How to perform the sort.
        na_position: Where to position NA on the sort.
        sort_remaining: On Multilevel Index sort based on all levels.
        by: (Deprecated) argument to pass to sort_values.

    Returns:
        A sorted DataFrame.

    Raises:
        ValueError: If both ``by`` and ``level`` are given.
    """
    axis = self._get_axis_number(axis)
    # BUGFIX: handle the deprecated `by` argument BEFORE the `level`
    # fallback. Previously the `level is not None` early-return ran first,
    # which made the by+level conflict check below unreachable and caused
    # a simultaneous by/level call to silently ignore `by`.
    if by is not None:
        warnings.warn(
            "by argument to sort_index is deprecated, "
            "please use .sort_values(by=...)",
            FutureWarning,
            stacklevel=2,
        )
        if level is not None:
            raise ValueError("unable to simultaneously sort by and level")
        return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace)
    if level is not None:
        # MultiIndex level sorts are not implemented natively; defer to pandas.
        new_query_compiler = self._default_to_pandas(
            "sort_index",
            axis=axis,
            level=level,
            ascending=ascending,
            inplace=False,
            kind=kind,
            na_position=na_position,
            sort_remaining=sort_remaining,
        )
        return self._create_or_update_from_compiler(new_query_compiler, inplace)
    new_query_compiler = self._query_compiler.sort_index(
        axis=axis, ascending=ascending, kind=kind, na_position=na_position
    )
    if inplace:
        self._update_inplace(new_query_compiler=new_query_compiler)
    else:
        return self.__constructor__(query_compiler=new_query_compiler)
Sort a DataFrame by one of the indices (columns or index). Args: axis: The axis to sort over. level: The MultiIndex level to sort over. ascending: Ascending or descending inplace: Whether or not to update this DataFrame inplace. kind: How to perform the sort. na_position: Where to position NA on the sort. sort_remaining: On Multilevel Index sort based on all levels. by: (Deprecated) argument to pass to sort_values. Returns: A sorted DataFrame