positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def cmyk_to_rgb(Class, c, m, y, k):
    """Convert CMYK percentages (0-100) to RGB components (0-255).

    Based on https://www.openprocessing.org/sketch/46231#
    """
    # Scale each channel from percent down to the 0.0-1.0 range.
    colour_channels = [float(v) / 100.0 for v in (c, m, y)]
    black = float(k) / 100.0
    # Fold the black component into each colour channel.
    mixed = [(v * (1 - black)) + black for v in colour_channels]
    # Invert and scale to 8-bit integer components.
    red, green, blue = (int((1 - v) * 255) for v in mixed)
    return dict(r=red, g=green, b=blue)
CMYK in % to RGB in 0-255 based on https://www.openprocessing.org/sketch/46231#
def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo:
    """Create an EpochInfo object for the given epoch indices.

    :param training_info: training-run information used to build the record
    :param global_idx: presumably the epoch index across the whole run
        (vs. ``local_idx``) — confirm with concrete implementations
    :param local_idx: presumably the epoch index local to the current
        phase/source — confirm with concrete implementations
    :raises NotImplementedError: abstract hook; subclasses must override
    """
    raise NotImplementedError
Create Epoch info
def create_subscriber(self):
    '''Create a subscriber instance using specified addresses and message types.
    '''
    # Only create once; an existing subscriber is kept as-is.
    if self.subscriber is None:
        if self.topics:
            # NOTE(review): indentation reconstructed from a collapsed
            # source line -- the `self.recv` assignment is assumed to
            # belong to the topics branch; confirm against upstream.
            self.subscriber = NSSubscriber(self.services, self.topics,
                                           addr_listener=True,
                                           addresses=self.addresses,
                                           nameserver=self.nameserver)
            self.recv = self.subscriber.start().recv
Create a subscriber instance using specified addresses and message types.
def _consolidate(self, inplace=False):
    """
    Compute NDFrame with "consolidated" internals (data of each dtype
    grouped together in a single ndarray).

    Parameters
    ----------
    inplace : boolean, default False
        If False return new object, otherwise modify existing object

    Returns
    -------
    consolidated : same type as caller
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    # Guard clause: in-place consolidation mutates self and returns None.
    if inplace:
        self._consolidate_inplace()
        return None
    consolidated_data = self._protect_consolidate(
        lambda: self._data.consolidate())
    return self._constructor(consolidated_data).__finalize__(self)
Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Parameters ---------- inplace : boolean, default False If False return new object, otherwise modify existing object Returns ------- consolidated : same type as caller
def matrix(self):
    """The current calibration matrix for this device.

    Returns:
        (bool, (float, float, float, float, float, float)):
        :obj:`False` if no calibration is set (the returned matrix is then
        the identity matrix), :obj:`True` otherwise, together with a
        :obj:`tuple` holding the first two rows of a 3x3 matrix as
        described in :meth:`set_matrix`.
    """
    # libinput writes the six matrix entries into this ctypes buffer.
    buffer = (c_float * 6)()
    status = self._libinput.libinput_device_config_calibration_get_matrix(
        self._handle, buffer)
    return status, tuple(buffer)
The current calibration matrix for this device. Returns: (bool, (float, float, float, float, float, float)): :obj:`False` if no calibration is set and the returned matrix is the identity matrix, :obj:`True` otherwise. :obj:`tuple` representing the first two rows of a 3x3 matrix as described in :meth:`set_matrix`.
def restart(self, soft=False):
    """Restart the client.

    If *soft* is True, the client attempts to re-subscribe to all channels
    which it was previously subscribed to.

    :return:
    """
    log.info("BitfinexWSS.restart(): Restarting client..")
    super(BitfinexWSS, self).restart()

    # Preserve channel subscriptions before wiping caches (soft restart only).
    saved_labels = None
    if soft:
        saved_labels = [self.channel_labels[key] for key in self.channel_labels]

    # Reset all per-channel caches.
    self.channels = {}
    self.channel_labels = {}
    self.channel_states = {}

    # Re-subscribe to the previously active channels.
    if saved_labels:
        for channel_name, kwargs in saved_labels:
            self._subscribe(channel_name, **kwargs)
Restarts client. If soft is True, the client attempts to re-subscribe to all channels which it was previously subscribed to. :return:
def add_edge(self, x, y, label=None):
    """Add an edge from distribution *x* to distribution *y* with the
    given *label*.

    :type x: :class:`distutils2.database.InstalledDistribution` or
        :class:`distutils2.database.EggInfoDistribution`
    :type y: :class:`distutils2.database.InstalledDistribution` or
        :class:`distutils2.database.EggInfoDistribution`
    :type label: ``str`` or ``None``
    """
    # Parallel edges are permitted in the adjacency list.
    self.adjacency_list[x].append((y, label))
    # The reverse index, however, records each predecessor only once.
    back_refs = self.reverse_list[y]
    if x not in back_refs:
        back_refs.append(x)
Add an edge from distribution *x* to distribution *y* with the given *label*. :type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None``
def increment(self, size: int):
    """Record one more downloaded file of *size* bytes.

    Args:
        size: The size of the file. Must be non-negative.
    """
    assert size >= 0, size
    self.files = self.files + 1
    self.size = self.size + size
    # Feed the rate tracker with the byte count.
    self.bandwidth_meter.feed(size)
Increment the number of files downloaded. Args: size: The size of the file
def _save(self):
    """Persist the cache index if it has been modified.

    Saves the index table and the file name repository to `index.dat`
    inside the cache directory, then clears the modified flag.
    """
    # Nothing to do when the cache is untouched.
    if not self.__modified_flag:
        return
    self.__filename_rep.update_id_counter()
    index_path = os.path.join(self.__dir, "index.dat")
    self._write_file(index_path, (self.__index, self.__filename_rep))
    self.__modified_flag = False
save the cache index, in case it was modified. Saves the index table and the file name repository in the file `index.dat`
def clear_from(self, timestamp):
    """Clear all data from `timestamp` onwards.

    `timestamp` must lie exactly on a block boundary (a multiple of the
    configured block size); otherwise a ValueError is raised.
    """
    block_size = self.config.block_size
    offset, remainder = timestamp // block_size, timestamp % block_size
    if remainder:
        raise ValueError('Timestamp must be on a block boundary')
    # NOTE(review): the driver receives both the block offset and the raw
    # timestamp -- confirm against the driver API that both are intended.
    self.driver.clear_from(offset, timestamp)
Clear all data from `timestamp` onwards. Note that the timestamp is rounded down to the nearest block boundary
def initiate_migration(self):
    """Initiate a pending migration that is already scheduled for this
    Linode Instance.
    """
    endpoint = '{}/migrate'.format(Instance.api_endpoint)
    self._client.post(endpoint, model=self)
Initiates a pending migration that is already scheduled for this Linode Instance
def cross(triangles):
    """Return the cross product of two edges of each input triangle.

    Parameters
    --------------
    triangles: (n, 3, 3) float
      Vertices of triangles

    Returns
    --------------
    crosses : (n, 3) float
      Cross product of two edge vectors
    """
    # Consecutive vertex differences give the two edge vectors per triangle.
    edges = np.diff(triangles, axis=1)
    return np.cross(edges[:, 0], edges[:, 1])
Returns the cross product of two edges from input triangles Parameters -------------- triangles: (n, 3, 3) float Vertices of triangles Returns -------------- crosses : (n, 3) float Cross product of two edge vectors
async def get_creds(self) -> dict:
    """Retrieve the credentials associated with a disclosed proof.

    Example:
        msg_id = '1'
        phone_number = '8019119191'
        connection = await Connection.create(source_id)
        await connection.connect(phone_number)
        disclosed_proof = await DisclosedProof.create_with_msgid(source_id, connection, msg_id)
        creds = await disclosed_proof.get_creds()

    :return: credentials
    """
    # The C callback is created once and cached on the function object.
    if not hasattr(DisclosedProof.get_creds, "cb"):
        self.logger.debug("vcx_disclosed_proof_retrieve_credentials: Creating callback")
        DisclosedProof.get_creds.cb = create_cb(
            CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
    proof_handle = c_uint32(self.handle)
    data = await do_call('vcx_disclosed_proof_retrieve_credentials',
                         proof_handle,
                         DisclosedProof.get_creds.cb)
    return json.loads(data.decode())
Gets the credentials from a disclosed proof Example: msg_id = '1' phone_number = '8019119191' connection = await Connection.create(source_id) await connection.connect(phone_number) disclosed_proof = await DisclosedProof.create_with_msgid(source_id, connection, msg_id) creds = await disclosed_proof.get_creds() :return: credentials
def backup_db(self):
    """Generate a xxxxx.backup.json copy of the JSON database file."""
    with self.db_mutex:
        # Nothing to back up if the database file does not exist.
        if not os.path.exists(self.json_db_path):
            return
        try:
            shutil.copy2(self.json_db_path, self.backup_json_db_path)
        except (IOError, OSError):
            _logger.debug("*** No file to copy.")
" Generate a xxxxx.backup.json.
def owns_data_key(self, data_key):
    """Determines if data_key object is owned by this RawMasterKey.

    :param data_key: Data key to evaluate
    :type data_key: :class:`aws_encryption_sdk.structures.DataKey`,
        :class:`aws_encryption_sdk.structures.RawDataKey`,
        or :class:`aws_encryption_sdk.structures.EncryptedDataKey`
    :returns: Boolean statement of ownership
    :rtype: bool
    """
    expected_key_info_len = -1
    # Asymmetric wrapping: ownership only requires a matching key provider.
    if (
        self.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC
        and data_key.key_provider == self.key_provider
    ):
        return True
    # Symmetric wrapping: the key info must be exactly the expected
    # prefix followed by an IV of the algorithm's IV length.
    elif self.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.SYMMETRIC:
        expected_key_info_len = (
            len(self._key_info_prefix) + self.config.wrapping_key.wrapping_algorithm.algorithm.iv_len
        )
        if (
            data_key.key_provider.provider_id == self.provider_id
            and len(data_key.key_provider.key_info) == expected_key_info_len
            and data_key.key_provider.key_info.startswith(self._key_info_prefix)
        ):
            return True
    # Not owned: log what was expected to aid debugging.
    _LOGGER.debug(
        (
            "RawMasterKey does not own data_key: %s\n"
            "Expected provider_id: %s\n"
            "Expected key_info len: %s\n"
            "Expected key_info prefix: %s"
        ),
        data_key,
        self.provider_id,
        expected_key_info_len,
        self._key_info_prefix,
    )
    return False
Determines if data_key object is owned by this RawMasterKey. :param data_key: Data key to evaluate :type data_key: :class:`aws_encryption_sdk.structures.DataKey`, :class:`aws_encryption_sdk.structures.RawDataKey`, or :class:`aws_encryption_sdk.structures.EncryptedDataKey` :returns: Boolean statement of ownership :rtype: bool
def get_documents(self, subtypes=None, refresh=False):
    """Return the author's publications obtained via ScopusSearch,
    optionally filtered to a specified set of document subtypes.
    """
    search = ScopusSearch('au-id({})'.format(self.identifier), refresh)
    results = search.results
    if not subtypes:
        return results
    return [doc for doc in results if doc.subtype in subtypes]
Return list of author's publications using ScopusSearch, which fit a specified set of document subtypes.
def search(query, tld='com', lang='en', num=10, start=0, stop=None, pause=2.0, only_standard=False): """ Search the given query string using Google. @type query: str @param query: Query string. Must NOT be url-encoded. @type tld: str @param tld: Top level domain. @type lang: str @param lang: Languaje. @type num: int @param num: Number of results per page. @type start: int @param start: First result to retrieve. @type stop: int @param stop: Last result to retrieve. Use C{None} to keep searching forever. @type pause: float @param pause: Lapse to wait between HTTP requests. A lapse too long will make the search slow, but a lapse too short may cause Google to block your IP. Your mileage may vary! @type only_standard: bool @param only_standard: If C{True}, only returns the standard results from each page. If C{False}, it returns every possible link from each page, except for those that point back to Google itself. Defaults to C{False} for backwards compatibility with older versions of this module. @rtype: generator @return: Generator (iterator) that yields found URLs. If the C{stop} parameter is C{None} the iterator will loop forever. """ # Lazy import of BeautifulSoup. # Try to use BeautifulSoup 4 if available, fall back to 3 otherwise. global BeautifulSoup if BeautifulSoup is None: try: from bs4 import BeautifulSoup except ImportError: from BeautifulSoup import BeautifulSoup # Set of hashes for the results found. # This is used to avoid repeated results. hashes = set() # Prepare the search string. query = quote_plus(query) # Grab the cookie from the home page. get_page(url_home % vars()) # Prepare the URL of the first request. if start: if num == 10: url = url_next_page % vars() else: url = url_next_page_num % vars() else: if num == 10: url = url_search % vars() else: url = url_search_num % vars() # Loop until we reach the maximum result, if any (otherwise, loop forever). while not stop or start < stop: # Sleep between requests. 
time.sleep(pause) # Request the Google Search results page. html = get_page(url) # Parse the response and process every anchored URL. soup = BeautifulSoup(html) anchors = soup.find(id='search').findAll('a') for a in anchors: # Leave only the "standard" results if requested. # Otherwise grab all possible links. if only_standard and ( not a.parent or a.parent.name.lower() != "h3"): continue # Get the URL from the anchor tag. try: link = a['href'] except KeyError: continue # Filter invalid links and links pointing to Google itself. link = filter_result(link) if not link: continue # Discard repeated results. h = hash(link) if h in hashes: continue hashes.add(h) # Yield the result. yield link # End if there are no more results. if not soup.find(id='nav'): break # Prepare the URL for the next request. start += num if num == 10: url = url_next_page % vars() else: url = url_next_page_num % vars()
Search the given query string using Google. @type query: str @param query: Query string. Must NOT be url-encoded. @type tld: str @param tld: Top level domain. @type lang: str @param lang: Language. @type num: int @param num: Number of results per page. @type start: int @param start: First result to retrieve. @type stop: int @param stop: Last result to retrieve. Use C{None} to keep searching forever. @type pause: float @param pause: Lapse to wait between HTTP requests. A lapse too long will make the search slow, but a lapse too short may cause Google to block your IP. Your mileage may vary! @type only_standard: bool @param only_standard: If C{True}, only returns the standard results from each page. If C{False}, it returns every possible link from each page, except for those that point back to Google itself. Defaults to C{False} for backwards compatibility with older versions of this module. @rtype: generator @return: Generator (iterator) that yields found URLs. If the C{stop} parameter is C{None} the iterator will loop forever.
def get_group_tokens(root):
    """Extract tokens in hyphenated groups (saunameheks-tallimeheks).

    Parameters
    ----------
    root: str
        The root form.

    Returns
    -------
    list of (list of str)
        List of grouped root tokens.
    """
    global all_markers
    # Special case: known markers and bare separators are returned as-is.
    if root in all_markers or root in ['-', '_']:
        return [[root]]
    # Hyphens delimit groups; underscores delimit tokens within a group.
    return [
        [trim_phonetics(trim_compounds(token)) for token in group.split('_')]
        for group in root.split('-')
    ]
Function to extract tokens in hyphenated groups (saunameheks-tallimeheks). Parameters ---------- root: str The root form. Returns ------- list of (list of str) List of grouped root tokens.
def load(cls, path, prefix, network=None):
    r"""
    Load data from the 'dat' files located in specified folder.

    Parameters
    ----------
    path : string
        The full path to the folder containing the set of 'dat' files.
    prefix : string
        The file name prefix on each file. The data files are stored as
        <prefix>_node1.dat.
    network : OpenPNM Network Object
        If given then the data will be loaded on it and returned.  If not
        given, a Network will be created and returned.

    Returns
    -------
    An OpenPNM Project containing a GenericNetwork holding all the data
    """
    net = {}
    # ---------------------------------------------------------------------
    # Parse the link1 file
    path = Path(path)
    filename = Path(path.resolve(), prefix+'_link1.dat')
    with open(filename, mode='r') as f:
        link1 = pd.read_table(filepath_or_buffer=f,
                              header=None,
                              skiprows=1,
                              sep=' ',
                              skipinitialspace=True,
                              index_col=0)
    link1.columns = ['throat.pore1', 'throat.pore2', 'throat.radius',
                     'throat.shape_factor', 'throat.total_length']
    # Add link1 props to net
    # Pore indices in the file are 1-based; convert to 0-based here.
    net['throat.conns'] = sp.vstack((link1['throat.pore1']-1,
                                     link1['throat.pore2']-1)).T
    net['throat.conns'] = sp.sort(net['throat.conns'], axis=1)
    net['throat.radius'] = sp.array(link1['throat.radius'])
    net['throat.shape_factor'] = sp.array(link1['throat.shape_factor'])
    net['throat.total_length'] = sp.array(link1['throat.total_length'])
    # ---------------------------------------------------------------------
    # Parse the link2 file
    filename = Path(path.resolve(), prefix+'_link2.dat')
    with open(filename, mode='r') as f:
        link2 = pd.read_table(filepath_or_buffer=f,
                              header=None,
                              sep=' ',
                              skipinitialspace=True,
                              index_col=0)
    link2.columns = ['throat.pore1', 'throat.pore2',
                     'throat.pore1_length', 'throat.pore2_length',
                     'throat.length', 'throat.volume', 'throat.clay_volume']
    # Add link2 props to net
    net['throat.length'] = sp.array(link2['throat.length'])
    net['throat.volume'] = sp.array(link2['throat.volume'])
    net['throat.clay_volume'] = sp.array(link2['throat.clay_volume'])
    # ---------------------------------------------------------------------
    # Parse the node1 file
    filename = Path(path.resolve(), prefix+'_node1.dat')
    with open(filename, mode='r') as f:
        # First line holds the number of data rows.
        row_0 = f.readline().split()
        num_lines = int(row_0[0])
        array = sp.ndarray([num_lines, 6])
        for i in range(num_lines):
            row = f.readline()\
                .replace('\t', ' ').replace('\n', ' ').split()
            array[i, :] = row[0:6]
    node1 = pd.DataFrame(array[:, [1, 2, 3, 4]])
    node1.columns = ['pore.x_coord', 'pore.y_coord', 'pore.z_coord',
                     'pore.coordination_number']
    # Add node1 props to net
    net['pore.coords'] = sp.vstack((node1['pore.x_coord'],
                                    node1['pore.y_coord'],
                                    node1['pore.z_coord'])).T
    # ---------------------------------------------------------------------
    # Parse the node2 file
    filename = Path(path.resolve(), prefix+'_node2.dat')
    with open(filename, mode='r') as f:
        node2 = pd.read_table(filepath_or_buffer=f,
                              header=None,
                              sep=' ',
                              skipinitialspace=True,
                              index_col=0)
    node2.columns = ['pore.volume', 'pore.radius', 'pore.shape_factor',
                     'pore.clay_volume']
    # Add node2 props to net
    net['pore.volume'] = sp.array(node2['pore.volume'])
    net['pore.radius'] = sp.array(node2['pore.radius'])
    net['pore.shape_factor'] = sp.array(node2['pore.shape_factor'])
    net['pore.clay_volume'] = sp.array(node2['pore.clay_volume'])
    if network is None:
        network = GenericNetwork()
    network = cls._update_network(network=network, net=net)
    # Use OpenPNM Tools to clean up network
    # Trim throats connected to 'inlet' or 'outlet' reservoirs
    # (reservoir pores appear as -1 / -2 indices in throat.conns).
    trim1 = sp.where(sp.any(net['throat.conns'] == -1, axis=1))[0]
    # Apply 'outlet' label to these pores
    outlets = network['throat.conns'][trim1, 1]
    network['pore.outlets'] = False
    network['pore.outlets'][outlets] = True
    trim2 = sp.where(sp.any(net['throat.conns'] == -2, axis=1))[0]
    # Apply 'inlet' label to these pores
    inlets = network['throat.conns'][trim2, 1]
    network['pore.inlets'] = False
    network['pore.inlets'][inlets] = True
    # Now trim the throats
    to_trim = sp.hstack([trim1, trim2])
    trim(network=network, throats=to_trim)
    return network.project
r""" Load data from the \'dat\' files located in specified folder. Parameters ---------- path : string The full path to the folder containing the set of \'dat\' files. prefix : string The file name prefix on each file. The data files are stored as \<prefix\>_node1.dat. network : OpenPNM Network Object If given then the data will be loaded on it and returned. If not given, a Network will be created and returned. Returns ------- An OpenPNM Project containing a GenericNetwork holding all the data
def screenshot(self):
    """Take a screenshot (with session check done by the HTTP layer).

    Returns:
        PIL.Image
    """
    from PIL import Image

    # The endpoint returns the PNG payload base64-encoded.
    raw_data = base64.b64decode(self.http.get('/screenshot').value)
    return Image.open(io.BytesIO(raw_data))
Take screenshot with session check Returns: PIL.Image
def set_runtime_value_bool(self, ihcid: int, value: bool) -> bool:
    """Set a bool runtime value, re-authenticating once if the first
    attempt fails.
    """
    if self.client.set_runtime_value_bool(ihcid, value):
        return True
    # First attempt failed -- refresh the session and retry exactly once.
    self.re_authenticate()
    return self.client.set_runtime_value_bool(ihcid, value)
Set bool runtime value with re-authenticate if needed
def _unicode_to_native(s):
    """Convert string from unicode to native format (required in Python 2)."""
    if not six.PY2:
        # Python 3: str is already the native text type.
        return s
    return s.encode("utf-8") if isinstance(s, unicode) else s
Convert string from unicode to native format (required in Python 2).
def set_freq(self, fout, freq):
    """Set a new output frequency.

    :param fout: the real current frequency at the output
    :param freq: the new required frequency
    """
    hsdiv_tuple = (4, 5, 6, 7, 9, 11)  # possible dividers
    n1div_tuple = (1,) + tuple(range(2, 129, 2))  #
    fdco_min = 5670.0  # set maximum as minimum
    hsdiv = self.get_hs_div()  # read current dividers
    n1div = self.get_n1_div()  #
    # Relative frequency change in ppm (assumes fout/freq share units --
    # TODO confirm the unit convention against the device datasheet).
    if abs((freq-fout)*1e6/fout) > 3500:
        # Large change of frequency: recompute both dividers and RFREQ.
        fdco = fout * hsdiv * n1div  # calculate high frequency oscillator
        fxtal = fdco / self.get_rfreq()  # should be fxtal = 114.285
        for hsdiv_iter in hsdiv_tuple:
            # find dividers with minimal power consumption
            for n1div_iter in n1div_tuple:
                fdco_new = freq * hsdiv_iter * n1div_iter
                # Keep the DCO inside its valid range and prefer the
                # lowest achievable DCO frequency.
                if (fdco_new >= 4850) and (fdco_new <= 5670):
                    if (fdco_new <= fdco_min):
                        fdco_min = fdco_new
                        hsdiv = hsdiv_iter
                        n1div = n1div_iter
        rfreq = fdco_min / fxtal
        self.freeze_dco()  # write registers
        self.set_hs_div(hsdiv)
        self.set_n1_div(n1div)
        self.set_rfreq(rfreq)
        self.unfreeze_dco()
        self.new_freq()
    else:
        # Small change of frequency: scale RFREQ only, dividers unchanged.
        rfreq = self.get_rfreq() * (freq/fout)
        self.freeze_m()  # write registers
        self.set_rfreq(rfreq)
        self.unfreeze_m()
Sets new output frequency, required parameters are real current frequency at output and new required frequency.
def decode(self,
           initial_state: State,
           transition_function: TransitionFunction,
           supervision: SupervisionType) -> Dict[str, torch.Tensor]:
    """
    Takes an initial state object, a means of transitioning from state to
    state, and a supervision signal, and uses the supervision to train the
    transition function to pick "good" states.

    This function should typically return a ``loss`` key during training,
    which the ``Model`` will use as its loss.

    Parameters
    ----------
    initial_state : ``State``
        This is the initial state for decoding, typically initialized after
        running some kind of encoder on some inputs.
    transition_function : ``TransitionFunction``
        This is the transition function that scores all possible actions
        that can be taken in a given state, and returns a ranked list of
        next states at each step of decoding.
    supervision : ``SupervisionType``
        This is the supervision that is used to train the
        ``transition_function`` function to pick "good" states. You can use
        whatever kind of supervision you want (e.g., a single "gold" action
        sequence, a set of possible "gold" action sequences, a reward
        function, etc.). We use ``typing.Generics`` to make sure that our
        static type checker is happy with how you've matched the
        supervision that you provide in the model to the ``DecoderTrainer``
        that you want to use.
    """
    # Abstract hook: concrete trainers must implement the decode strategy.
    raise NotImplementedError
Takes an initial state object, a means of transitioning from state to state, and a supervision signal, and uses the supervision to train the transition function to pick "good" states. This function should typically return a ``loss`` key during training, which the ``Model`` will use as its loss. Parameters ---------- initial_state : ``State`` This is the initial state for decoding, typically initialized after running some kind of encoder on some inputs. transition_function : ``TransitionFunction`` This is the transition function that scores all possible actions that can be taken in a given state, and returns a ranked list of next states at each step of decoding. supervision : ``SupervisionType`` This is the supervision that is used to train the ``transition_function`` function to pick "good" states. You can use whatever kind of supervision you want (e.g., a single "gold" action sequence, a set of possible "gold" action sequences, a reward function, etc.). We use ``typing.Generics`` to make sure that our static type checker is happy with how you've matched the supervision that you provide in the model to the ``DecoderTrainer`` that you want to use.
def _set_igp_sync(self, v, load=False):
    """
    Setter method for igp_sync, mapped from YANG variable
    /mpls_state/rsvp/igp_sync (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_igp_sync is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_igp_sync() directly.

    YANG Description: MPLS Rsvp IGP Synchronization information
    """
    if hasattr(v, "_utype"):
        # Unwrap an already-typed value back to its native type first.
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container type; raises on
        # any value that is not compatible with the container schema.
        t = YANGDynClass(v,base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """igp_sync must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=igp_sync.igp_sync, is_container='container', presence=False, yang_name="igp-sync", rest_name="igp-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__igp_sync = t
    if hasattr(self, '_set'):
        self._set()
Setter method for igp_sync, mapped from YANG variable /mpls_state/rsvp/igp_sync (container) If this variable is read-only (config: false) in the source YANG file, then _set_igp_sync is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igp_sync() directly. YANG Description: MPLS Rsvp IGP Synchronization information
def run_main(args: argparse.Namespace, do_exit=True) -> None:
    """Run all configured checks and exit.

    To extend this tool, use this function and set do_exit to False to get
    returned the status code.
    """
    if args.init:
        generate()
        return None  # exit after generate instead of starting to lint

    handler = CheckHandler(
        file=args.config_file, out_json=args.json, files=args.files)

    # Run style, lint and security checks in that order.
    for provider in (get_stylers, get_linters, get_security):
        for check in provider():
            handler.run_linter(check())

    for tool_cls in get_tools():
        tool = tool_cls()
        # Only run pypi if everything else passed
        if tool.name == "pypi" and handler.status_code != 0:
            continue
        handler.run_linter(tool)

    if do_exit:
        handler.exit()
    return handler.status_code
Runs the checks and exits. To extend this tool, use this function and set do_exit to False to get returned the status code.
def validate_config(self, values, argv=None, strict=False):
    """Validate all config values through the command-line parser.

    This takes all supplied options (which could have been retrieved from
    a number of sources (such as CLI, env vars, etc...) and then validates
    them by running them through argparser (and raises SystemExit on
    failure).

    :returns dict: key/values for all config values (from all sources)
    :raises: SystemExit
    """
    # NOTE(review): the nesting below was reconstructed from a collapsed
    # source line -- confirm the placement of the `kwargs['default']`
    # assignment and the Option() construction against upstream.
    options = []
    for option in self._options:
        kwargs = option.kwargs.copy()
        if option.name in values:
            if 'default' in kwargs:
                # Since we're overriding defaults, we need to
                # preserve the default value for the help text:
                help_text = kwargs.get('help')
                if help_text:
                    if '(default: ' not in help_text:
                        kwargs['help'] = '%s (default: %s)' % (
                            help_text, kwargs['default']
                        )
            kwargs['default'] = values[option.name]
            kwargs['required'] = False  # since we have a value
        temp = Option(*option.args, **kwargs)
        options.append(temp)
    parser = self.build_parser(options,
                               formatter_class=argparse.HelpFormatter)
    if argv:
        parsed, extras = parser.parse_known_args(argv[1:])
        if extras:
            # Drop pass-through args and re-parse what remains.
            valid, _ = self.parse_passthru_args(argv[1:])
            parsed, extras = parser.parse_known_args(valid)
            if extras and strict:  # still
                # Strict mode: let argparse raise SystemExit itself.
                self.build_parser(options)
                parser.parse_args(argv[1:])
    else:
        parsed = parser.parse_args([])
    results = vars(parsed)
    raise_for_group = {}
    for option in self._options:
        if option.kwargs.get('required'):
            if option.dest not in results or results[option.dest] is None:
                # Required and missing: defer mutex-group members so a
                # satisfied sibling can clear the group later.
                if getattr(option, '_mutexgroup', None):
                    raise_for_group.setdefault(option._mutexgroup, [])
                    raise_for_group[option._mutexgroup].append(
                        option._action)
                else:
                    raise SystemExit("'%s' is required. See --help "
                                     "for more info." % option.name)
            else:
                if getattr(option, '_mutexgroup', None):
                    raise_for_group.pop(option._mutexgroup, None)
    if raise_for_group:
        # NOTE(review): `.values()[0]` assumes Python 2 (list); on
        # Python 3 dict views are not indexable and this raises
        # TypeError -- confirm the supported Python versions.
        optstrings = [str(k.option_strings) for k in
                      raise_for_group.values()[0]]
        msg = "One of %s required. " % " ,".join(optstrings)
        raise SystemExit(msg + "See --help for more info.")
    return results
Validate all config values through the command-line parser. This takes all supplied options (which could have been retrieved from a number of sources (such as CLI, env vars, etc...) and then validates them by running them through argparser (and raises SystemExit on failure). :returns dict: key/values for all config values (from all sources) :raises: SystemExit
def has_entities(status):
    """Return True if a Status object has any entities.

    Args:
        status: either a tweepy.Status object or a dict returned from
            the Twitter API
    """
    # tweepy.Status exposes `.entities`; the raw API dict uses a key.
    try:
        entity_lists = status.entities.values()
    except AttributeError:
        entity_lists = status['entities'].values()
    return sum(len(v) for v in entity_lists) > 0
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
async def officers(self, root):
    """Regional Officers.

    Does not include the Founder or the Delegate, unless they have
    additional titles as Officers.  In the correct order.

    Returns
    -------
    an :class:`ApiQuery` of a list of :class:`Officer`
    """
    # The ORDER subelement encodes each officer's position in the roster.
    elems = root.find('OFFICERS')
    ordered = sorted(elems, key=lambda elem: int(elem.find('ORDER').text))
    return [Officer(elem) for elem in ordered]
Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer`
def get_starsep_RaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg):
    """Calculate the separation between two sky positions, formatted as a
    sexagesimal string.
    """
    separation = deltaStarsRaDecDeg(ra1_deg, dec1_deg, ra2_deg, dec2_deg)
    sgn, deg, mn, sec = degToDms(separation)
    # Omit the degrees field entirely when it is zero.
    if deg == 0:
        return '%02d:%06.3f' % (mn, sec)
    return '%02d:%02d:%06.3f' % (deg, mn, sec)
Calculate separation.
def prepare(self): """Un-serialize data from data attribute and add instance_id key if necessary :return: None """ # Maybe the Brok is a old daemon one or was already prepared # if so, the data is already ok if hasattr(self, 'prepared') and not self.prepared: self.data = unserialize(self.data) if self.instance_id: self.data['instance_id'] = self.instance_id self.prepared = True
Un-serialize data from data attribute and add instance_id key if necessary :return: None
def runWizard(self):
    """Runs the current wizard, accepting the dialog on success."""
    plugin = self.currentPlugin()
    # Accept only when a plugin exists and its wizard completed.
    if plugin and plugin.runWizard(self):
        self.accept()
Runs the current wizard.
def get_mac_acl_for_intf_input_interface_type(self, **kwargs):
    """Auto Generated Code"""
    # The initial "config" element is created then immediately discarded
    # (the original code rebinds the name); the operation element itself
    # is what gets passed to the callback.
    config = ET.Element("config")
    op_elem = ET.Element("get_mac_acl_for_intf")
    config = op_elem
    input_elem = ET.SubElement(op_elem, "input")
    iface_elem = ET.SubElement(input_elem, "interface-type")
    iface_elem.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def mosaic_info(name, pretty):
    '''Get information for a specific mosaic'''
    client = clientv1()
    response = call_and_wrap(client.get_mosaic_by_name, name)
    echo_json_response(response, pretty)
Get information for a specific mosaic
def _IsHomeDir(self, subject, token):
    """Checks user access permissions for paths under aff4:/users."""
    helper = CheckAccessHelper("IsHomeDir")
    # A user may only touch their own subtree under aff4:/users.
    helper.Allow("aff4:/users/%s" % token.username)
    helper.Allow("aff4:/users/%s/*" % token.username)
    try:
        return helper.CheckAccess(subject, token)
    except access_control.UnauthorizedAccess:
        raise access_control.UnauthorizedAccess(
            "User can only access their home directory.", subject=subject)
Checks user access permissions for paths under aff4:/users.
def _is_valid_inherit_element(self, element):
    """
    Check that the children of element can be manipulated to apply the
    CSS properties.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :return: True if the children of element can be manipulated, False
             otherwise.
    :rtype: bool
    """
    # pylint: disable=no-self-use
    # Elements flagged to be ignored are never manipulated.
    if element.has_attribute(CommonFunctions.DATA_IGNORE):
        return False
    tag = element.get_tag_name()
    return tag in AccessibleCSSImplementation.VALID_INHERIT_TAGS
Check that the children of element can be manipulated to apply the CSS properties. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: True if the children of element can be manipulated to apply the CSS properties or False if the children of element cannot be manipulated to apply the CSS properties. :rtype: bool
def _normalize_correlation_data(self, corr_data, norm_unit):
    """Normalize the correlation data if necessary.

    Fisher-transform and then z-score the data for every ``norm_unit``
    samples if ``norm_unit`` > 1.

    Parameters
    ----------
    corr_data: correlation data in shape
        [num_samples, num_processed_voxels, num_voxels]
    norm_unit: int
        the number of samples on which the normalization is performed

    Returns
    -------
    normalized_corr_data: the normalized correlation data, same shape
        as the input
    """
    # Nothing to do for a trivial normalization unit.
    if norm_unit <= 1:
        return corr_data

    num_samples = len(corr_data)
    [_, d2, d3] = corr_data.shape
    # Flatten the voxel dimensions so the extension sees a 2D layout;
    # for contiguous arrays reshape returns a view, so the in-place
    # normalization below affects corr_data's storage.
    flattened = corr_data.reshape(1, num_samples, d2 * d3)
    fcma_extension.normalization(flattened, norm_unit)
    normalized_corr_data = flattened.reshape(num_samples, d2, d3)
    logger.debug(
        'normalization done'
    )
    return normalized_corr_data
Normalize the correlation data if necessary. Fisher-transform and then z-score the data for every norm_unit samples if norm_unit > 1. Parameters ---------- corr_data: the correlation data in shape [num_samples, num_processed_voxels, num_voxels] norm_unit: int the number of samples on which the normalization is performed Returns ------- normalized_corr_data: the normalized correlation data in shape [num_samples, num_voxels, num_voxels]
def change_user_password(self, ID, data):
    """Change password of a User.

    API reference:
    http://teampasswordmanager.com/docs/api-users/#change_password
    """
    log.info('Change user %s password' % ID)
    endpoint = 'users/%s/change_password.json' % ID
    self.put(endpoint, data)
Change password of a User.
async def set_review(self, **params):
    """Writes review for content

    Accepts:
        - cid
        - review
        - public_key
        - rating
        - txid
        - coinid
    """
    # The payload may arrive wrapped in a "message" envelope as a JSON
    # string; unwrap it first.
    if params.get("message"):
        params = json.loads(params.get("message", "{}"))
    if not params:
        return {"error":400, "reason":"Missed required fields"}

    cid = int(params.get("cid", 0))
    txid = params.get("txid")
    coinid = params.get("coinid")
    # Test-network coin ids map onto the mainnet database name.
    # FIX: narrowed the original bare `except:` to AttributeError, which
    # is what a missing/non-string coinid actually raises here.
    try:
        coinid = coinid.replace("TEST", "")
    except AttributeError:
        pass

    # The content must exist before a review can be attached to it.
    database = client[coinid]
    content_collection = database[settings.CONTENT]
    content = await content_collection.find_one({"cid":cid})
    if not content:
        return {"error":404, "reason":"Not found current content"}

    database = client[coinid]
    review_collection = database[settings.REVIEW]
    # NOTE(review): only cid/txid/coinid are stored; review text, rating
    # and public_key from the docstring are not persisted here -- confirm
    # whether another handler stores them.
    await review_collection.insert_one({"cid":cid, "confirmed":None,
                                        "txid":txid, "coinid":coinid})
    return {"result":"ok"}
Writes review for content Accepts: - cid - review - public_key - rating - txid - coinid
def topological_sort(dag):
    """
    topological sort

    :param dag: directed acyclic graph mapping each node to the collection
        of nodes it depends on (its incoming edges)
    :type dag: dict
    :raises CircularDependencyError: if the graph contains a cycle

    .. seealso::
        `Topographical Sorting
        <http://en.wikipedia.org/wiki/Topological_sorting>`_,
        `Directed Acyclic Graph (DAG)
        <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
    """
    # FIX: replaced Python-2-only dict.iteritems()/dict.viewkeys() with
    # items()/set(dag), which work on both Python 2 and 3.
    # Seed with the nodes that have no incoming edges.
    topsort = [node for node, edge in dag.items() if not edge]
    # loop through nodes until topologically sorted
    while len(topsort) < len(dag):
        num_nodes = len(topsort)  # number of nodes sorted so far
        # Consider only the nodes not sorted yet.
        for node in set(dag) - set(topsort):
            # A node can be emitted once all of its dependencies are sorted.
            if set(dag[node]) <= set(topsort):
                topsort.append(node)
                break
        # A full pass emitted nothing: the remainder is circular.
        if len(topsort) == num_nodes:
            raise CircularDependencyError(set(dag) - set(topsort))
    return topsort
topological sort :param dag: directed acyclic graph :type dag: dict .. seealso:: `Topographical Sorting <http://en.wikipedia.org/wiki/Topological_sorting>`_, `Directed Acyclic Graph (DAG) <https://en.wikipedia.org/wiki/Directed_acyclic_graph>`_
def get_gradebook_ids_by_grade_system(self, grade_system_id):
    """Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``.

    arg:    grade_system_id (osid.id.Id): ``Id`` of a ``GradeSystem``
    return: (osid.id.IdList) - list of gradebook ``Ids``
    raise:  NotFound - ``grade_system_id`` is not found
    raise:  NullArgument - ``grade_system_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_bin_ids_by_resource
    mgr = self._get_provider_manager('GRADING', local=True)
    lookup_session = mgr.get_grade_system_lookup_session(proxy=self._proxy)
    lookup_session.use_federated_gradebook_view()
    grade_system = lookup_session.get_grade_system(grade_system_id)
    # Convert the stored id strings into Id objects.
    assigned = grade_system._my_map['assignedGradebookIds']
    return IdList([Id(idstr) for idstr in assigned])
Gets the list of ``Gradebook`` ``Ids`` mapped to a ``GradeSystem``. arg: grade_system_id (osid.id.Id): ``Id`` of a ``GradeSystem`` return: (osid.id.IdList) - list of gradebook ``Ids`` raise: NotFound - ``grade_system_id`` is not found raise: NullArgument - ``grade_system_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def get_group_node_size(node):
    """
    Shared getter for AddrmapNode and RegfileNode's "size" property
    """
    # After structural placement, children are sorted, so the last child
    # marks the far end of this group's address space.
    children = node.inst.children
    if not children or not isinstance(children[-1], comp.AddressableComponent):
        # No addressable child exists.
        return 0
    last_child_node = Node._factory(children[-1], node.env, node)
    # Size is the last child's offset plus its own total size.
    return last_child_node.inst.addr_offset + last_child_node.total_size
Shared getter for AddrmapNode and RegfileNode's "size" property
def mod_root(a, p):
    """ Return a square root of `a' modulo p.

    Implements the Tonelli-Shanks algorithm. Raises ValueError when `a'
    is not a quadratic residue mod p.

    NOTE(review): relies on gmpy mpz bit methods (getbit/setbit), so p is
    assumed to be a gmpy.mpz value and p an odd prime -- confirm with
    callers.
    """
    if a == 0:
        return 0
    if not mod_issquare(a, p):
        # No square root exists for a non-residue.
        raise ValueError
    # Find the smallest quadratic non-residue n mod p.
    n = 2
    while mod_issquare(n, p):
        n += 1
    # Factor p - 1 as q * 2**r with q odd (r = number of trailing zero bits).
    q = p - 1
    r = 0
    while not q.getbit(r):
        r += 1
    q = q >> r
    # y = n**q generates the 2-power part of the multiplicative group.
    y = pow(n, q, p)
    # First candidate: x = a**((q+1)/2), with b = a**q tracking the error
    # (h = (q-1)/2 since q is odd).
    h = q >> 1
    b = pow(a, h, p)
    x = (a * b) % p
    b = (b * x) % p
    # Iteratively reduce the order of b; when b == 1, x is the root.
    while b != 1:
        # Find m such that b has order 2**m.
        h = (b * b) % p
        m = 1
        while h != 1:
            h = (h * h) % p
            m += 1
        # h := 2**(r - m - 1); t = y**h is the correction factor.
        h = gmpy.mpz(0)
        h = h.setbit(r - m - 1)
        t = pow(y, h, p)
        y = (t * t) % p
        r = m
        x = (x * t) % p
        b = (b * y) % p
    return x
Return a root of `a' modulo p
def cells(self):
    """The number of cells in the MOC.

    This gives the total number of cells at all orders,
    with cells from every order counted equally.

    >>> m = MOC(0, (1, 2))
    >>> m.cells
    2
    """
    # Iterating the MOC yields (order, cells) pairs; total their sizes.
    return sum(len(order_cells) for (_, order_cells) in self)
The number of cells in the MOC. This gives the total number of cells at all orders, with cells from every order counted equally. >>> m = MOC(0, (1, 2)) >>> m.cells 2
def push_all(self, record_shard_pairs):
    """Push multiple (record, shard) pairs at once, with only one
    :meth:`heapq.heapify` call to maintain order.

    :param record_shard_pairs: list of ``(record, shard)`` tuples
        (see :func:`~bloop.stream.buffer.RecordBuffer.push`).
    """
    # Appending everything first and heapifying once (O(n)) beats n
    # individual heappush calls (O(n log n)).
    self.heap.extend(
        heap_item(self.clock, record, shard)
        for record, shard in record_shard_pairs)
    heapq.heapify(self.heap)
Push multiple (record, shard) pairs at once, with only one :meth:`heapq.heapify` call to maintain order. :param record_shard_pairs: list of ``(record, shard)`` tuples (see :func:`~bloop.stream.buffer.RecordBuffer.push`).
def log_tail(self, nlines=10):
    """Return the last ``nlines`` lines of the log file."""
    log_path = os.path.join(self.working_dir, self.log_name)
    # A bounded deque keeps only the trailing lines while streaming
    # through the file, so memory stays O(nlines).
    with open(log_path) as fp:
        tail = collections.deque(fp, maxlen=nlines)
    return ''.join(tail)
Return the last ``nlines`` lines of the log file
def _create_default_config_file(self):
    """
    If config file does not exists create and set default values.
    """
    logger.info('Initialize Maya launcher, creating config file...\n')

    # Lay out the skeleton sections first.
    for section in (self.DEFAULTS, self.PATTERNS,
                    self.ENVIRONMENTS, self.EXECUTABLES):
        self.add_section(section)

    # Fill in the default option values.
    self.set(self.DEFAULTS, 'executable', None)
    self.set(self.DEFAULTS, 'environment', None)
    self.set(self.PATTERNS, 'exclude', ', '.join(self.EXLUDE_PATTERNS))
    self.set(self.PATTERNS, 'icon_ext', ', '.join(self.ICON_EXTENSIONS))

    # Make sure the parent directory exists before writing the file.
    self.config_file.parent.mkdir(exist_ok=True)
    self.config_file.touch()
    with self.config_file.open('wb') as f:
        self.write(f)

    # If this function is run inform the user that a new file has been
    # created.
    sys.exit('Maya launcher has successfully created config file at:\n'
             ' "{}"'.format(str(self.config_file)))
If config file does not exists create and set default values.
def parse_args():
    """
    Parse the command line arguments.

    Returns the fixed-up argparse namespace on success, or False when a
    --format aead consistency check fails (after printing to stderr).
    """
    parser = argparse.ArgumentParser(description = 'Decrypt AEADs',
                                     add_help = True,
                                     formatter_class = argparse.ArgumentDefaultsHelpFormatter,
                                     )
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true', default=False,
                        help='Enable verbose operation',
                        )
    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true', default=False,
                        help='Enable debug operation',
                        )
    parser.add_argument('--format',
                        dest='format',
                        default='raw',
                        help='Select output format (aead, raw or yubikey-csv)',
                        )
    parser.add_argument('--output-dir',
                        dest='output_dir',
                        help='Output dir basename (for --format aead)',
                        metavar='DIR',
                        )
    parser.add_argument('--print-filename',
                        dest='print_filename',
                        action='store_true', default=False,
                        help='Prefix each row with the AEAD filename',
                        )
    parser.add_argument('--key-handle',
                        dest='key_handle',
                        help='Key handle used when generating the AEADs.',
                        metavar='HANDLE',
                        )
    parser.add_argument('--key-handle-out',
                        dest='key_handle_out',
                        help='Key handle used when generating *new* AEADs (with --format aead).',
                        metavar='HANDLE',
                        )
    parser.add_argument('--aes-key',
                        dest='aes_key',
                        required=True,
                        help='AES key used when generating the AEADs.',
                        metavar='HEXSTR',
                        )
    parser.add_argument('--aes-key-out',
                        dest='aes_key_out',
                        required=False,
                        help='AES key used when generating *new* AEADs (with --format aead).',
                        metavar='HEXSTR',
                        )
    parser.add_argument('--start-public-id',
                        dest='start_id',
                        required=False, default=None,
                        help='The first public id to decrypt',
                        metavar='INT-OR-MODHEX',
                        )
    parser.add_argument('--stop-public-id',
                        dest='stop_id',
                        required=False, default=None,
                        help='The last public id to decrypt',
                        metavar='INT-OR-MODHEX',
                        )
    parser.add_argument('--fail-fast',
                        dest='fail_fast',
                        action='store_true', default=False,
                        help='Terminate on the first AEAD failure, rather than keep going.',
                        )
    parser.add_argument('paths',
                        nargs='+',
                        help='Files and/or directories to process.',
                        metavar='FILE-OR-DIR'
                        )

    args = parser.parse_args()

    # argument fixups
    args.format = args.format.lower()
    # NOTE: str.decode('hex') is Python 2 only.
    args.aes_key = args.aes_key.decode('hex')
    if args.key_handle:
        args.key_handle = pyhsm.util.key_handle_to_int(args.key_handle)
    # Public ids may be given as plain integers or as modhex strings;
    # try int() first and fall back to modhex decoding.
    if args.start_id is not None:
        try:
            n = int(args.start_id)
        except ValueError:
            hexstr = pyhsm.yubikey.modhex_decode(args.start_id)
            n = int(hexstr, 16)
        args.start_id = n
    if args.stop_id is not None:
        try:
            n = int(args.stop_id)
        except ValueError:
            hexstr = pyhsm.yubikey.modhex_decode(args.stop_id)
            n = int(hexstr, 16)
        args.stop_id = n

    # some checks: --format aead requires the output options.
    if args.format == 'aead':
        if not args.output_dir:
            sys.stderr.write("error: --output-dir is required when using --format aead.\n")
            return False
        if not os.path.isdir(args.output_dir):
            sys.stderr.write("error: Output directory '%s' not found\n" % (args.output_dir))
            return False
        if not args.aes_key_out:
            sys.stderr.write("error: --aes-key-out is required when using --format aead.\n")
            return False
        if not args.key_handle_out:
            sys.stderr.write("error: --key-handle-out is required when using --format aead.\n")
            return False
        # argument fixups
        args.aes_key_out = args.aes_key_out.decode('hex')
        args.key_handle_out_orig = args.key_handle_out  # save to use in AEAD output paths
        args.key_handle_out = pyhsm.util.key_handle_to_int(args.key_handle_out)

    return args
Parse the command line arguments
def __load_settings_from_file(self):
    """
    Loads settings info from the settings json file

    :returns: True if the settings info is valid
    :rtype: boolean
    :raises OneLogin_Saml2_Error: if the settings file cannot be found
    """
    filename = self.get_base_path() + 'settings.json'

    if not exists(filename):
        raise OneLogin_Saml2_Error(
            'Settings file not found: %s',
            OneLogin_Saml2_Error.SETTINGS_FILE_NOT_FOUND,
            filename
        )

    # In the php toolkit instead of being a json file it is a php file and
    # it is directly included
    # FIX: use context managers so the file handles are closed even when
    # json.load raises (the original closed them manually).
    with open(filename, 'r') as json_data:
        settings = json.load(json_data)

    advanced_filename = self.get_base_path() + 'advanced_settings.json'
    if exists(advanced_filename):
        with open(advanced_filename, 'r') as json_data:
            settings.update(json.load(json_data))  # Merge settings

    return self.__load_settings_from_dict(settings)
Loads settings info from the settings json file :returns: True if the settings info is valid :rtype: boolean
def _proc_gnusparse_00(self, next, pax_headers, buf):
    """Process a GNU tar extended sparse header, version 0.0.
    """
    # Each sparse extent is described by an offset/numbytes pair of pax
    # records; collect them in order and zip them into (offset, size)
    # tuples on the target member.
    offsets = [int(match.group(1)) for match in
               re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf)]
    numbytes = [int(match.group(1)) for match in
                re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf)]
    next.sparse = list(zip(offsets, numbytes))
Process a GNU tar extended sparse header, version 0.0.
def createProfile(self, profile=None, clearLayout=True):
    """
    Prompts the user to create a new profile.

    :param profile: optional pre-built profile to edit; when omitted a
        blank profile or a snapshot of the current view is used.
    :param clearLayout: when True (and no explicit profile is given),
        start from a blank XViewProfile instead of a snapshot of the
        current view widget's layout.
    """
    if profile:
        prof = profile
    elif not self.viewWidget() or clearLayout:
        prof = XViewProfile()
    else:
        prof = self.viewWidget().saveProfile()

    # Unblock signals for the duration of the edit dialog, then restore
    # the previous blocking state.
    blocked = self.signalsBlocked()
    self.blockSignals(False)
    changed = self.editProfile(prof)
    self.blockSignals(blocked)

    # User cancelled or made no changes: nothing to add.
    if not changed:
        return

    act = self.addProfile(prof)
    act.setChecked(True)

    # update the interface
    if self.viewWidget() and (profile or clearLayout):
        self.viewWidget().restoreProfile(prof)

    if not self.signalsBlocked():
        self.profileCreated.emit(prof)
        self.profilesChanged.emit()
Prompts the user to create a new profile.
def mask(args):
    """
    %prog mask fastafile

    Mask the contaminants. By default, this will compare against UniVec_Core
    and Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`.
    Can perform FASTA tidy if requested.
    """
    # NB: the docstring above doubles as the CLI help text (OptionParser).
    p = OptionParser(mask.__doc__)
    p.add_option("--db",
                 help="Contaminant db other than Ecoli K12 [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    assert op.exists(fastafile)
    outfastafile = fastafile.rsplit(".", 1)[0] + ".masked.fasta"

    # BLAST against the vector database, then against E. coli (either a
    # user-supplied db or the downloaded K-12 reference).
    vecbedfile = blast([fastafile])
    ecoliurl = \
        "ftp://ftp.ncbi.nih.gov/genomes/Bacteria/Escherichia_coli_K_12_substr__DH10B_uid58979/NC_010473.fna"
    ecolifile = opts.db or download(ecoliurl, filename="Ecoli.fasta")
    assert op.exists(ecolifile)
    ecolibedfile = blast([fastafile, "--db={0}".format(ecolifile)])

    # Merge nearby contaminant hits and mask them out of the FASTA.
    pipeline = " | ".join([
        "cat {0} {1}".format(vecbedfile, ecolibedfile),
        "mergeBed -nms -d 100 -i stdin",
        "maskFastaFromBed -fi {0} -bed stdin -fo {1}".format(
            fastafile, outfastafile),
    ])
    sh(pipeline)

    return tidy([outfastafile])
%prog mask fastafile Mask the contaminants. By default, this will compare against UniVec_Core and Ecoli.fasta. Merge the contaminant results, and use `maskFastaFromBed`. Can perform FASTA tidy if requested.
def orphan_entry(self, rval: RawObject) -> "ArrayEntry":
    """Return an isolated entry of the receiver.

    Args:
        rval: Raw object to be used for the returned entry.
    """
    value = self.entry_from_raw(rval)
    # The entry is detached: index 0, empty sibling lists on both sides,
    # and no parent instance node.
    return ArrayEntry(
        0, EmptyList(), EmptyList(), value, None, self, value.timestamp)
Return an isolated entry of the receiver. Args: rval: Raw object to be used for the returned entry.
def progress(count, total, suffix=''):
    '''
    Display progress bar
    sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    '''
    bar_len = 60
    filled_len = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    empty_len = bar_len - filled_len
    bar = '=' * filled_len + '-' * empty_len
    # Carriage return (no newline) so the next call redraws in place.
    line = '[%s] %s%s %s\r' % (bar, percents, '%', suffix)
    sys.stdout.write(line)
    sys.stdout.flush()
Display progress bar sources: https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def use_plenary_composition_view(self):
    """Pass through to provider CompositionLookupSession.use_plenary_composition_view"""
    # Record the requested view so later-created sessions pick it up.
    self._object_views['composition'] = PLENARY
    # self._get_provider_session('composition_lookup_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        # Not every provider session implements this view method; the
        # try/except also tolerates AttributeErrors raised inside it.
        try:
            session.use_plenary_composition_view()
        except AttributeError:
            pass
Pass through to provider CompositionLookupSession.use_plenary_composition_view
def get_cdd_only_candidate_models(
    data, minimum_non_zero_cdd, minimum_total_cdd, beta_cdd_maximum_p_value, weights_col
):
    """ Return a list of all possible candidate cdd-only models.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and 1 to n
        columns with names of the form ``cdd_<balance_point>``. All columns
        with names of this form will be used to fit a candidate model.
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_cdd : :any:`int`
        Minimum allowable number of non-zero cooling degree day values.
    minimum_total_cdd : :any:`float`
        Minimum allowable total sum of cooling degree day values.
    beta_cdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta cdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.

    Returns
    -------
    candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel`
        A list of cdd-only candidate models, with any associated warnings.
    """
    # Balance points are encoded in the column names, e.g. "cdd_65" -> 65.
    candidate_models = []
    for col in data.columns:
        if not col.startswith("cdd"):
            continue
        balance_point = int(col[4:])
        candidate_models.append(
            get_single_cdd_only_candidate_model(
                data,
                minimum_non_zero_cdd,
                minimum_total_cdd,
                beta_cdd_maximum_p_value,
                weights_col,
                balance_point,
            )
        )
    return candidate_models
Return a list of all possible candidate cdd-only models. Parameters ---------- data : :any:`pandas.DataFrame` A DataFrame containing at least the column ``meter_value`` and 1 to n columns with names of the form ``cdd_<balance_point>``. All columns with names of this form will be used to fit a candidate model. DataFrames of this form can be made using the :any:`eemeter.create_caltrack_daily_design_matrix` or :any:`eemeter.create_caltrack_billing_design_matrix` methods. minimum_non_zero_cdd : :any:`int` Minimum allowable number of non-zero cooling degree day values. minimum_total_cdd : :any:`float` Minimum allowable total sum of cooling degree day values. beta_cdd_maximum_p_value : :any:`float` The maximum allowable p-value of the beta cdd parameter. weights_col : :any:`str` or None The name of the column (if any) in ``data`` to use as weights. Returns ------- candidate_models : :any:`list` of :any:`CalTRACKUsagePerDayCandidateModel` A list of cdd-only candidate models, with any associated warnings.
def remove_child(self, child):
    """
    Removes a child from this node (parent and child nodes still exit
    but are no longer connected).

    :raises TreeError: if ``child`` is not a child of this node
    :return: the detached child
    """
    if child not in self.children:
        raise TreeError("child not found")
    self.children.remove(child)
    # Sever the back-reference so both nodes are fully disconnected.
    child.up = None
    return child
Removes a child from this node (parent and child nodes still exit but are no longer connected).
def use_plenary_log_view(self):
    """Pass through to provider LogEntryLogSession.use_plenary_log_view"""
    # Record the requested view so later-created sessions pick it up.
    self._log_view = PLENARY
    # self._get_provider_session('log_entry_log_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        # Not every provider session implements this view method; the
        # try/except also tolerates AttributeErrors raised inside it.
        try:
            session.use_plenary_log_view()
        except AttributeError:
            pass
Pass through to provider LogEntryLogSession.use_plenary_log_view
def submit_task(self,
                function_descriptor,
                args,
                actor_id=None,
                actor_handle_id=None,
                actor_counter=0,
                actor_creation_id=None,
                actor_creation_dummy_object_id=None,
                max_actor_reconstructions=0,
                execution_dependencies=None,
                new_actor_handles=None,
                num_return_vals=None,
                resources=None,
                placement_resources=None,
                driver_id=None):
    """Submit a remote task to the scheduler.

    Tell the scheduler to schedule the execution of the function with
    function_descriptor with arguments args. Retrieve object IDs for
    the outputs of the function from the scheduler and immediately
    return them.

    Args:
        function_descriptor: The function descriptor to execute.
        args: The arguments to pass into the function. Arguments can
            be object IDs or they can be values. If they are values,
            they must be serializable objects.
        actor_id: The ID of the actor that this task is for.
        actor_counter: The counter of the actor task.
        actor_creation_id: The ID of the actor to create, if this is an
            actor creation task.
        actor_creation_dummy_object_id: If this task is an actor method,
            then this argument is the dummy object ID associated with the
            actor creation task for the corresponding actor.
        execution_dependencies: The execution dependencies for this
            task.
        num_return_vals: The number of return values this function
            should have.
        resources: The resource requirements for this task.
        placement_resources: The resources required for placing the
            task. If this is not provided or if it is an empty
            dictionary, then the placement resources will be equal to
            resources.
        driver_id: The ID of the relevant driver. This is almost always
            the driver ID of the driver that is currently running.
            However, in the exceptional case that an actor task is being
            dispatched to an actor created by a different driver, this
            should be the driver ID of the driver that created the
            actor.

    Returns:
        The return object IDs for this task.
    """
    with profiling.profile("submit_task"):
        # Normalize actor-related IDs: either both actor_id and
        # actor_handle_id are given (actor task) or neither (plain task).
        if actor_id is None:
            assert actor_handle_id is None
            actor_id = ActorID.nil()
            actor_handle_id = ActorHandleID.nil()
        else:
            assert actor_handle_id is not None

        if actor_creation_id is None:
            actor_creation_id = ActorID.nil()

        if actor_creation_dummy_object_id is None:
            actor_creation_dummy_object_id = ObjectID.nil()

        # Put large or complex arguments that are passed by value in the
        # object store first.
        args_for_raylet = []
        for arg in args:
            if isinstance(arg, ObjectID):
                args_for_raylet.append(arg)
            elif ray._raylet.check_simple_value(arg):
                # Small/simple values are passed inline.
                args_for_raylet.append(arg)
            else:
                args_for_raylet.append(put(arg))

        # By default, there are no execution dependencies.
        if execution_dependencies is None:
            execution_dependencies = []

        if new_actor_handles is None:
            new_actor_handles = []

        if driver_id is None:
            driver_id = self.task_driver_id

        if resources is None:
            raise ValueError("The resources dictionary is required.")
        # Validate resource quantities: nonnegative, and fractional only
        # below 1 (quantities >= 1 must be whole numbers).
        for value in resources.values():
            assert (isinstance(value, int) or isinstance(value, float))
            if value < 0:
                raise ValueError(
                    "Resource quantities must be nonnegative.")
            if (value >= 1 and isinstance(value, float)
                    and not value.is_integer()):
                raise ValueError(
                    "Resource quantities must all be whole numbers.")

        # Remove any resources with zero quantity requirements
        resources = {
            resource_label: resource_quantity
            for resource_label, resource_quantity in resources.items()
            if resource_quantity > 0
        }

        if placement_resources is None:
            placement_resources = {}

        # Increment the worker's task index to track how many tasks
        # have been submitted by the current task so far.
        self.task_context.task_index += 1
        # The parent task must be set for the submitted task.
        assert not self.current_task_id.is_nil()
        # Current driver id must not be nil when submitting a task.
        # Because every task must belong to a driver.
        assert not self.task_driver_id.is_nil()
        # Submit the task to raylet.
        function_descriptor_list = (
            function_descriptor.get_function_descriptor_list())
        assert isinstance(driver_id, DriverID)
        task = ray._raylet.Task(
            driver_id,
            function_descriptor_list,
            args_for_raylet,
            num_return_vals,
            self.current_task_id,
            self.task_context.task_index,
            actor_creation_id,
            actor_creation_dummy_object_id,
            max_actor_reconstructions,
            actor_id,
            actor_handle_id,
            actor_counter,
            new_actor_handles,
            execution_dependencies,
            resources,
            placement_resources,
        )
        self.raylet_client.submit_task(task)

    return task.returns()
Submit a remote task to the scheduler. Tell the scheduler to schedule the execution of the function with function_descriptor with arguments args. Retrieve object IDs for the outputs of the function from the scheduler and immediately return them. Args: function_descriptor: The function descriptor to execute. args: The arguments to pass into the function. Arguments can be object IDs or they can be values. If they are values, they must be serializable objects. actor_id: The ID of the actor that this task is for. actor_counter: The counter of the actor task. actor_creation_id: The ID of the actor to create, if this is an actor creation task. actor_creation_dummy_object_id: If this task is an actor method, then this argument is the dummy object ID associated with the actor creation task for the corresponding actor. execution_dependencies: The execution dependencies for this task. num_return_vals: The number of return values this function should have. resources: The resource requirements for this task. placement_resources: The resources required for placing the task. If this is not provided or if it is an empty dictionary, then the placement resources will be equal to resources. driver_id: The ID of the relevant driver. This is almost always the driver ID of the driver that is currently running. However, in the exceptional case that an actor task is being dispatched to an actor created by a different driver, this should be the driver ID of the driver that created the actor. Returns: The return object IDs for this task.
def restore_repository_from_recycle_bin(self, repository_details, project, repository_id):
    """RestoreRepositoryFromRecycleBin.
    [Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable.
    :param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details:
    :param str project: Project ID or project name
    :param str repository_id: The ID of the repository.
    :rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
    """
    # Serialize only the route parameters that were actually supplied.
    route_values = {
        route_name: self._serialize.url(param_name, value, 'str')
        for route_name, param_name, value in (
            ('project', 'project', project),
            ('repositoryId', 'repository_id', repository_id))
        if value is not None
    }
    content = self._serialize.body(repository_details,
                                   'GitRecycleBinRepositoryDetails')
    response = self._send(http_method='PATCH',
                          location_id='a663da97-81db-4eb3-8b83-287670f63073',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('GitRepository', response)
RestoreRepositoryFromRecycleBin. [Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable. :param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details: :param str project: Project ID or project name :param str repository_id: The ID of the repository. :rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
def grad_and_hess(self):
    """
    Computes self's gradient and Hessian. Used if the
    optimization method for a NormApprox doesn't
    use gradients and hessians, for instance fmin.
    """
    # FIX: replaced Python-2-only xrange with range (works on 2 and 3).
    for i in range(self.len):
        # First derivative along dimension i.
        di = self.diff(i)
        self.grad[i] = di
        # Second derivative gives the diagonal of the Hessian.
        self.hess[i, i] = self.diff(i, 2)

        if i < self.len - 1:
            # Mixed partials fill the off-diagonal entries; the Hessian
            # is symmetric, so each value is stored twice.
            for j in range(i + 1, self.len):
                dij = self.diff2(i, j)
                self.hess[i, j] = dij
                self.hess[j, i] = dij
Computes self's gradient and Hessian. Used if the optimization method for a NormApprox doesn't use gradients and hessians, for instance fmin.
def create_repository(cls, repository_data):
    """Create a standalone, in-memory repository.

    Using this function bypasses the `package_repository_manager`
    singleton. This is usually desired however, since in-memory
    repositories are for temporarily storing programmatically created
    packages, which we do not want to cache and that do not persist.

    Args:
        repository_data (dict): Repository data, see class docstring.

    Returns:
        `MemoryPackageRepository` object.
    """
    # Derive a unique location string from the data dict's identity.
    location = "memory{%s}" % hex(id(repository_data))
    repository = MemoryPackageRepository(
        location, ResourcePool(cache_size=None))
    repository.data = repository_data
    return repository
Create a standalone, in-memory repository. Using this function bypasses the `package_repository_manager` singleton. This is usually desired however, since in-memory repositories are for temporarily storing programmatically created packages, which we do not want to cache and that do not persist. Args: repository_data (dict): Repository data, see class docstring. Returns: `MemoryPackageRepository` object.
def validate(self, uri):
    """ Check an URI for compatibility with this specification. Return
    True if the URI is compatible.

    :param uri: an URI to check
    :return: bool
    """
    requirement = self.requirement()
    uri_component = uri.component(self.component())

    # A missing component is acceptable unless the component is required.
    if uri_component is None:
        return requirement != WURIComponentVerifier.Requirement.required

    # The component is present but must not be.
    if requirement == WURIComponentVerifier.Requirement.unsupported:
        return False

    # Validate the component value against the pattern, if one is set.
    re_obj = self.re_obj()
    if re_obj is None:
        return True
    return re_obj.match(uri_component) is not None
Check an URI for compatibility with this specification. Return True if the URI is compatible. :param uri: an URI to check :return: bool
def request_chunked(self, method, url, body=None, headers=None):
    """
    Alternative to the common request method, which sends the
    body with chunked encoding and not as one block
    """
    headers = HTTPHeaderDict(headers if headers is not None else {})
    # Let caller-supplied Accept-Encoding / Host headers win over the
    # ones putrequest would add automatically.
    skip_accept_encoding = 'accept-encoding' in headers
    skip_host = 'host' in headers
    self.putrequest(
        method,
        url,
        skip_accept_encoding=skip_accept_encoding,
        skip_host=skip_host
    )
    for header, value in headers.items():
        self.putheader(header, value)
    if 'transfer-encoding' not in headers:
        self.putheader('Transfer-Encoding', 'chunked')
    self.endheaders()

    if body is not None:
        stringish_types = six.string_types + (bytes,)
        if isinstance(body, stringish_types):
            # A single string/bytes body is sent as one chunk.
            body = (body,)
        for chunk in body:
            if not chunk:
                continue
            if not isinstance(chunk, bytes):
                chunk = chunk.encode('utf8')
            # Chunked framing: hex length, CRLF, payload, CRLF.
            len_str = hex(len(chunk))[2:]
            self.send(len_str.encode('utf-8'))
            self.send(b'\r\n')
            self.send(chunk)
            self.send(b'\r\n')

    # After the if clause, to always have a closed body
    self.send(b'0\r\n\r\n')
Alternative to the common request method, which sends the body with chunked encoding and not as one block
def insert_element_to_dict_of_dicts(dict_of_dicts: Dict[str, Dict[str, str]],
                                    first_key: str,
                                    second_key: str,
                                    contents):
    """
    Insert ``contents`` at ``dict_of_dicts[first_key][second_key]``,
    creating the inner dict if needed. A warning is emitted when an
    existing entry is overwritten.

    :param dict_of_dicts: two-level mapping, updated in place
    :param first_key: outer key
    :param second_key: inner key
    :param contents: value to store
    :return: None
    """
    # FIX: replaced redundant `key in d.keys()` membership tests and the
    # duplicated assignment branches with setdefault + one assignment.
    inner = dict_of_dicts.setdefault(first_key, {})
    if second_key in inner:
        warn('Overriding contents for ' + first_key + '/' + second_key)
    inner[second_key] = contents
Utility method :param dict_of_dicts: :param first_key: :param second_key: :param contents: :return:
def set_task_object(self, task_id, task_progress_object):
    """Register a progress bar described by a TaskProgress object.

    Replaces any bar already registered under the same identifier.

    :param task_id: Unique identifier for this progress bar.
    :param task_progress_object: TaskProgress instance carrying all of the
        bar's display settings.
    """
    options = {
        'total': task_progress_object.total,
        'prefix': task_progress_object.prefix,
        'suffix': task_progress_object.suffix,
        'decimals': task_progress_object.decimals,
        'bar_length': task_progress_object.bar_length,
        'keep_alive': task_progress_object.keep_alive,
        'display_time': task_progress_object.display_time,
    }
    self.set_task(task_id=task_id, **options)
Defines a new progress bar with the given information using a TaskProgress object. :param task_id: Unique identifier for this progress bar. Will erase if already existing. :param task_progress_object: TaskProgress object holding the progress bar information.
def logsumexp(a, axis=None, b=None, use_numexpr=True):
    """Numerically stable ``log(sum(exp(a)))`` (or ``log(sum(b*exp(a)))``).

    Shifts by the maximum before exponentiating so large inputs do not
    overflow.  Based on scipy.misc.logsumexp, with optional numexpr
    acceleration for large arrays.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int, optional
        Axis over which the sum is taken; all elements when None.
    b : array-like, optional
        Scaling factor for ``exp(a)``, broadcastable to `a`.
    use_numexpr : bool, optional, default=True
        Use numexpr (when available) to speed up the inner evaluation.

    Returns
    -------
    res : ndarray
        ``log(sum(exp(a)))`` (or ``log(sum(b*exp(a)))`` when `b` is given).
    """
    a = np.asarray(a)

    # Shift by the max; neutralise non-finite maxima so the shift is safe.
    a_max = np.amax(a, axis=axis, keepdims=True)
    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    # Short-circuits, so HAVE_NUMEXPR is never touched when disabled.
    fast = use_numexpr and HAVE_NUMEXPR
    if b is None:
        if fast:
            inner = numexpr.evaluate("exp(a - a_max)").sum(axis)
        else:
            inner = np.sum(np.exp(a - a_max), axis=axis)
    else:
        b = np.asarray(b)
        if fast:
            inner = numexpr.evaluate("b * exp(a - a_max)").sum(axis)
        else:
            inner = np.sum(b * np.exp(a - a_max), axis=axis)

    return np.log(inner) + np.squeeze(a_max, axis=axis)
Compute the log of the sum of exponentials of input elements. Parameters ---------- a : array_like Input array. axis : None or int, optional, default=None Axis or axes over which the sum is taken. By default `axis` is None, and all elements are summed. b : array-like, optional Scaling factor for exp(`a`) must be of the same shape as `a` or broadcastable to `a`. use_numexpr : bool, optional, default=True If True, use the numexpr library to speed up the calculation, which can give a 2-4X speedup when working with large arrays. Returns ------- res : ndarray The result, ``log(sum(exp(a)))`` calculated in a numerically more stable way. If `b` is given then ``log(sum(b*exp(a)))`` is returned. See Also -------- numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp (soon to be replaced with scipy.special.logsumexp) Notes ----- This is based on scipy.misc.logsumexp but with optional numexpr support for improved performance.
def smart_reroot(treefile, outgroupfile, outfile, format=0):
    """
    simple function to reroot Newick format tree using ete2

    Tree reading format options see here:
    http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees

    :param treefile: path to the Newick tree to reroot
    :param outgroupfile: file listing outgroup name prefixes, one per line
    :param outfile: path the rerooted tree is written to
    :param format: ete2 Newick format code, used for both read and write
    :return: ``outfile`` on success, ``treefile`` if no outgroup was found
    """
    tree = Tree(treefile, format=format)
    # NOTE(review): leaves are scanned in reverse order — presumably to
    # prefer later leaves on prefix ties; confirm with callers.
    leaves = [t.name for t in tree.get_leaves()][::-1]
    outgroup = []
    for o in must_open(outgroupfile):
        o = o.strip()
        # Match leaves by name prefix; stop at the first line that matched.
        for leaf in leaves:
            if leaf[:len(o)] == o:
                outgroup.append(leaf)
        if outgroup:
            break

    if not outgroup:
        print("Outgroup not found. Tree {0} cannot be rerooted.".format(treefile), file=sys.stderr)
        return treefile

    try:
        tree.set_outgroup(tree.get_common_ancestor(*outgroup))
    except ValueError:
        # get_common_ancestor fails for a single node; use the leaf itself.
        assert type(outgroup) == list
        outgroup = outgroup[0]
        tree.set_outgroup(outgroup)
    tree.write(outfile=outfile, format=format)

    logging.debug("Rerooted tree printed to {0}".format(outfile))
    return outfile
simple function to reroot Newick format tree using ete2 Tree reading format options see here: http://packages.python.org/ete2/tutorial/tutorial_trees.html#reading-newick-trees
def is_hidden_container(self, key, val):
    """Report whether *key* names one of the implicit container lists.

    The key is not one of the Mapfile keywords, and its value is a list.

    :param key: candidate key name
    :param val: the key's value
    :return: bool
    """
    hidden_keys = ("layers", "classes", "styles", "symbols", "labels",
                   "outputformats", "features", "scaletokens", "composites")
    return key in hidden_keys and isinstance(val, list)
The key is not one of the Mapfile keywords, and its values are a list
def run(self, force=False):
    """Execute every task whose next run time has arrived.

    :param force: when True, run every task regardless of schedule.
    """
    now = time.time()
    for task, info in self.tasks.items():
        due = force or now >= info.get('next_run', 0)
        if not due:
            continue
        task()
        # Schedule the next run relative to this pass's timestamp.
        info['next_run'] = now + info['interval']
Run all pending tasks; 'force' will run all tasks whether they're pending or not.
def fdate(self, *cols, precision: str="S", format: str=None):
    """
    Convert column values to formated date string

    :param \*cols: names of the colums
    :type \*cols: str, at least one
    :param precision: time precision: Y, M, D, H, Min S, defaults to "S";
        ignored when ``format`` is given
    :type precision: str, optional
    :param format: python strftime date format, defaults to None
    :type format: str, optional

    :example: ``ds.fdate("mycol1", "mycol2", precision)``
    """

    def formatdate(row):
        # Caller-supplied strftime format takes precedence.
        return row.strftime(format)

    def convert(row):
        # Map the precision flag to a strftime pattern; the default is
        # full second precision.
        encoded = '%Y-%m-%d %H:%M:%S'
        if precision == "Min":
            encoded = '%Y-%m-%d %H:%M'
        elif precision == "H":
            encoded = '%Y-%m-%d %H'
        elif precision == "D":
            encoded = '%Y-%m-%d'
        elif precision == "M":
            encoded = '%Y-%m'
        elif precision == "Y":
            encoded = '%Y'
        return row.strftime(encoded)

    try:
        for f in cols:
            try:
                if format is None:
                    self.df[f] = pd.to_datetime(self.df[f]).apply(convert)
                else:
                    self.df[f] = pd.to_datetime(
                        self.df[f]).apply(formatdate)
            except ValueError as e:
                # Unparseable dates abort the whole operation.
                self.err(e, "Can not convert date")
                return
    except KeyError:
        self.warning("Can not find colums " + " ".join(cols))
        return
    except Exception as e:
        self.err(e, "Can not process date col")
Convert column values to formated date string :param \*cols: names of the colums :type \*cols: str, at least one :param precision: time precision: Y, M, D, H, Min S, defaults to "S" :type precision: str, optional :param format: python date format, defaults to None :type format: str, optional :example: ``ds.fdate("mycol1", "mycol2", precision)``
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__
    method returning text and apply this decorator to the class.
    """
    if not six.PY2:
        # Python 3: text-returning __str__ is already correct.
        return klass

    # Python 2: move the text __str__ to __unicode__ and make __str__
    # emit UTF-8 encoded bytes.
    klass.__unicode__ = klass.__str__

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    klass.__str__ = __str__
    return klass
A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class.
def _init_map(self, record_types=None):
    """Initialize map for form.

    Seeds the form map with default display name, description and genus
    type, then lets the extensible-form mixin add record data.

    :param record_types: optional record types forwarded to
        ``OsidExtensibleForm._init_map``
    """
    OsidForm._init_map(self)
    # Copy the default dicts so later edits do not mutate the shared
    # class-level templates.
    self._my_map['displayName'] = dict(self._display_name_default)
    self._my_map['description'] = dict(self._description_default)
    self._my_map['genusTypeId'] = self._genus_type_default
    OsidExtensibleForm._init_map(self, record_types)
Initialize map for form
def process_rollout(self, batch_info, rollout: Rollout):
    """Process rollout for ALGO before any chunking/shuffling.

    Computes GAE advantages and bootstrapped returns and stores them as
    extra transition tensors on the rollout.

    :param batch_info: batch bookkeeping object (not used here)
    :param rollout: trajectory rollout to augment
    :return: the same rollout with 'advantages' and 'returns' tensors added
    """
    assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts"

    # Generalized Advantage Estimation with bootstrapped final values.
    advantages = discount_bootstrap_gae(
        rewards_buffer=rollout.transition_tensors['rewards'],
        dones_buffer=rollout.transition_tensors['dones'],
        values_buffer=rollout.transition_tensors['values'],
        final_values=rollout.rollout_tensors['final_values'],
        discount_factor=self.discount_factor,
        gae_lambda=self.gae_lambda,
        number_of_steps=rollout.num_steps
    )

    # Returns are the advantages plus the value baseline.
    returns = advantages + rollout.transition_tensors['values']

    rollout.transition_tensors['advantages'] = advantages
    rollout.transition_tensors['returns'] = returns

    return rollout
Process rollout for ALGO before any chunking/shuffling
def start(self):
    """Run the commands.

    Parses command-line arguments and dispatches to the selected
    subcommand.  Prints help and returns when no subcommand was given.
    Commands that declare ``app_dir_required`` are refused outside an app
    root directory.
    """
    self.check_dependencies()
    self.args = self.parser.parse_args()

    # Python 3 doesn't set the cmd if no args are given
    if not hasattr(self.args, 'cmd'):
        self.parser.print_help()
        return

    cmd = self.args.cmd
    try:
        if cmd.app_dir_required and not self.in_app_directory:
            raise EnvironmentError(
                "'enaml-native {}' must be run within an app root "
                "directory not: {}".format(cmd.title, os.getcwd()))
        cmd.run(self.args)
    except sh.ErrorReturnCode as e:
        # Shell command failures are re-raised unchanged for the caller.
        raise
Run the commands
def addUsage_Label(self, usage_label):
    '''Appends one Usage_Label to usage_labels.

    :param usage_label: label to append; must be a Usage_Label instance
    :raises Usage_LabelError: when *usage_label* has the wrong type
    '''
    if not isinstance(usage_label, Usage_Label):
        # BUG FIX: the original ``raise (Usage_LabelError, 'msg')`` tuple
        # form is a Python 2 idiom; on Python 3 raising a tuple is a
        # TypeError, so the intended exception was never raised.
        raise Usage_LabelError(
            'usage_label Type should be Usage_Label, not %s'
            % type(usage_label))
    self.usage_labels.append(usage_label)
Appends one Usage_Label to usage_labels
def _list_like_func(self, func, axis, *args, **kwargs):
    """Apply list-like function across given axis.

    Args:
        func: The list of functions to apply.
        axis: Target axis to apply the functions along (0 or 1).

    Returns:
        A new PandasQueryCompiler.
    """
    # Wrap the per-partition apply so each partition yields a DataFrame.
    func_prepared = self._prepare_method(
        lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs))
    )
    new_data = self._map_across_full_axis(axis, func_prepared)
    # When the function is list-like, the function names become the index/columns
    new_index = (
        [f if isinstance(f, string_types) else f.__name__ for f in func]
        if axis == 0
        else self.index
    )
    new_columns = (
        [f if isinstance(f, string_types) else f.__name__ for f in func]
        if axis == 1
        else self.columns
    )
    return self.__constructor__(new_data, new_index, new_columns)
Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
def serialize(self, method="urlencoded", lev=0, **kwargs):
    """ Convert this instance to another representation. Which
    representation is given by the choice of serialization method.

    :param method: A serialization method. Presently 'urlencoded', 'json',
        'jwt' and 'dict' is supported.
    :param lev:
    :param kwargs: Extra key word arguments
    :return: The content of this message serialized using a chosen method
    """
    # Dispatch to the matching to_<method>() serializer on this instance.
    serializer = getattr(self, "to_%s" % method)
    return serializer(lev=lev, **kwargs)
Convert this instance to another representation. Which representation is given by the choice of serialization method. :param method: A serialization method. Presently 'urlencoded', 'json', 'jwt' and 'dict' is supported. :param lev: :param kwargs: Extra key word arguments :return: THe content of this message serialized using a chosen method
def solve_tuple(expr, vars):
    """Evaluate each child expression and collect the values into a tuple."""
    values = []
    for child in expr.children:
        values.append(solve(child, vars).value)
    return Result(tuple(values), ())
Build a tuple from subexpressions.
def _iter_grouped(self): """Iterate over each element in this group""" for indices in self._group_indices: yield self._obj.isel(**{self._group_dim: indices})
Iterate over each element in this group
def deprecate(name, alternative, version, alt_name=None, klass=None,
              stacklevel=2, msg=None):
    """
    Return a new function that emits a deprecation warning on use.

    To use this method for a deprecated function, another function
    `alternative` with the same signature must exist. The deprecated
    function will emit a deprecation warning, and in the docstring
    it will contain the deprecation directive with the provided version
    so it can be detected for future removal.

    Parameters
    ----------
    name : str
        Name of function to deprecate.
    alternative : func
        Function to use instead.
    version : str
        Version of pandas in which the method has been deprecated.
    alt_name : str, optional
        Name to use in preference of alternative.__name__.
    klass : Warning, default FutureWarning
    stacklevel : int, default 2
    msg : str
        The message to display in the warning.
        Default is '{name} is deprecated. Use {alt_name} instead.'
    """
    alt_name = alt_name or alternative.__name__
    klass = klass or FutureWarning
    warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name)

    @wraps(alternative)
    def wrapper(*args, **kwargs):
        # Warn on every call, then delegate to the replacement.
        warnings.warn(warning_msg, klass, stacklevel=stacklevel)
        return alternative(*args, **kwargs)

    # adding deprecated directive to the docstring
    msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
    doc_error_msg = ('deprecate needs a correctly formatted docstring in '
                     'the target function (should have a one liner short '
                     'summary, and opening quotes should be in their own '
                     'line). Found:\n{}'.format(alternative.__doc__))

    # when python is running in optimized mode (i.e. `-OO`), docstrings are
    # removed, so we check that a docstring with correct formatting is used
    # but we allow empty docstrings
    if alternative.__doc__:
        if alternative.__doc__.count('\n') < 3:
            raise AssertionError(doc_error_msg)
        empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
        if empty1 or empty2 and not summary:
            raise AssertionError(doc_error_msg)
        # Splice a Sphinx ``.. deprecated::`` directive after the summary.
        wrapper.__doc__ = dedent("""
        {summary}

        .. deprecated:: {depr_version}
            {depr_msg}

        {rest_of_docstring}""").format(summary=summary.strip(),
                                       depr_version=version,
                                       depr_msg=msg,
                                       rest_of_docstring=dedent(doc))

    return wrapper
Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.'
def is_zero_bytes_file(self, path):
    """Return True if file <path> is zero bytes in size, else return False.

    Uses ``hadoop fs -test -z``, which exits with status 0 when the file
    is empty.
    """
    return self._getReturnCodeCmd([self._hadoop_cmd, 'fs', '-test', '-z',
                                   self._full_hdfs_path(path)]) == 0
Return True if file <path> is zero bytes in size, else return False
def output(self, stream, value):
    """SPL output port assignment expression.

    Arguments:
        stream(Stream): Output stream the assignment is for.
        value(str): SPL expression used for an output assignment. This
            can be a string, a constant, or an :py:class:`Expression`.

    Returns:
        Expression: Output assignment expression that is valid as a the
        context of this operator.
    """
    # Guard: the stream must belong to this operator's outputs.
    if stream not in self.outputs:
        raise ValueError("Stream is not an output of this operator.")
    assignment = self.expression(value)
    assignment._stream = stream
    return assignment
SPL output port assignment expression. Arguments: stream(Stream): Output stream the assignment is for. value(str): SPL expression used for an output assignment. This can be a string, a constant, or an :py:class:`Expression`. Returns: Expression: Output assignment expression that is valid as a the context of this operator.
def getTimeZone(lat, lon): """Get timezone for a given lat/lon """ #Need to fix for Python 2.x and 3.X support import urllib.request, urllib.error, urllib.parse import xml.etree.ElementTree as ET #http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010 req = "http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="+str(lat)+"%2C"+str(lon)+"&databases=TimeZone" opener = urllib.request.build_opener() f = opener.open(req) tree = ET.parse(f) root = tree.getroot() #Check response tzid = None if root.attrib['code'] == '0': tz = list(root.iter('TimeZone'))[0] #shortname = tz.attrib['ShortName'] tzid = tz.attrib['TimeZoneId'] return tzid
Get timezone for a given lat/lon
def init_hardware(self, serial=None, device_number=ANY_MODULE): """ Initializes the device with the corresponding serial or device number. :param int or None serial: Serial number of the USB-CANmodul. :param int device_number: Device number (0 – 254, or :const:`ANY_MODULE` for the first device). """ if not self._hw_is_initialized: # initialize hardware either by device number or serial if serial is None: UcanInitHardwareEx(byref(self._handle), device_number, self._callback_ref, None) else: UcanInitHardwareEx2(byref(self._handle), serial, self._callback_ref, None) self._hw_is_initialized = True
Initializes the device with the corresponding serial or device number. :param int or None serial: Serial number of the USB-CANmodul. :param int device_number: Device number (0 – 254, or :const:`ANY_MODULE` for the first device).
def get(self, index): """ Constructs a SyncListItemContext :param index: The index :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext """ return SyncListItemContext( self._version, service_sid=self._solution['service_sid'], list_sid=self._solution['list_sid'], index=index, )
Constructs a SyncListItemContext :param index: The index :returns: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
def error_messages(self, driver_id=None): """Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver. """ if driver_id is not None: assert isinstance(driver_id, ray.DriverID) return self._error_messages(driver_id) error_table_keys = self.redis_client.keys( ray.gcs_utils.TablePrefix_ERROR_INFO_string + "*") driver_ids = [ key[len(ray.gcs_utils.TablePrefix_ERROR_INFO_string):] for key in error_table_keys ] return { binary_to_hex(driver_id): self._error_messages( ray.DriverID(driver_id)) for driver_id in driver_ids }
Get the error messages for all drivers or a specific driver. Args: driver_id: The specific driver to get the errors for. If this is None, then this method retrieves the errors for all drivers. Returns: A dictionary mapping driver ID to a list of the error messages for that driver.
def scroll(self, query=None, scroll='5m', size=100, unpack=True): """Scroll an index with the specified search query. Works as a generator. Will yield `size` results per iteration until all hits are returned. """ query = self.match_all if query is None else query response = self.instance.search(index=self.index, doc_type=self.doc_type, body=query, size=size, scroll=scroll) while len(response['hits']['hits']) > 0: scroll_id = response['_scroll_id'] logging.debug(response) if unpack: yield [source['_source'] if '_source' in source else source for source in response['hits']['hits']] else: yield response['hits']['hits'] response = self.instance.scroll(scroll_id=scroll_id, scroll=scroll)
Scroll an index with the specified search query. Works as a generator. Will yield `size` results per iteration until all hits are returned.
def _unbind_topics(self, topics): """Unsubscribe to all of the topics we needed for communication with device Args: topics (MQTTTopicValidator): The topic validator for this device that we have connected to. """ self.client.unsubscribe(topics.status) self.client.unsubscribe(topics.tracing) self.client.unsubscribe(topics.streaming) self.client.unsubscribe(topics.response)
Unsubscribe to all of the topics we needed for communication with device Args: topics (MQTTTopicValidator): The topic validator for this device that we have connected to.
def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0, reorient=True, find_args={}): """ Function that generates all adsorption structures for a given molecular adsorbate. Can take repeat argument or minimum length/width of precursor slab as an input Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0} """ if repeat is None: xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0])) yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1])) repeat = [xrep, yrep, 1] structs = [] for coords in self.find_adsorption_sites(**find_args)['all']: structs.append(self.add_adsorbate( molecule, coords, repeat=repeat, reorient=reorient)) return structs
Function that generates all adsorption structures for a given molecular adsorbate. Can take repeat argument or minimum length/width of precursor slab as an input Args: molecule (Molecule): molecule corresponding to adsorbate repeat (3-tuple or list): repeat argument for supercell generation min_lw (float): minimum length and width of the slab, only used if repeat is None reorient (bool): flag on whether or not to reorient adsorbate along the miller index find_args (dict): dictionary of arguments to be passed to the call to self.find_adsorption_sites, e.g. {"distance":2.0}
def linesubst(line, variables): """ In a string, substitute '{{varname}}' occurrences with the value of variables['varname'], '\\' being an escaping char... If at first you don't understand this function, draw its finite state machine and everything will become crystal clear :) """ # trivial no substitution early detection: if '{{' not in line and '\\' not in line: return line st = NORM out = "" curvar = "" for c in line: if st is NORM: if c == '{': st = ONE elif c == '\\': st = LIT else: out += c elif st is LIT: out += c st = NORM elif st is ONE: if c == '{': st = TWO elif c == '\\': out += '{' st = LIT else: out += '{' + c st = NORM elif st is TWO: if c == '\\': st = TLIT elif c == '}': st = TERM else: curvar += c elif st is TLIT: curvar += c st = TWO elif st is TERM: if c == '}': if curvar not in variables: LOG.warning("Unknown variable %r detected, will just be replaced by an empty string", curvar) else: LOG.debug("Substitution of {{%s}} by %r", curvar, variables[curvar]) value = variables[curvar] if isinstance(value, (float, int, long)): value = str(value) out += value curvar = '' st = NORM elif c == '\\': curvar += '}' st = TLIT else: curvar += '}' + c st = TWO if st is not NORM: LOG.warning("st is not NORM at end of line: " + line) LOG.warning("returned substitution: " + out) return out
In a string, substitute '{{varname}}' occurrences with the value of variables['varname'], '\\' being an escaping char... If at first you don't understand this function, draw its finite state machine and everything will become crystal clear :)
def _reset_timeout(self): """Reset timeout for date keep alive.""" if self._timeout: self._timeout.cancel() self._timeout = self.loop.call_later(self.client.timeout, self.transport.close)
Reset timeout for date keep alive.
def count_in_category(x='call_type', filter_dict=None, model=DEFAULT_MODEL, app=DEFAULT_APP, sort=True, limit=1000): """ Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts. >>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1) >>> len(x) == len(y) == 5 True >>> y[1] >= y[0] True """ sort = sort_prefix(sort) model = get_model(model, app) filter_dict = filter_dict or {} x = fuzzy.extractOne(str(x), model._meta.get_all_field_names())[0] objects = model.objects.filter(**filter_dict) objects = objects.values(x) objects = objects.annotate(y=djmodels.Count(x)) if sort is not None: objects = objects.order_by(sort + 'y') objects = objects.all() if limit: objects = objects[:int(limit)] objects = normalize_choices(util.sod_transposed(objects), field_name=x, app=app, human_readable=True) if not objects: return None objects = consolidated_counts(objects, field_name=x, count_name='y') if sort is not None: objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort)) return objects[x], objects['y']
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts. >>> x, y = count_in_category(x='call_type', filter_dict={'model__startswith': 'LC60'}, limit=5, sort=1) >>> len(x) == len(y) == 5 True >>> y[1] >= y[0] True
def laplacian_eigenmaps(adjacency_matrix, k): """ Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix. Introduced in: Belkin, M., & Niyogi, P. (2003). Laplacian eigenmaps for dimensionality reduction and data representation. Neural computation, 15(6), 1373-1396. Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix. - k: The number of eigenvectors to extract. Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector. """ # Calculate sparse graph Laplacian. laplacian = get_normalized_laplacian(adjacency_matrix) # Calculate bottom k+1 eigenvalues and eigenvectors of normalized Laplacian. try: eigenvalues, eigenvectors = spla.eigsh(laplacian, k=k, which='SM', return_eigenvectors=True) except spla.ArpackNoConvergence as e: print("ARPACK has not converged.") eigenvalue = e.eigenvalues eigenvectors = e.eigenvectors # Discard the eigenvector corresponding to the zero-valued eigenvalue. eigenvectors = eigenvectors[:, 1:] return eigenvectors
Performs spectral graph embedding using the graph symmetric normalized Laplacian matrix. Introduced in: Belkin, M., & Niyogi, P. (2003). Laplacian eigenmaps for dimensionality reduction and data representation. Neural computation, 15(6), 1373-1396. Inputs: - A in R^(nxn): Adjacency matrix of an network represented as a SciPy Sparse COOrdinate matrix. - k: The number of eigenvectors to extract. Outputs: - X in R^(nxk): The latent space embedding represented as a NumPy array. We discard the first eigenvector.
def _load_model(self): """Loads robot and optionally add grippers.""" super()._load_model() self.mujoco_robot = Baxter() if self.has_gripper_right: self.gripper_right = gripper_factory(self.gripper_right_name) if not self.gripper_visualization: self.gripper_right.hide_visualization() self.mujoco_robot.add_gripper("right_hand", self.gripper_right) if self.has_gripper_left: self.gripper_left = gripper_factory(self.gripper_left_name) if not self.gripper_visualization: self.gripper_left.hide_visualization() self.mujoco_robot.add_gripper("left_hand", self.gripper_left)
Loads robot and optionally add grippers.
def lv_to_pypsa(network):
    """
    Convert LV grid topology to PyPSA representation

    Includes grid topology of all LV grids of
    :attr:`~.grid.grid.Grid.lv_grids`

    Parameters
    ----------
    network : Network
        eDisGo grid container

    Returns
    -------
    dict of :pandas:`pandas.DataFrame<dataframe>`
        A DataFrame for each type of PyPSA components constituting the
        grid topology. Keys included

        * 'Generator'
        * 'Load'
        * 'Line'
        * 'BranchTee'
        * 'StorageUnit'
    """

    generators = []
    loads = []
    branch_tees = []
    lines = []
    lv_stations = []
    storages = []

    # Collect topology elements from every LV grid in the network.
    for lv_grid in network.mv_grid.lv_grids:
        generators.extend(lv_grid.graph.nodes_by_attribute('generator'))
        loads.extend(lv_grid.graph.nodes_by_attribute('load'))
        branch_tees.extend(lv_grid.graph.nodes_by_attribute('branch_tee'))
        lines.extend(lv_grid.graph.lines())
        lv_stations.extend(lv_grid.graph.nodes_by_attribute('lv_station'))
        storages.extend(lv_grid.graph.nodes_by_attribute('storage'))

    # Angular frequency at 50 Hz, used to derive reactance from inductance.
    omega = 2 * pi * 50

    generator = {'name': [],
                 'bus': [],
                 'control': [],
                 'p_nom': [],
                 'type': [],
                 'p_nom_extendable': [],
                 'p_nom_min': [],
                 'p_nom_max': [],
                 'capital_cost': []
                 }

    bus = {'name': [], 'v_nom': [], 'x': [], 'y': []}

    load = {'name': [], 'bus': []}

    line = {'name': [],
            'bus0': [],
            'bus1': [],
            'type': [],
            'x': [],
            'r': [],
            's_nom': [],
            's_nom_min': [],
            's_max_pu': [],
            's_nom_extendable': [],
            'capital_cost': [],
            'length': []}

    storage = {
        'name': [],
        'bus': [],
        'p_nom': [],
        'p_nom_extendable': [],
        'p_nom_min': [],
        'p_nom_max': [],
        'capital_cost': [],
        'max_hours': []}

    # create dictionary representing generators and associated buses
    for gen in generators:
        bus_name = '_'.join(['Bus', repr(gen)])
        generator['name'].append(repr(gen))
        generator['bus'].append(bus_name)
        generator['control'].append('PQ')
        generator['p_nom'].append(gen.nominal_capacity / 1e3)
        generator['type'].append('_'.join([gen.type, gen.subtype]))
        generator['p_nom_extendable'].append(False)
        generator['p_nom_min'].append(0)  # 0.3
        generator['p_nom_max'].append(0)
        generator['capital_cost'].append(0)

        bus['name'].append(bus_name)
        bus['v_nom'].append(gen.grid.voltage_nom)
        bus['x'].append(None)
        bus['y'].append(None)

    # create dictionary representing branch tees
    for bt in branch_tees:
        bus['name'].append('_'.join(['Bus', repr(bt)]))
        bus['v_nom'].append(bt.grid.voltage_nom)
        bus['x'].append(None)
        bus['y'].append(None)

    # create dataframes representing loads and associated buses
    for lo in loads:
        bus_name = '_'.join(['Bus', repr(lo)])
        load['name'].append(repr(lo))
        load['bus'].append(bus_name)

        bus['name'].append(bus_name)
        bus['v_nom'].append(lo.grid.voltage_nom)
        bus['x'].append(None)
        bus['y'].append(None)

    # create dataframe for lines
    for l in lines:
        line['name'].append(repr(l['line']))

        # LV station endpoints are addressed by their LV-side bus name.
        if l['adj_nodes'][0] in lv_stations:
            line['bus0'].append(
                '_'.join(['Bus', l['adj_nodes'][0].__repr__(side='lv')]))
        else:
            line['bus0'].append('_'.join(['Bus', repr(l['adj_nodes'][0])]))

        if l['adj_nodes'][1] in lv_stations:
            line['bus1'].append(
                '_'.join(['Bus', l['adj_nodes'][1].__repr__(side='lv')]))
        else:
            line['bus1'].append('_'.join(['Bus', repr(l['adj_nodes'][1])]))

        line['type'].append("")
        # Reactance from per-km inductance; impedances scale with length.
        line['x'].append(
            l['line'].type['L'] * omega / 1e3 * l['line'].length)
        line['r'].append(l['line'].type['R'] * l['line'].length)
        s_nom = sqrt(3) * l['line'].type['I_max_th'] * \
            l['line'].type['U_n'] / 1e3
        line['s_nom'].append(s_nom)
        line['s_nom_min'].append(s_nom)
        line['s_max_pu'].append(0.6)
        line['s_nom_extendable'].append(True)
        line['capital_cost'].append(100)
        line['length'].append(l['line'].length)

    lv_components = {
        'Generator': pd.DataFrame(generator).set_index('name'),
        'Bus': pd.DataFrame(bus).set_index('name'),
        'Load': pd.DataFrame(load).set_index('name'),
        'Line': pd.DataFrame(line).set_index('name'),
        'StorageUnit': pd.DataFrame(storage).set_index('name')}

    return lv_components
Convert LV grid topology to PyPSA representation Includes grid topology of all LV grids of :attr:`~.grid.grid.Grid.lv_grids` Parameters ---------- network : Network eDisGo grid container Returns ------- dict of :pandas:`pandas.DataFrame<dataframe>` A DataFrame for each type of PyPSA components constituting the grid topology. Keys included * 'Generator' * 'Load' * 'Line' * 'BranchTee' * 'StorageUnit'
def top(self, objects: Set[Object]) -> Set[Object]:
    """
    Return the topmost objects (i.e. minimum y_loc). The comparison
    is done separately for each box.
    """
    result: Set[Object] = set()
    for box_objects in self._separate_objects_by_boxes(objects).values():
        highest = min(obj.y_loc for obj in box_objects)
        result.update(obj for obj in box_objects if obj.y_loc == highest)
    return result
Return the topmost objects (i.e. minimum y_loc). The comparison is done separately for each box.
def filter_genes_cv_deprecated(X, Ecutoff, cvFilter):
    """Filter genes by coefficient of variance and mean.

    See `filter_genes_dispersion`.

    Reference: Weinreb et al. (2017).
    """
    if issparse(X):
        raise ValueError('Not defined for sparse input. See `filter_genes_dispersion`.')
    means = np.mean(X, axis=0)
    passes_mean = means > Ecutoff
    # Coefficient of variation with a small offset to avoid dividing by zero.
    passes_cv = np.std(X, axis=0) / (means + .0001) > cvFilter
    return np.nonzero(passes_mean & passes_cv)[0]
Filter genes by coefficient of variance and mean. See `filter_genes_dispersion`. Reference: Weinreb et al. (2017).
def reset_flags(self):
    """Put the C/Z/P/S condition flags back into an "unknown" state (None)."""
    self.C = self.Z = self.P = self.S = None
Resets flags to an "unknown state"
def render_scene(self):
    """Render the scene once, single-buffered, then pump GLFW events."""
    self.init_gl()  # should be a no-op after the first frame is rendered
    glfw.make_context_current(self.window)
    self.renderer.render_scene()
    # Done rendering
    # glfw.swap_buffers(self.window)  # avoid double buffering to avoid stalling
    glFlush()  # single buffering
    glfw.poll_events()
render scene one time