code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def calculate_deltat(year, month):
    """Calculate the difference between Terrestrial Dynamical Time (TD)
    and Universal Time (UT).

    Note: This function is not yet compatible for calculations using
    Numba.

    Equations taken from
    http://eclipse.gsfc.nasa.gov/SEcat5/deltatpoly.html
    """
    plw = 'Deltat is unknown for years before -1999 and after 3000. ' \
          'Delta values will be calculated, but the calculations ' \
          'are not intended to be used for these years.'

    # Warn (but still compute) when any year lies outside the range the
    # polynomial fits below were derived for.
    try:
        if np.any((year > 3000) | (year < -1999)):
            warnings.warn(plw)
    except ValueError:
        # `year` is a plain scalar, so redo the check without np.any.
        if (year > 3000) | (year < -1999):
            warnings.warn(plw)
    except TypeError:
        # Non-numeric input: no delta-T correction can be computed.
        return 0

    # Decimal year, taking the middle of the given month.
    y = year + (month - 0.5)/12

    # Piecewise polynomial fits: each np.where fills in the values for its
    # own year range and leaves all other entries untouched.
    deltat = np.where(year < -500,
                      -20+32*((y-1820)/100)**2, 0)

    deltat = np.where((-500 <= year) & (year < 500),
                      10583.6-1014.41*(y/100)
                      + 33.78311*(y/100)**2
                      - 5.952053*(y/100)**3
                      - 0.1798452*(y/100)**4
                      + 0.022174192*(y/100)**5
                      + 0.0090316521*(y/100)**6, deltat)

    deltat = np.where((500 <= year) & (year < 1600),
                      1574.2-556.01*((y-1000)/100)
                      + 71.23472*((y-1000)/100)**2
                      + 0.319781*((y-1000)/100)**3
                      - 0.8503463*((y-1000)/100)**4
                      - 0.005050998*((y-1000)/100)**5
                      + 0.0083572073*((y-1000)/100)**6, deltat)

    deltat = np.where((1600 <= year) & (year < 1700),
                      120-0.9808*(y-1600)
                      - 0.01532*(y-1600)**2
                      + (y-1600)**3/7129, deltat)

    deltat = np.where((1700 <= year) & (year < 1800),
                      8.83+0.1603*(y-1700)
                      - 0.0059285*(y-1700)**2
                      + 0.00013336*(y-1700)**3
                      - (y-1700)**4/1174000, deltat)

    deltat = np.where((1800 <= year) & (year < 1860),
                      13.72-0.332447*(y-1800)
                      + 0.0068612*(y-1800)**2
                      + 0.0041116*(y-1800)**3
                      - 0.00037436*(y-1800)**4
                      + 0.0000121272*(y-1800)**5
                      - 0.0000001699*(y-1800)**6
                      + 0.000000000875*(y-1800)**7, deltat)

    deltat = np.where((1860 <= year) & (year < 1900),
                      7.62+0.5737*(y-1860)
                      - 0.251754*(y-1860)**2
                      + 0.01680668*(y-1860)**3
                      - 0.0004473624*(y-1860)**4
                      + (y-1860)**5/233174, deltat)

    deltat = np.where((1900 <= year) & (year < 1920),
                      -2.79+1.494119*(y-1900)
                      - 0.0598939*(y-1900)**2
                      + 0.0061966*(y-1900)**3
                      - 0.000197*(y-1900)**4, deltat)

    deltat = np.where((1920 <= year) & (year < 1941),
                      21.20+0.84493*(y-1920)
                      - 0.076100*(y-1920)**2
                      + 0.0020936*(y-1920)**3, deltat)

    deltat = np.where((1941 <= year) & (year < 1961),
                      29.07+0.407*(y-1950)
                      - (y-1950)**2/233
                      + (y-1950)**3/2547, deltat)

    deltat = np.where((1961 <= year) & (year < 1986),
                      45.45+1.067*(y-1975)
                      - (y-1975)**2/260
                      - (y-1975)**3/718, deltat)

    deltat = np.where((1986 <= year) & (year < 2005),
                      63.86+0.3345*(y-2000)
                      - 0.060374*(y-2000)**2
                      + 0.0017275*(y-2000)**3
                      + 0.000651814*(y-2000)**4
                      + 0.00002373599*(y-2000)**5, deltat)

    deltat = np.where((2005 <= year) & (year < 2050),
                      62.92+0.32217*(y-2000)
                      + 0.005589*(y-2000)**2, deltat)

    deltat = np.where((2050 <= year) & (year < 2150),
                      -20+32*((y-1820)/100)**2
                      - 0.5628*(2150-y), deltat)

    deltat = np.where(year >= 2150,
                      -20+32*((y-1820)/100)**2, deltat)

    # Return a plain Python scalar when both inputs were scalars; otherwise
    # keep the ndarray produced by np.where.
    deltat = deltat.item() if np.isscalar(year) & np.isscalar(month)\
        else deltat

    return deltat
Calculate the difference between Terrestrial Dynamical Time (TD) and Universal Time (UT). Note: This function is not yet compatible for calculations using Numba. Equations taken from http://eclipse.gsfc.nasa.gov/SEcat5/deltatpoly.html
def createSegment(self, cell):
    """
    Create a :class:`~nupic.algorithms.connections.Segment` on the specified
    cell. This method calls
    :meth:`~nupic.algorithms.connections.Connections.createSegment` on the
    underlying :class:`~nupic.algorithms.connections.Connections`, and it does
    some extra bookkeeping. Unit tests should call this method, and not
    :meth:`~nupic.algorithms.connections.Connections.createSegment`.

    :param cell: (int) Index of cell to create a segment on.

    :returns: (:class:`~nupic.algorithms.connections.Segment`) The created
              segment.
    """
    # Delegate to the underlying helper, passing the bookkeeping state
    # (per-segment last-used iteration, current iteration, segment cap).
    return self._createSegment(
        self.connections, self.lastUsedIterationForSegment, cell,
        self.iteration, self.maxSegmentsPerCell)
Create a :class:`~nupic.algorithms.connections.Segment` on the specified cell. This method calls :meth:`~nupic.algorithms.connections.Connections.createSegment` on the underlying :class:`~nupic.algorithms.connections.Connections`, and it does some extra bookkeeping. Unit tests should call this method, and not :meth:`~nupic.algorithms.connections.Connections.createSegment`. :param cell: (int) Index of cell to create a segment on. :returns: (:class:`~nupic.algorithms.connections.Segment`) The created segment.
def add_metadata(self, metadata_matrix, meta_index_store):
    '''
    Returns a new corpus with the metadata matrix and index store integrated.

    :param metadata_matrix: scipy.sparse matrix (# docs, # metadata)
    :param meta_index_store: IndexStore of metadata values
    :return: TermDocMatrixWithoutCategories
    '''
    # Sanity checks: the store must be a real IndexStore and the matrix must
    # be 2-D with exactly one row per document in this corpus.
    assert isinstance(meta_index_store, IndexStore)
    assert len(metadata_matrix.shape) == 2
    assert metadata_matrix.shape[0] == self.get_num_docs()
    # Rebuild keeping X and the term index, dropping categories
    # (new_y / new_category_idx_store are None) and attaching the metadata.
    return self._make_new_term_doc_matrix(new_X=self._X,
                                          new_y=None,
                                          new_category_idx_store=None,
                                          new_y_mask=np.ones(self.get_num_docs()).astype(bool),
                                          new_mX=metadata_matrix,
                                          new_term_idx_store=self._term_idx_store,
                                          new_metadata_idx_store=meta_index_store)
Returns a new corpus with the metadata matrix and index store integrated. :param metadata_matrix: scipy.sparse matrix (# docs, # metadata) :param meta_index_store: IndexStore of metadata values :return: TermDocMatrixWithoutCategories
def standardize_input_data(data):
    """
    Ensure utf-8 encoded strings are passed to the indico API.

    :param data: a ``bytes`` value, a ``str``, or a list mixing both.
    :returns: the input with every ``bytes`` value decoded as utf-8;
        non-bytes values are returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, list):
        # Bug fix: the original tested ``type(data) == bytes`` inside the
        # comprehension (always False here), so list elements were never
        # decoded. Test each element instead.
        return [
            el.decode('utf-8') if isinstance(el, bytes) else el
            for el in data
        ]
    return data
Ensure utf-8 encoded strings are passed to the indico API
def _construct_columns(self, column_map):
    ''' a helper method for constructing the column objects for a table object '''
    from sqlalchemy import Column, String, Boolean, Integer, Float, Binary

    column_args = []
    for key, value in column_map.items():
        record_key, datatype, max_length = value[0], value[1], value[2]
        if record_key == 'id':
            # The primary-key column must be a string, float or integer,
            # and always goes first in the column list.
            if datatype == 'string':
                id_type = String(max_length) if max_length else String
            elif datatype == 'float':
                id_type = Float
            elif datatype == 'integer':
                id_type = Integer
            else:
                raise ValueError('Field "id" in record_schema must be a string, float or integer.')
            column_args.insert(0, Column(record_key, id_type, primary_key=True))
        else:
            if datatype == 'boolean':
                col_type = Boolean
            elif datatype == 'string':
                col_type = String(max_length) if max_length else String
            elif datatype == 'float':
                col_type = Float
            elif datatype == 'integer':
                col_type = Integer
            elif datatype == 'list':
                # lists are stored as opaque binary blobs
                col_type = Binary
            else:
                # unknown datatypes are silently skipped, as before
                continue
            column_args.append(Column(record_key, col_type))
    return column_args
a helper method for constructing the column objects for a table object
def dependencies(self) -> List[Dependency]:
    """Return the PB dependencies."""
    # The DB stores the dependency list as a Python-literal string; parse it
    # and wrap each entry in a Dependency object.
    raw = DB.get_hash_value(self.key, 'dependencies')
    return [Dependency(entry) for entry in ast.literal_eval(raw)]
Return the PB dependencies.
def node_style(self, node, **kwargs):
    ''' Modifies a node style to the dot representation. '''
    # Ensure the node is registered in the edge map even if it has no
    # edges yet, then record its style attributes.
    self.edges.setdefault(node, {})
    self.nodes[node] = kwargs
Modifies a node style to the dot representation.
def extend_reservation(request, user_id, days=7):
    ''' Allows staff to extend the reservation on a given user's cart.

    :param request: current HTTP request; the user is redirected back to
        the referring page afterwards.
    :param user_id: id of the user whose cart reservation is extended.
    :param days: number of days to extend the reservation by (default 7).
    '''
    user = User.objects.get(id=int(user_id))
    cart = CartController.for_user(user)
    cart.extend_reservation(datetime.timedelta(days=days))
    # NOTE(review): raises KeyError when no Referer header is present —
    # presumably only reached from an in-site link; confirm.
    return redirect(request.META["HTTP_REFERER"])
Allows staff to extend the reservation on a given user's cart.
def field_values(self):
    """
    Access the field_values

    :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
    """
    # Lazily build the FieldValueList once and memoize it on the instance.
    if self._field_values is None:
        self._field_values = FieldValueList(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            field_type_sid=self._solution['sid'],
        )
    return self._field_values
Access the field_values :returns: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList :rtype: twilio.rest.autopilot.v1.assistant.field_type.field_value.FieldValueList
def _parse_pool_transaction_file(
        ledger, nodeReg, cliNodeReg, nodeKeys, activeValidators,
        ledger_size=None):
    """
    helper function for parseLedgerForHaAndKeys

    Walks the pool ledger's NODE transactions and fills, in place:
    nodeReg / cliNodeReg with node and client HAs, nodeKeys with node
    verkeys (hex), and activeValidators with the names of nodes whose
    services list includes VALIDATOR.

    :param ledger_size: if given, only read transactions up to this point.
    """
    for _, txn in ledger.getAllTxn(to=ledger_size):
        if get_type(txn) == NODE:
            txn_data = get_payload_data(txn)
            nodeName = txn_data[DATA][ALIAS]
            clientStackName = nodeName + CLIENT_STACK_SUFFIX
            # Node-to-node and client-facing addresses are each optional.
            nHa = (txn_data[DATA][NODE_IP], txn_data[DATA][NODE_PORT]) \
                if (NODE_IP in txn_data[DATA] and NODE_PORT in txn_data[DATA]) \
                else None
            cHa = (txn_data[DATA][CLIENT_IP], txn_data[DATA][CLIENT_PORT]) \
                if (CLIENT_IP in txn_data[DATA] and CLIENT_PORT in txn_data[DATA]) \
                else None
            if nHa:
                nodeReg[nodeName] = HA(*nHa)
            if cHa:
                cliNodeReg[clientStackName] = HA(*cHa)
            try:
                # TODO: Need to handle abbreviated verkey
                # key_type records which conversion is in flight so the
                # error path below names the right field.
                key_type = 'verkey'
                verkey = cryptonymToHex(str(txn_data[TARGET_NYM]))
                key_type = 'identifier'
                cryptonymToHex(get_from(txn))
            except ValueError:
                logger.exception(
                    'Invalid {}. Rebuild pool transactions.'.format(key_type))
                exit('Invalid {}. Rebuild pool transactions.'.format(key_type))
            nodeKeys[nodeName] = verkey
            # A node is an active validator iff its most recent NODE txn
            # lists VALIDATOR in services; an empty list deactivates it.
            services = txn_data[DATA].get(SERVICES)
            if isinstance(services, list):
                if VALIDATOR in services:
                    activeValidators.add(nodeName)
                else:
                    activeValidators.discard(nodeName)
helper function for parseLedgerForHaAndKeys
def cis(x: float) -> complex:
    r"""
    Implements Euler's formula
    :math:`\text{cis}(x) = e^{i x} = \cos(x) + i \sin(x)`.

    Doc fix: the exponent is :math:`i x` with ``x`` in radians — there is no
    :math:`\pi` factor, as the implementation shows (``cis(np.pi) == -1``).

    :param x: angle in radians.
    :returns: the unit-magnitude complex number at angle ``x``.
    """
    return np.cos(x) + 1.0j * np.sin(x)
r""" Implements Euler's formula :math:`\text{cis}(x) = e^{i x} = \cos(x) + i \sin(x)` (the exponent carries no :math:`\pi` factor; ``x`` is in radians)
def create_datastream(self, datastream):
    """
    To create Datastream

    :param datastream: Datastream definition to POST to the service.
    :returns: Schemas.Datastream built from the server's raw response.
    """
    # POST the definition and wrap the raw response in a schema object.
    raw_datastream = self.http.post('/Datastream', datastream)
    return Schemas.Datastream(datastream=raw_datastream)
Create a Datastream. :param datastream: Datastream definition to create :return: Schemas.Datastream built from the server response
def run_once(self):
    """This function runs one iteration of the IRC client. This is called
    in a loop by the run_loop function. It can be called separately, but
    most of the time there is no need to do this.
    """
    packet = _parse_irc_packet(next(self.lines))  # Get next line from generator

    # Raw-packet hooks fire for every line received, before dispatch.
    for event_handler in list(self.on_packet_received):
        event_handler(self, packet)

    if packet.command == "PRIVMSG":
        # Channel messages start with '#'; anything else is private.
        if packet.arguments[0].startswith("#"):
            for event_handler in list(self.on_public_message):
                event_handler(self, packet.arguments[0],
                              packet.prefix.split("!")[0],
                              packet.arguments[1])
        else:
            for event_handler in list(self.on_private_message):
                event_handler(self, packet.prefix.split("!")[0],
                              packet.arguments[1])
    elif packet.command == "PING":
        # Answer the server keep-alive.
        self.send_line("PONG :{}".format(packet.arguments[0]))
        for event_handler in list(self.on_ping):
            event_handler(self)
    elif packet.command == "433" or packet.command == "437":
        # Command 433 is "Nick in use"
        # Add underscore to the nick
        self.set_nick("{}_".format(self.nick))
    elif packet.command == "001":
        # 001 = welcome: registration with the server succeeded.
        for event_handler in list(self.on_welcome):
            event_handler(self)
    elif packet.command == "JOIN":
        for event_handler in list(self.on_join):
            event_handler(self, packet.arguments[0],
                          packet.prefix.split("!")[0])
    elif packet.command == "PART":
        for event_handler in list(self.on_leave):
            event_handler(self, packet.arguments[0],
                          packet.prefix.split("!")[0])
This function runs one iteration of the IRC client. This is called in a loop by the run_loop function. It can be called separately, but most of the time there is no need to do this.
def devserver_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
Serve the web API for development.

Usage:
  pld-devserver [options]

Options:
  -h --help        Show this screen.
  --host=<host>    The host to use [default: 0.0.0.0].
  --port=<port>    The port to use [default: 5000].
  --debug=<debug>  Whether or not to use debug mode [default: 0].
    """
    # NOTE: docopt parses this docstring itself, so its layout is part of
    # the program's behaviour — do not reflow it casually.
    # NOTE(review): ``argv=sys.argv[1:]`` is evaluated once at import time;
    # fine for a CLI entry point, but worth confirming.
    arguments = docopt(devserver_cmd.__doc__, argv=argv)
    initialize_config()
    app.run(
        host=arguments['--host'],
        port=int(arguments['--port']),
        debug=int(arguments['--debug']),
    )
\ Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0].
def delete(self, path, data=None): """Executes a DELETE. 'path' may not be None. Should include the full path to the resoure. 'data' may be None or a dictionary. Returns a named tuple that includes: status: the HTTP status code json: the returned JSON-HAL If the key was not set, throws an APIConfigurationException.""" # Argument error checking. assert path is not None assert data is None or isinstance(data, dict) # Execute the request. response = self.conn.request('DELETE', path, data, self._get_headers()) # Extract the result. self._last_status = response_status = response.status response_content = response.data.decode() # return (status, json) return Result(status=response_status, json=response_content)
Executes a DELETE. 'path' may not be None. Should include the full path to the resoure. 'data' may be None or a dictionary. Returns a named tuple that includes: status: the HTTP status code json: the returned JSON-HAL If the key was not set, throws an APIConfigurationException.
def lcm(*numbers):
    r"""
    Return lowest common multiple of a sequence of numbers.

    Args:
        \*numbers: Sequence of numbers.

    Returns:
        (int) Lowest common multiple of numbers.
    """
    # Fix: docstring made raw — ``\*`` in a non-raw string is an invalid
    # escape sequence (SyntaxWarning on modern Python).
    # Fold pairwise: lcm(a, b) == a * b // gcd(a, b). Starting from 1 makes
    # lcm() of an empty argument list return 1, the multiplicative identity.
    n = 1
    for i in numbers:
        n = (i * n) // gcd(i, n)
    return n
Return lowest common multiple of a sequence of numbers. Args: *numbers: Sequence of numbers. Returns: (int) Lowest common multiple of numbers.
def listBlockSummaries(self, block_name="", dataset="", detail=False):
    """
    API that returns summary information like total size and total number
    of events in a dataset or a list of blocks

    :param block_name: list block summaries for block_name(s)
    :type block_name: str, list
    :param dataset: list block summaries for all blocks in dataset
    :type dataset: str
    :param detail: list summary by block names if detail=True, default=False
    :type detail: str, bool
    :returns: list of dicts containing total block_sizes, file_counts and
        event_counts of dataset or blocks provided
    """
    # Exactly one of `dataset` / `block_name` must be supplied.
    if bool(dataset)+bool(block_name) != 1:
        dbsExceptionHandler("dbsException-invalid-input2",
                            dbsExceptionCode["dbsException-invalid-input2"],
                            self.logger.exception,
                            "Dataset or block_names must be specified at a time.")
    # Normalise a single block name into a one-element list.
    # NOTE(review): `basestring` is Python-2 only; this module presumably
    # still targets Python 2 — confirm before porting.
    if block_name and isinstance(block_name, basestring):
        try:
            block_name = [str(block_name)]
        except:
            dbsExceptionHandler("dbsException-invalid-input",
                                "Invalid block_name for listBlockSummaries. ")

    # Wildcards are rejected in both selectors.
    for this_block_name in block_name:
        if re.search("[*, %]", this_block_name):
            dbsExceptionHandler("dbsException-invalid-input2",
                                dbsExceptionCode["dbsException-invalid-input2"],
                                self.logger.exception,
                                "No wildcards are allowed in block_name list")
    if re.search("[*, %]", dataset):
        dbsExceptionHandler("dbsException-invalid-input2",
                            dbsExceptionCode["dbsException-invalid-input2"],
                            self.logger.exception,
                            "No wildcards are allowed in dataset")

    data = []
    try:
        with self.dbi.connection() as conn:
            data = self.dbsBlockSummaryListDAO.execute(conn, block_name,
                                                       dataset, detail)
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception,
                            de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)

    # Generator: stream the result rows one at a time.
    for item in data:
        yield item
API that returns summary information like total size and total number of events in a dataset or a list of blocks :param block_name: list block summaries for block_name(s) :type block_name: str, list :param dataset: list block summaries for all blocks in dataset :type dataset: str :param detail: list summary by block names if detail=True, default=False :type detail: str, bool :returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
def emit(self):
    """Get a mapping from a transcript

    :return: One random Transcript sequence
    :rtype: sequence
    """
    # Draw an index according to the per-transcript weights, then return
    # the corresponding transcript.
    i = self.options.rand.get_weighted_random_index(self._weights)
    return self._transcriptome.transcripts[i]
Get a mapping from a transcript :return: One random Transcript sequence :rtype: sequence
def merge_entity(self, entity, if_match='*'):
    '''
    Adds a merge entity operation to the batch. See
    :func:`~azure.storage.table.tableservice.TableService.merge_entity` for
    more information on merges.

    The operation will not be executed until the batch is committed.

    :param entity:
        The entity to merge. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: dict or :class:`~azure.storage.table.models.Entity`
    :param str if_match:
        The client may specify the ETag for the entity on the
        request in order to compare to the ETag maintained by the service
        for the purpose of optimistic concurrency. The merge operation
        will be performed only if the ETag sent by the client matches the
        value maintained by the server, indicating that the entity has
        not been modified since it was retrieved by the client. To force
        an unconditional merge, set If-Match to the wildcard character (*).
    '''
    # Build the HTTP request for the merge (honouring the batch's
    # encryption settings) and queue it under the entity's keys.
    request = _merge_entity(entity, if_match, self._require_encryption,
                            self._key_encryption_key)
    self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
Adds a merge entity operation to the batch. See :func:`~azure.storage.table.tableservice.TableService.merge_entity` for more information on merges. The operation will not be executed until the batch is committed. :param entity: The entity to merge. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The merge operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional merge, set If-Match to the wildcard character (*).
def save_df_output(
        df_output: pd.DataFrame,
        freq_s: int = 3600,
        site: str = '',
        path_dir_save: Path = Path('.'),) -> list:
    '''save supy output dataframe to txt files

    Parameters
    ----------
    df_output : pd.DataFrame
        output dataframe of supy simulation
    freq_s : int, optional
        output frequency in second (the default is 3600, which indicates a
        txt with hourly values)
    site : str, optional
        site code used for filename (the default is '', which indicates no
        site name prepended to the filename)
    path_dir_save : Path, optional
        directory to save txt files (the default is '.', which is the
        current working directory)

    Returns
    -------
    list
        a list of `Path` objects for saved txt files
    '''
    list_path_save = []
    list_group = df_output.columns.get_level_values('group').unique()
    list_grid = df_output.index.get_level_values('grid').unique()
    # Pass 1: save every grid/group at the runtime frequency.
    for grid in list_grid:
        for group in list_group:
            df_output_grid_group = df_output\
                .loc[grid, group]\
                .dropna(how='all', axis=0)
            # save output at the runtime frequency (usually 5 min)
            # 'DailyState' group will be save a daily frequency
            path_save = save_df_grid_group(
                df_output_grid_group, grid, group,
                site=site, dir_save=path_dir_save)
            list_path_save.append(path_save)

    # resample output if freq_s is different from runtime freq (usually 5 min)
    freq_save = pd.Timedelta(freq_s, 's')
    # resample `df_output` at `freq_save`
    df_rsmp = resample_output(df_output, freq_save)
    # 'DailyState' group will be dropped in `resample_output` as resampling
    # is not needed
    df_rsmp = df_rsmp.drop(columns='DailyState')
    list_group = df_rsmp.columns.get_level_values('group').unique()
    list_grid = df_rsmp.index.get_level_values('grid').unique()

    # Pass 2: save output at the resampling frequency.
    for grid in list_grid:
        for group in list_group:
            df_output_grid_group = df_rsmp.loc[grid, group]
            path_save = save_df_grid_group(
                df_output_grid_group, grid, group,
                site=site, dir_save=path_dir_save)
            list_path_save.append(path_save)

    return list_path_save
save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files
def setup(self, in_name=None, out_name=None, required=None, hidden=None, multiple=None, defaults=None): """ Set the options of the block. Only the not None given options are set .. note:: a block may have multiple inputs but have only one output :param in_name: name(s) of the block input data :type in_name: str or list of str :param out_name: name of the block output data :type out_name: str :param required: whether the block will be required or not :type required: bool :param hidden: whether the block will be hidden to the user or not :type hidden: bool :param multiple: if True more than one component may be selected/ run) :type multiple: bool :param defaults: names of the selected components :type defaults: list of str, or str """ if in_name is not None: self.in_name = in_name if isinstance(in_name, list) else [in_name] if out_name is not None: self.out_name = out_name if required is not None: self.required = required if hidden is not None: self.hidden = hidden if multiple is not None: self.multiple = multiple if defaults is not None: #if default is just a 'str' it is managed in setter self.defaults = defaults
Set the options of the block. Only the not None given options are set .. note:: a block may have multiple inputs but have only one output :param in_name: name(s) of the block input data :type in_name: str or list of str :param out_name: name of the block output data :type out_name: str :param required: whether the block will be required or not :type required: bool :param hidden: whether the block will be hidden to the user or not :type hidden: bool :param multiple: if True more than one component may be selected/ run) :type multiple: bool :param defaults: names of the selected components :type defaults: list of str, or str
def _protected_division(x1, x2): """Closure of division (x1/x2) for zero denominator.""" with np.errstate(divide='ignore', invalid='ignore'): return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), 1.)
Closure of division (x1/x2) for zero denominator.
def init_logger(self):
    """Init logger.

    Lazily creates the trial's log directory and a UnifiedLogger on first
    call; subsequent calls are no-ops once result_logger is set.
    """
    if not self.result_logger:
        # Make sure the parent results directory exists.
        if not os.path.exists(self.local_dir):
            os.makedirs(self.local_dir)
        if not self.logdir:
            # No logdir yet: create a unique one under local_dir, named
            # with a truncated trial identifier plus a date stamp.
            self.logdir = tempfile.mkdtemp(
                prefix="{}_{}".format(
                    str(self)[:MAX_LEN_IDENTIFIER], date_str()),
                dir=self.local_dir)
        elif not os.path.exists(self.logdir):
            os.makedirs(self.logdir)
        self.result_logger = UnifiedLogger(
            self.config,
            self.logdir,
            upload_uri=self.upload_dir,
            loggers=self.loggers,
            sync_function=self.sync_function)
Init logger.
def ionic_strength(mis, zis):
    r'''Calculate the ionic strength of a solution in one of two ways,
    depending on the inputs only. For Pitzer and Bromley models, `mis`
    should be molalities of each component. For eNRTL models, `mis` should
    be mole fractions of each electrolyte in the solution (which will sum
    to much less than 1).

    .. math::
        I = \frac{1}{2} \sum M_i z_i^2

        I = \frac{1}{2} \sum x_i z_i^2

    Parameters
    ----------
    mis : list
        Molalities of each ion, or mole fractions of each ion
        [mol/kg or -]
    zis : list
        Charges of each ion [-]

    Returns
    -------
    I : float
        ionic strength, [?]

    Examples
    --------
    >>> ionic_strength([0.1393, 0.1393], [1, -1])
    0.1393

    References
    ----------
    .. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans.
       "Local Composition Model for Excess Gibbs Energy of Electrolyte
       Systems. Part I: Single Solvent, Single Completely Dissociated
       Electrolyte Systems." AIChE Journal 28, no. 4 (July 1, 1982):
       588-96. doi:10.1002/aic.690280410
    .. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process
       Simulation. Weinheim, Germany: Wiley-VCH, 2012.
    '''
    # Half of the charge-squared-weighted sum over all ions.
    return 0.5*sum(m_i*z_i*z_i for m_i, z_i in zip(mis, zis))
r'''Calculate the ionic strength of a solution in one of two ways, depending on the inputs only. For Pitzer and Bromley models, `mis` should be molalities of each component. For eNRTL models, `mis` should be mole fractions of each electrolyte in the solution. This will sum to be much less than 1. .. math:: I = \frac{1}{2} \sum M_i z_i^2 I = \frac{1}{2} \sum x_i z_i^2 Parameters ---------- mis : list Molalities of each ion, or mole fractions of each ion [mol/kg or -] zis : list Charges of each ion [-] Returns ------- I : float ionic strength, [?] Examples -------- >>> ionic_strength([0.1393, 0.1393], [1, -1]) 0.1393 References ---------- .. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local Composition Model for Excess Gibbs Energy of Electrolyte Systems. Part I: Single Solvent, Single Completely Dissociated Electrolyte Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96. doi:10.1002/aic.690280410 .. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation. Weinheim, Germany: Wiley-VCH, 2012.
def __compute_evolution(
        df,
        id_cols,
        value_col,
        date_col=None,
        freq=1,
        compare_to=None,
        method='abs',
        format='column',
        offseted_suffix='_offseted',
        evolution_col_name='evolution_computed',
        how='left',
        fillna=None,
        raise_duplicate_error=True
):
    """
    Compute an evolution column :
        - against a period distant from a fixed frequency.
        - against a part of the df

    Unfortunately, pandas doesn't allow .change() and .pct_change() to be
    executed with a MultiIndex.

    Args:
        df (pd.DataFrame):
        id_cols (list(str)):
        value_col (str):
        date_col (str/dict): default None
        freq (int/pd.DateOffset/pd.Serie): default 1
        compare_to (str): default None
        method (str): default ``'abs'`` can be also ``'pct'``
        format(str): default 'column' can be also 'df'
        offseted_suffix(str): default '_offseted'
        evolution_col_name(str): default 'evolution_computed'
        how(str): default 'left'
        fillna(str/int): default None
    """
    if date_col is not None:
        # Mode 1: compare each row against the row `freq` earlier in time.
        is_date_to_format = isinstance(date_col, dict) or (df[date_col].dtype == np.object)
        if is_date_to_format:
            # Parse the date column into datetimes (optionally with an
            # explicit format) into a temporary copy column.
            if isinstance(date_col, dict):
                date_format = date_col.get('format', None)
                date_col = date_col['selector']
            else:
                date_format = None
            df['_'+date_col + '_copy_'] = pd.to_datetime(df[date_col],
                                                         format=date_format)
            date_col = '_'+date_col + '_copy_'

        is_freq_dict = isinstance(freq, dict)
        if is_freq_dict:
            freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})

        check_params_columns_duplicate(id_cols + [value_col, date_col])

        # create df_offseted
        group_cols = id_cols + [date_col]
        df_offseted = df[group_cols + [value_col]].copy()
        # Shift the offset frame forward so merging on the date aligns each
        # row with its counterpart `freq` earlier.
        df_offseted[date_col] += freq

        df_with_offseted_values = apply_merge(
            df, df_offseted, group_cols, how, offseted_suffix,
            raise_duplicate_error
        )
        if is_date_to_format:
            # Drop the temporary parsed-date column.
            del df_with_offseted_values[date_col]
    elif compare_to is not None:
        # Mode 2: compare each row against the subset selected by the
        # `compare_to` query.
        # create df_offseted
        check_params_columns_duplicate(id_cols + [value_col])
        group_cols = id_cols
        df_offseted = df.query(compare_to).copy()
        df_offseted = df_offseted[group_cols + [value_col]]

        df_with_offseted_values = apply_merge(
            df, df_offseted, group_cols, how, offseted_suffix,
            raise_duplicate_error
        )

    apply_fillna(df_with_offseted_values, value_col, offseted_suffix, fillna)
    apply_method(df_with_offseted_values, evolution_col_name, value_col,
                 offseted_suffix, method)
    return apply_format(df_with_offseted_values, evolution_col_name, format)
Compute an evolution column : - against a period distant from a fixed frequency. - against a part of the df Unfortunately, pandas doesn't allow .change() and .pct_change() to be executed with a MultiIndex. Args: df (pd.DataFrame): id_cols (list(str)): value_col (str): date_col (str/dict): default None freq (int/pd.DateOffset/pd.Serie): default 1 compare_to (str): default None method (str): default ``'abs'`` can be also ``'pct'`` format(str): default 'column' can be also 'df' offseted_suffix(str): default '_offseted' evolution_col_name(str): default 'evolution_computed' how(str): default 'left' fillna(str/int): default None
def license_present(name):
    '''
    Ensures that the specified PowerPath license key is present
    on the host.

    name
        The license key to ensure is present
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # PowerPath must be installed before licenses can be managed.
    if not __salt__['powerpath.has_powerpath']():
        ret['result'] = False
        ret['comment'] = 'PowerPath is not installed.'
        return ret

    licenses = [l['key'] for l in __salt__['powerpath.list_licenses']()]

    # Already present: nothing to do.
    if name in licenses:
        ret['result'] = True
        ret['comment'] = 'License key {0} already present'.format(name)
        return ret

    # Test mode: report what would change without applying it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'License key {0} is set to be added'.format(name)
        return ret

    data = __salt__['powerpath.add_license'](name)
    if data['result']:
        ret['changes'] = {name: 'added'}
        ret['result'] = True
        ret['comment'] = data['output']
        return ret
    else:
        ret['result'] = False
        ret['comment'] = data['output']
        return ret
Ensures that the specified PowerPath license key is present on the host. name The license key to ensure is present
def output(self, output, accepts, set_http_code, set_content_type):
    """ Formats a response from a WSGI app to handle any RDF graphs
    If a view function returns a single RDF graph, serialize it based on
    Accept header.
    If it's not an RDF graph, return it without any special handling.

    :param output: the value returned by the view function.
    :param accepts: the request's Accept header value.
    :param set_http_code: callable used to set the response status line.
    :param set_content_type: callable used to set the Content-Type header.
    """
    graph = Decorator._get_graph(output)
    if graph is not None:
        # decide the format
        output_mimetype, output_format = self.format_selector.decide(
            accepts, graph.context_aware)

        # requested content couldn't find anything
        if output_mimetype is None:
            set_http_code("406 Not Acceptable")
            return ['406 Not Acceptable'.encode('utf-8')]

        # explicitly mark text mimetypes as utf-8
        if 'text' in output_mimetype:
            output_mimetype = output_mimetype + '; charset=utf-8'

        # format the new response
        serialized = graph.serialize(format=output_format)
        set_content_type(output_mimetype)
        return [serialized]
    else:
        # Not a graph: pass the value through untouched.
        return output
Formats a response from a WSGI app to handle any RDF graphs If a view function returns a single RDF graph, serialize it based on Accept header If it's not an RDF graph, return it without any special handling
def centroid_2dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
    a constant) to the array.

    Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
    arrays are automatically masked; the effective mask is the union of
    the invalid-value masks of both arrays.

    Parameters
    ----------
    data : array_like
        The 2D data array.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    fit_result = fit_2dgaussian(data, error=error, mask=mask)
    centroid = np.array([fit_result.x_mean.value, fit_result.y_mean.value])
    return centroid
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Invalid values (e.g. NaNs or infs) in the ``data`` or ``error`` arrays are automatically masked. The mask for invalid values represents the combination of the invalid-value masks for the ``data`` and ``error`` arrays. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- centroid : `~numpy.ndarray` The ``x, y`` coordinates of the centroid.
def statement(self):
    """A terminated relational algebra statement."""
    # Either an assignment or a bare expression, followed by the
    # (suppressed) statement terminator.
    terminator = Suppress(self.syntax.terminator)
    body = self.assignment ^ self.expression
    return body + terminator
A terminated relational algebra statement.
def parse_option(self, option, block_name, *values):
    """Parse domain values for option, adding each derived domain to
    ``self.domains``.  Raises ValueError when no values are supplied or
    when nothing usable could be parsed.
    """
    _extra_subs = ('www', 'm', 'mobile')

    if len(values) == 0:
        # expect some values here..
        raise ValueError

    for value in values:
        value = value.lower()
        # If it doesn't look like a protocol, assume http
        # (e.g. only a domain was supplied).
        if not _RE_PROTOCOL.match(value):
            value = 'http://' + value
        parsed = urlparse.urlparse(value)
        if not parsed:
            continue
        domain = parsed.hostname
        # Must have a TLD to be considered a real domain.
        if not (domain and _RE_TLD.search(domain)):
            continue
        # Strip "www." and, for bare two-part domains, tack on the
        # www/m/mobile subdomains for good measure.  NOTE: this check
        # fails for multi-part TLDs, e.g. .co.uk.
        domain = _RE_WWW_SUB.sub('', domain)
        if len(domain.split('.')) == 2:
            for sub in _extra_subs:
                self.domains.add('{0}.{1}'.format(sub, domain))
        self.domains.add(domain)

    # No domains collected at all -- parsing must have failed.
    if not self.domains:
        raise ValueError
Parse domain values for option.
def state(self, new_state):
    """Set the state.

    The transition runs under the instance lock: the old state's
    ``exit`` hook, the swap, and the new state's ``enter`` hook are
    atomic with respect to other lock holders.
    """
    with self.lock:
        self._state.exit()
        self._state = new_state
        self._state.enter()
Set the state.
def list_all_customer_groups(cls, **kwargs):
    """List CustomerGroups

    Return a list of CustomerGroups.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request,
    please pass async=True

    >>> thread = api.list_all_customer_groups(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[CustomerGroup]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync path delegate to the same helper; in
    # the async case it returns the request thread, otherwise the data.
    return cls._list_all_customer_groups_with_http_info(**kwargs)
List CustomerGroups Return a list of CustomerGroups This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_customer_groups(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[CustomerGroup] If the method is called asynchronously, returns the request thread.
def AddShadow(self, fileset):
    """Add the shadow entries to the shadow store."""
    shadow_file = fileset.get("/etc/shadow")
    if not shadow_file:
        logging.debug("No /etc/shadow file.")
        return
    self._ParseFile(shadow_file, self.ParseShadowEntry)
Add the shadow entries to the shadow store.
def get_content_slug_by_slug(self, slug):
    """Return the latest :class:`Content <pages.models.Content>` slug
    object that matches the given slug for the current site domain, or
    ``None`` when no such object exists.

    :param slug: the wanted slug.
    """
    candidates = self.filter(type='slug', body=slug)
    if settings.PAGE_USE_SITE_ID:
        candidates = candidates.filter(
            page__sites__id=global_settings.SITE_ID)
    try:
        return candidates.latest('creation_date')
    except self.model.DoesNotExist:
        return None
Returns the latest :class:`Content <pages.models.Content>` slug object that match the given slug for the current site domain. :param slug: the wanted slug.
def build(self):
    """
    Builds and returns all combinations of parameters specified
    by the param grid.  Each combination is a dict mapping a param to
    its (type-converted) value.
    """
    keys = self._param_grid.keys()
    grid_values = self._param_grid.values()

    combinations = []
    # Cartesian product of every parameter's candidate values.
    for values in itertools.product(*grid_values):
        combinations.append({key: key.typeConverter(value)
                             for key, value in zip(keys, values)})
    return combinations
Builds and returns all combinations of parameters specified by the param grid.
def luminosity_integral(self, x, axis_ratio):
    """Integrand for the luminosity of an elliptical light profile.

    The axis ratio is set to 1.0 when computing the luminosity within a
    circle.
    """
    radius = x * axis_ratio
    intensity = self.intensities_from_grid_radii(x)
    return 2 * np.pi * radius * intensity
Routine to integrate the luminosity of an elliptical light profile. The axis ratio is set to 1.0 for computing the luminosity within a circle
def insertions_from_masked(seq):
    """Get [start, end] coordinates of insertions from an
    insertion-masked sequence (lowercase bases are insertions).
    """
    runs = []
    open_new_run = True  # start a fresh run at the next uppercase base
    for index, base in enumerate(seq):
        if base.isupper():
            if open_new_run:
                runs.append([])
                open_new_run = False
        elif base.islower():
            # Record the insertion position in the most recent run.
            runs[-1].append(index)
            open_new_run = True
    return [[min(run), max(run)] for run in runs if run]
get coordinates of insertions from insertion-masked sequence
def writeCleanup(self, varBind, **context):
    """Finalize Managed Object Instance modification.

    Implements the successful third step of the multi-step workflow of
    the SNMP SET command processing (:RFC:`1905#section-4.2.5`): seal
    the new state of the requested Managed Object Instance.  Once the
    system transitions into the *cleanup* state, no roll back to the
    previous Managed Object Instance state is possible.

    This object is non-terminal in the MIB tree: it does not access the
    actual Managed Object Instance, it traverses one level down the MIB
    tree and hands off the query to the underlying objects.

    Parameters
    ----------
    varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object
        representing the new Managed Object Instance value to set

    Other Parameters
    ----------------
    \*\*context: Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked
          to pass the new value of the Managed Object Instance or an
          error.  On error, the `error` key of the `context` dict holds
          an exception object.
    """
    name, val = varBind

    # Short-circuit logging: the message is only built when the
    # instrumentation debug flag is set.
    (debug.logger & debug.FLAG_INS and
     debug.logger('%s: writeCleanup(%s, %r)' % (self, name, val)))

    cbFun = context['cbFun']

    # Sealing the change bumps this branch's version.
    self.branchVersionId += 1

    # Per-request bookkeeping of instances created/destroyed under this
    # branch during the SET transaction.
    instances = context['instances'].setdefault(
        self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
    idx = context['idx']

    # Delegate to the create/destroy cleanup path when this request
    # created or destroyed the instance earlier in the transaction.
    if idx in instances[self.ST_CREATE]:
        self.createCleanup(varBind, **context)
        return

    if idx in instances[self.ST_DESTROY]:
        self.destroyCleanup(varBind, **context)
        return

    # Otherwise hand the query one level down the MIB tree.
    try:
        node = self.getBranch(name, **context)

    except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
        cbFun(varBind, **dict(context, error=exc))

    else:
        node.writeCleanup(varBind, **context)
Finalize Managed Object Instance modification. Implements the successful third step of the multi-step workflow of the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the third (successful) phase is to seal the new state of the requested Managed Object Instance. Once the system transition into the *cleanup* state, no roll back to the previous Managed Object Instance state is possible. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to set Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object.
def should_stop(self, result):
    """Whether the given result meets this trial's stopping criteria."""
    # A result explicitly marked done always stops the trial.
    if result.get(DONE):
        return True

    for metric, threshold in self.stopping_criterion.items():
        if metric not in result:
            raise TuneError(
                "Stopping criteria {} not provided in result {}.".format(
                    metric, result))
        if result[metric] >= threshold:
            return True

    return False
Whether the given result meets this trial's stopping criteria.
def set_genre(self, genre):
    """Sets song's genre

    :param genre: genre
    """
    frame = TCON(encoding=3, text=str(genre))
    self._set_attr(frame)
Sets song's genre :param genre: genre
def drop_if(df, fun):
    """Drop columns of ``df`` for which ``fun`` returns a truthy value.

    Args:
        df: the DataFrame to filter.
        fun: a predicate applied to each column (as a Series); columns
            for which it returns true are dropped.  Columns for which
            the predicate raises are kept.

    Returns:
        A new DataFrame without the dropped columns.
    """
    def _should_drop(col):
        try:
            return fun(df[col])
        except Exception:
            # A predicate that cannot handle a column (e.g. wrong
            # dtype) means the column is kept.  The original bare
            # `except:` also swallowed SystemExit/KeyboardInterrupt;
            # only ordinary errors should be caught here.
            return False

    cols = [col for col in df.columns if _should_drop(col)]
    return df.drop(cols, axis=1)
Drops columns where fun(ction) is true Args: fun: a function that will be applied to columns
def resolve_orm_path(model, orm_path):
    """
    Follows the queryset-style query path of ``orm_path`` starting from
    ``model`` class.  If the path ends up referring to a bad field name,
    ``django.db.models.fields.FieldDoesNotExist`` will be raised.
    """
    bits = orm_path.split('__')

    # Walk each relation segment to find the model owning the last bit.
    endpoint_model = model
    for bit in bits[:-1]:
        endpoint_model = get_model_at_related_field(endpoint_model, bit)

    leaf = bits[-1]
    if leaf == 'pk':
        return endpoint_model._meta.pk
    return endpoint_model._meta.get_field(leaf)
Follows the queryset-style query path of ``orm_path`` starting from ``model`` class. If the path ends up referring to a bad field name, ``django.db.models.fields.FieldDoesNotExist`` will be raised.
def major_flux(self, fraction=0.9):
    r"""Returns the main pathway part of the net flux comprising at most
    the requested fraction of the full flux.
    """
    decomposition = self.pathways(fraction=fraction)
    paths, fluxes = decomposition
    return self._pathways_to_flux(paths, fluxes, n=self.nstates)
r"""Returns the main pathway part of the net flux comprising at most the requested fraction of the full flux.
def pprint(self, index=False, delimiter='-'):
    """Pretty-print the binary tree.

    :param index: If set to True (default: False), display level-order_
        indexes using the format: ``{index}{delimiter}{value}``.
    :type index: bool
    :param delimiter: Delimiter character between the node index and
        the node value (default: '-').
    :type delimiter: str | unicode

    .. note::
        If you do not need level-order_ indexes in the output string,
        use :func:`binarytree.Node.__str__` instead.

    .. _level-order:
        https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
    """
    lines = _build_tree_string(self, 0, index, delimiter)[0]
    stripped = [line.rstrip() for line in lines]
    print('\n' + '\n'.join(stripped))
Pretty-print the binary tree. :param index: If set to True (default: False), display level-order_ indexes using the format: ``{index}{delimiter}{value}``. :type index: bool :param delimiter: Delimiter character between the node index and the node value (default: '-'). :type delimiter: str | unicode **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) # index: 0, value: 1 >>> root.left = Node(2) # index: 1, value: 2 >>> root.right = Node(3) # index: 2, value: 3 >>> root.left.right = Node(4) # index: 4, value: 4 >>> >>> root.pprint() <BLANKLINE> __1 / \\ 2 3 \\ 4 <BLANKLINE> >>> root.pprint(index=True) # Format: {index}-{value} <BLANKLINE> _____0-1_ / \\ 1-2_ 2-3 \\ 4-4 <BLANKLINE> .. note:: If you do not need level-order_ indexes in the output string, use :func:`binarytree.Node.__str__` instead. .. _level-order: https://en.wikipedia.org/wiki/Tree_traversal#Breadth-first_search
def get(self, id, seq, line):  # pylint: disable=invalid-name,redefined-builtin
    """Get a highlight.

    :param id: Result ID as an int.
    :param seq: TestResult sequence ID as an int.
    :param line: Line number in TestResult's logfile as an int.
    :return: :class:`highlights.Highlight <highlights.Highlight>` object
    """
    response = self.service.get_id(self._base(id, seq), line)
    return self.service.decode(HighlightSchema(), response)
Get a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param line: Line number in TestResult's logfile as an int. :return: :class:`highlights.Highlight <highlights.Highlight>` object
def deleteMember(self, address, id, headers=None, query_params=None, content_type="application/json"):
    """
    Delete member from network
    It is method for DELETE /network/{id}/member/{address}
    """
    endpoint = self.client.base_url + "/network/" + id + "/member/" + address
    return self.client.delete(endpoint, None, headers, query_params,
                              content_type)
Delete member from network It is method for DELETE /network/{id}/member/{address}
def request(key, features, query, timeout=5):
    """Make an API request

    :param string key: API key to use
    :param list features: features to request. It must be a subset of
        :data:`FEATURES`
    :param string query: query to send
    :param integer timeout: timeout of the request
    :returns: result of the API request
    :rtype: dict
    """
    # Unsupported features are silently dropped.
    wanted = [f for f in features if f in FEATURES]
    data = {
        'key': key,
        'features': '/'.join(wanted),
        'query': quote(query),
        'format': 'json',
    }
    r = requests.get(API_URL.format(**data), timeout=timeout)
    return json.loads(_unicode(r.content))
Make an API request :param string key: API key to use :param list features: features to request. It must be a subset of :data:`FEATURES` :param string query: query to send :param integer timeout: timeout of the request :returns: result of the API request :rtype: dict
def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray:
    """typically call via setupz instead"""
    # 3.14 is an arbitrary cutoff: tanh(3.14) is ~99% of its asymptote.
    samples = np.linspace(0, 3.14, Np)
    return gridmin + np.tanh(samples) * gridmax
typically call via setupz instead
def predecesors_pattern(element, root):
    """
    Look for `element` by its predecessors.

    Args:
        element (obj): HTMLElement instance of the object you are
            looking for.
        root (obj): Root of the `DOM`.

    Returns:
        list: ``[PathCall()]`` - list with one :class:`PathCall` object
            (to allow use with ``.extend(predecesors_pattern())``).
            NOTE(review): when `element` is not found in the match the
            function falls off the end and implicitly returns ``None``,
            not an empty list -- confirm callers tolerate that.
    """
    def is_root_container(el):
        # The synthetic DOM root has a blank tag name two levels up.
        return el.parent.parent.getTagName() == ""

    # Elements too close to the root have no usable predecessor trail.
    if not element.parent or not element.parent.parent or \
       is_root_container(element):
        return []

    # Trail of (tag name, params) triples: grandparent, parent, element.
    trail = [
        [
            element.parent.parent.getTagName(),
            _params_or_none(element.parent.parent.params)
        ],
        [
            element.parent.getTagName(),
            _params_or_none(element.parent.params)
        ],
        [element.getTagName(), _params_or_none(element.params)],
    ]

    match = root.match(*trail)

    if element in match:
        return [
            PathCall("match", match.index(element), trail)
        ]
Look for `element` by its predecessors. Args: element (obj): HTMLElement instance of the object you are looking for. root (obj): Root of the `DOM`. Returns: list: ``[PathCall()]`` - list with one :class:`PathCall` object (to \ allow use with ``.extend(predecesors_pattern())``).
def _append_to_scalar_dict(self, tag, scalar_value, global_step,
                           timestamp):
    """Append ``[timestamp, global_step, float(scalar_value)]`` to
    ``self._scalar_dict[tag]``, creating the list on first use.

    This lets users accumulate scalars in memory and dump them to a
    JSON file later.
    """
    # setdefault replaces the `tag not in self._scalar_dict.keys()`
    # membership test: one lookup instead of two, and no .keys() call.
    self._scalar_dict.setdefault(tag, []).append(
        [timestamp, global_step, float(scalar_value)])
Adds a list [timestamp, step, value] to the value of `self._scalar_dict[tag]`. This allows users to store scalars in memory and dump them to a json file later.
def on_mouse_wheel(self, event):
    '''handle mouse wheel zoom changes

    Adjusts ``self.zoom`` from the wheel rotation, clamps it to
    [0.1, 10], forces a stop at 1.0 when crossing it, and shifts
    ``self.dragpos`` so the pixel under the cursor stays put.
    '''
    state = self.state
    if not state.can_zoom:
        return
    # Image-space position under the cursor before zooming.
    mousepos = self.image_coordinates(event.GetPosition())
    rotation = event.GetWheelRotation() / event.GetWheelDelta()
    oldzoom = self.zoom
    if rotation > 0:
        self.zoom /= 1.0/(1.1 * rotation)
    elif rotation < 0:
        self.zoom /= 1.1 * (-rotation)
    # Clamp zoom to [0.1, 10].
    if self.zoom > 10:
        self.zoom = 10
    elif self.zoom < 0.1:
        self.zoom = 0.1
    # Snap to 1.0 when a zoom step crosses the 1:1 boundary.
    if oldzoom < 1 and self.zoom > 1:
        self.zoom = 1
    if oldzoom > 1 and self.zoom < 1:
        self.zoom = 1
    self.need_redraw = True
    new = self.image_coordinates(event.GetPosition())
    # adjust dragpos so the zoom doesn't change what pixel is under
    # the mouse
    self.dragpos = wx.Point(self.dragpos.x - (new.x-mousepos.x),
                            self.dragpos.y - (new.y-mousepos.y))
    self.limit_dragpos()
handle mouse wheel zoom changes
def _bfs(root_node, process_node):
    """
    Implementation of Breadth-first search (BFS) on caffe network DAG
    :param root_node: root node of caffe network DAG
    :param process_node: function to run on each node
    """
    from collections import deque

    visited = {root_node}
    queue = deque([root_node])

    while queue:
        node = queue.popleft()
        process_node(node)
        # Enqueue unseen children; marking on enqueue prevents
        # duplicates in a DAG with shared descendants.
        for child in node.children:
            if child not in visited:
                visited.add(child)
                queue.append(child)
Implementation of Breadth-first search (BFS) on caffe network DAG :param root_node: root node of caffe network DAG :param process_node: function to run on each node
def Copier(source, destination):
    """Factory method to select the right copier for a given source and
    destination.
    """
    copier_classes = {
        ('local', 'local'): LocalCopier,
        ('local', 'google_storage'): Local2GoogleStorageCopier,
        ('google_storage', 'local'): GoogleStorage2LocalCopier,
        ('google_storage', 'google_storage'): GoogleStorageCopier,
    }
    try:
        copier_class = copier_classes[(source.type, destination.type)]
    except KeyError:
        raise FileUtilsError('Could not find method to copy from source '
                             '"%s" to destination "%s".'
                             % (source, destination))
    return copier_class(source, destination)
Factory method to select the right copier for a given source and destination.
def handle(self): """Handles start request.""" # Mapper spec as form arguments. mapreduce_name = self._get_required_param("name") mapper_input_reader_spec = self._get_required_param("mapper_input_reader") mapper_handler_spec = self._get_required_param("mapper_handler") mapper_output_writer_spec = self.request.get("mapper_output_writer") mapper_params = self._get_params( "mapper_params_validator", "mapper_params.") params = self._get_params( "params_validator", "params.") # Default values. mr_params = map_job.JobConfig._get_default_mr_params() mr_params.update(params) if "queue_name" in mapper_params: mr_params["queue_name"] = mapper_params["queue_name"] # Set some mapper param defaults if not present. mapper_params["processing_rate"] = int(mapper_params.get( "processing_rate") or parameters.config.PROCESSING_RATE_PER_SEC) # Validate the Mapper spec, handler, and input reader. mapper_spec = model.MapperSpec( mapper_handler_spec, mapper_input_reader_spec, mapper_params, int(mapper_params.get("shard_count", parameters.config.SHARD_COUNT)), output_writer_spec=mapper_output_writer_spec) mapreduce_id = self._start_map( mapreduce_name, mapper_spec, mr_params, queue_name=mr_params["queue_name"], _app=mapper_params.get("_app")) self.json_response["mapreduce_id"] = mapreduce_id
Handles start request.
def download_photo_async(photo):
    """Download a photo to the path (global variable `directory`).

    :param photo: The photo information; includes id and title
    :type photo: dict
    """
    photo_id = photo['id']
    photo_title = photo['title']
    download_url = get_photo_url(photo_id)
    # File extension is taken from the URL.
    photo_format = download_url.split('.')[-1]
    photo_title = photo_title + '.' + photo_format
    file_path = directory + os.sep + photo_title
    logger.info('Download %s...', photo_title.encode('utf-8'))
    req = [grequests.get(download_url)]
    # NOTE(review): this Lock is created fresh on every call, so it
    # cannot provide mutual exclusion between concurrent downloads --
    # confirm whether a shared module-level lock was intended.
    counter_lock = multiprocessing.Lock()
    for resp in grequests.map(req):
        # NOTE(review): the file is opened in text mode ('w') while
        # resp.content is bytes -- this only works on Python 2; confirm.
        with open(file_path, 'w') as f:
            f.write(resp.content)
    with counter_lock:
        # Decrement the module-level remaining-downloads counter.
        global counter
        counter -= 1
        logger.info(
            'The number of pictures remaining: %s', counter
        )
Download a photo to the path (global variable `directory`) :param photo: The photo information; includes id and title :type photo: dict
def project_data(self):
    '''
    Assign the sum of ``.integral``\* to each sensible point in the
    ``pyny.Space`` for the intervals that the points are visible to
    the Sun.

    The generated information is stored in:

        * **.proj_vor** (*ndarray*): ``.integral`` projected to the
          Voronoi diagram.
        * **.proj_points** (*ndarray*): ``.integral`` projected to
          the sensible points in the ``pyny.Space``.

    :returns: None

    .. note:: \* Trapezoidal data (``.arg_data``) integration over
        time (``.arg_t``).
    '''
    from pyny3d.utils import sort_numpy

    # Visibility matrix as float so it can be scaled row-wise below.
    proj = self.light_vor.astype(float)

    # Pair each time sample's Voronoi-cell index with its integral
    # value, then sort by cell index for searchsorted below.
    map_ = np.vstack((self.t2vor_map, self.integral)).T
    map_sorted = sort_numpy(map_)
    n_points = map_sorted.shape[0]
    for i in range(proj.shape[0]):
        # Rows whose cell index falls in [i, i+1) belong to cell i.
        a, b = np.searchsorted(map_sorted[:, 0], (i, i+1))
        if b == n_points:
            b = -1
        # NOTE(review): when b is clamped to -1, the slice a:b excludes
        # the final row of map_sorted -- confirm this is intended and
        # not an off-by-one.
        proj[i, :] *= np.sum(map_sorted[a:b, 1])
    self.proj_vor = np.sum(proj, axis=1)
    self.proj_points = np.sum(proj, axis=0)
Assign the sum of ``.integral``\* to each sensible point in the ``pyny.Space`` for the intervals that the points are visible to the Sun. The generated information is stored in: * **.proj_vor** (*ndarray*): ``.integral`` projected to the Voronoi diagram. * **.proj_points** (*ndarray*): ``.integral`` projected to the sensible points in the ``pyny.Space``. :returns: None .. note:: \* Trapezoidal data (``.arg_data``) integration over time (``.arg_t``).
def ignore_path(path, ignore_list=None, whitelist=None):
    """
    Returns a boolean indicating if a path should be ignored given an
    ignore_list and a whitelist of glob patterns.
    """
    # No ignore list at all means everything is ignored.
    if ignore_list is None:
        return True

    ignored = matches_glob_list(path, ignore_list)
    if whitelist is None:
        return ignored
    # Whitelisted paths are never ignored (whitelist only consulted
    # when the path would otherwise be ignored).
    return ignored and not matches_glob_list(path, whitelist)
Returns a boolean indicating if a path should be ignored given an ignore_list and a whitelist of glob patterns.
def register(self, reg_data, retry=True, interval=1, timeout=3):
    """
    register function
    retry
        True, infinite retries;
        False, no retries;
        Number, that many retries
    interval
        time period between retries
    return
        False if no success
        Tunnel if success

    NOTE(review): `retry`, `interval` and `timeout` are accepted but
    not referenced in this body -- confirm whether the retry loop is
    meant to honor them.
    """
    # Nothing to announce -- skip registration entirely.
    if len(reg_data["resources"]) == 0:
        _logger.debug("%s no need to register due to no resources"
                      % (reg_data["name"]))
        return

    def _register():
        # One attempt: a 200 reply returns the response, a timeout
        # returns False (retryable), anything else returns None (fatal).
        try:
            resp = self.publish.direct.post(
                "/controller/registration", reg_data)
            if resp.code == 200:
                return resp
        except TimeoutError:
            _logger.debug("Register message is timeout")
            return False

    resp = _register()
    while resp is False:
        _logger.debug("Register failed.")
        self.deregister(reg_data)
        resp = _register()

    if resp is None:
        # Fixed message: previously the double negative
        # "Can't not register to controller".
        _logger.error("Can't register to controller")
        self.stop()
        return False

    self._conn.set_tunnel(
        reg_data["role"], resp.data["tunnel"], self.on_sanji_message)

    self.bundle.profile["currentTunnels"] = [
        tunnel for tunnel, callback in self._conn.tunnels.items()]
    # BUG FIX: the counter was stored under "regCount" but read back
    # from "reg_count", so it could never advance past 1.  Read the
    # same key that is written.
    self.bundle.profile["regCount"] = \
        self.bundle.profile.get("regCount", 0) + 1

    _logger.debug("Register successfully %s tunnel: %s"
                  % (reg_data["name"], resp.data["tunnel"],))
register function retry True, infinite retries; False, no retries; Number, that many retries interval time period between retries return False if no success, Tunnel if success
def _split_python(python):
    """Split Python source into chunks.

    Chunks are separated by at least two return lines. The break must
    not be followed by a space. Also, long Python strings spanning
    several lines are not splitted.
    """
    source = _preprocess(python)
    if not source:
        return []
    splitter = PythonSplitLexer()
    splitter.read(source)
    return splitter.chunks
Split Python source into chunks. Chunks are separated by at least two return lines. The break must not be followed by a space. Also, long Python strings spanning several lines are not splitted.
def info(gandi, resource, id, value):
    """Display information about an SSH key.

    Resource can be a name or an ID
    """
    output_keys = ['name', 'fingerprint']
    if id:
        output_keys.append('id')
    if value:
        output_keys.append('value')

    return [output_sshkey(gandi, gandi.sshkey.info(item), output_keys)
            for item in resource]
Display information about an SSH key. Resource can be a name or an ID
def run(self, messages):
    """Determine if a student is eligible to receive a hint.

    Based on their state, poses reflection questions.  After more
    attempts, ask if students would like hints.  If so, query the
    server.
    """
    if self.args.local:
        return

    # Only run hinting protocol on supported assignments.
    if self.assignment.endpoint not in self.SUPPORTED_ASSIGNMENTS:
        message = "{0} does not support hinting".format(self.assignment.endpoint)
        log.info(message)
        if self.args.hint:
            print(message)
        return

    if 'analytics' not in messages:
        log.info('Analytics Protocol is required for hint generation')
        return
    if 'file_contents' not in messages:
        log.info('File Contents needed to generate hints')
        return

    if self.args.no_experiments:
        messages['hinting'] = {'disabled': 'user'}
        return

    messages['hinting'] = {}
    history = messages['analytics'].get('history', {})
    questions = history.get('questions', [])
    current_q = history.get('question', {})
    messages['hinting']['flagged'] = self.args.hint

    for question in current_q:
        if question not in questions:
            continue
        stats = questions[question]
        is_solved = stats['solved'] == True
        messages['hinting'][question] = {'prompts': {}, 'reflection': {}}
        hint_info = messages['hinting'][question]

        # Determine a user's eligibility for a prompt.
        # If the user just solved this question, provide a reflection prompt
        if is_solved:
            hint_info['elgible'] = False
            hint_info['disabled'] = 'solved'
            if self.args.hint:
                print("This question has already been solved.")
            continue
        elif stats['attempts'] < self.SMALL_EFFORT:
            log.info("Question %s is not elgible: Attempts: %s, Solved: %s",
                     question, stats['attempts'], is_solved)
            hint_info['elgible'] = False
            if self.args.hint:
                hint_info['disabled'] = 'attempt-count'
                print("You need to make a few more attempts before the hint system is enabled")
            continue
        else:
            # Only prompt every WAIT_ATTEMPTS attempts to avoid annoying user
            if stats['attempts'] % self.WAIT_ATTEMPTS != 0:
                hint_info['disabled'] = 'timer'
                hint_info['elgible'] = False
                log.info('Waiting for %d more attempts before prompting',
                         stats['attempts'] % self.WAIT_ATTEMPTS)
            else:
                hint_info['elgible'] = not is_solved

        # Without --hint, only suggest the flag and move on.
        if not self.args.hint:
            if hint_info['elgible']:
                with format.block("-"):
                    print("To get hints, try using python3 ok --hint -q {}".format(question))
                hint_info['suggested'] = True
            continue

        hint_info['accept'] = True

        with format.block("-"):
            print(("Thinking of a hint for {}".format(question) +
                   "... (This could take up to 30 seconds)"))
            pre_hint = random.choice(PRE_HINT_MESSAGES)
            print("In the meantime, consider: \n{}".format(pre_hint))
            hint_info['pre-prompt'] = pre_hint

        log.info('Prompting for hint on %s', question)

        try:
            response = self.query_server(messages, question)
        except (requests.exceptions.RequestException,
                requests.exceptions.BaseHTTPError):
            log.debug("Network error while fetching hint", exc_info=True)
            hint_info['fetch_error'] = True
            print("\r\nNetwork Error while generating hint. Try again later")
            response = None
            continue

        if response:
            hint_info['response'] = response
            hint = response.get('message')
            pre_prompt = response.get('pre-prompt')
            post_prompt = response.get('post-prompt')
            system_error = response.get('system-error')
            log.info("Hint server response: {}".format(response))
            if not hint:
                if system_error:
                    print("{}".format(system_error))
                else:
                    print("Sorry. No hints found for the current code. Try again making after some changes")
                continue

            # Provide padding for the hint.
            print("\n{}".format(hint.rstrip()))

            if post_prompt:
                # NOTE(review): 'results', 'query' and 'prompt' are not
                # defined in this scope -- this line raises NameError
                # when reached.  Likely intended:
                # hint_info['prompts'][question] = ...; confirm upstream.
                results['prompts'][query] = prompt.explanation_msg(post_prompt)
Determine if a student is eligible to receive a hint. Based on their state, poses reflection questions. After more attempts, ask if students would like hints. If so, query the server.
def rows_max(self, size=None, focus=False):
    """Return the number of rows for `size`

    If `size` is not given, the currently rendered number of rows is
    returned.
    """
    if size is None:
        return self._rows_max_cached

    ow = self._original_widget
    ow_size = self._get_original_widget_size(size)
    sizing = ow.sizing()
    if FIXED in sizing:
        self._rows_max_cached = ow.pack(ow_size, focus)[1]
    elif FLOW in sizing:
        self._rows_max_cached = ow.rows(ow_size, focus)
    else:
        raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
    return self._rows_max_cached
Return the number of rows for `size` If `size` is not given, the currently rendered number of rows is returned.
def legal_status(CASRN, Method=None, AvailableMethods=False, CASi=None):
    r'''Looks up the legal status of a chemical according to either a
    specific method or with all methods.

    Returns either the status as a string for a specified method, or the
    status of the chemical in all available data sources, in the format
    {source: status}.

    Parameters
    ----------
    CASRN : string
        CASRN [-]

    Returns
    -------
    status : str or dict
        Legal status information [-]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to obtain legal status with the
        given inputs

    Other Parameters
    ----------------
    Method : string, optional
        A string for the method name to use, as defined by constants in
        legal_status_methods
    AvailableMethods : bool, optional
        If True, function will determine which methods can be used to obtain
        the legal status for the desired chemical, and will return methods
        instead of the status
    CASi : int, optional
        CASRN as an integer, used internally [-]

    Notes
    -----
    Supported methods are:

        * **DSL**: Canada Domestic Substance List, [1]_. As extracted on Feb
          11, 2015 from a html list. This list is updated continuously, so
          this version will always be somewhat old. Strictly speaking, there
          are multiple lists but they are all bundled together here. A
          chemical may be 'Listed', or be on the 'Non-Domestic Substances
          List (NDSL)', or be on the list of substances with 'Significant
          New Activity (SNAc)', or be on the DSL but with a 'Ministerial
          Condition pertaining to this substance', or have been removed from
          the DSL, or have had a Ministerial prohibition for the substance.
        * **TSCA**: USA EPA Toxic Substances Control Act Chemical Inventory,
          [2]_. This list is as extracted on 2016-01. It is believed this
          list is updated on a periodic basis (> 6 month). A chemical may
          simply be 'Listed', or may have certain flags attached to it. All
          these flags are described in the dict TSCA_flags.
        * **EINECS**: European INventory of Existing Commercial chemical
          Substances, [3]_. As extracted from a spreadsheet dynamically
          generated at [1]_. This list was obtained March 2015; a more
          recent revision already exists.
        * **NLP**: No Longer Polymers, a list of chemicals with special
          regulatory exemptions in EINECS. Also described at [3]_.
        * **SPIN**: Substances Prepared in Nordic Countries. Also a boolean
          data type. Retrieved 2015-03 from [4]_.

    Other methods which could be added are:

        * Australia: AICS Australian Inventory of Chemical Substances
        * China: Inventory of Existing Chemical Substances Produced or
          Imported in China (IECSC)
        * Europe: REACH List of Registered Substances
        * India: List of Hazardous Chemicals
        * Japan: ENCS: Inventory of existing and new chemical substances
        * Korea: Existing Chemicals Inventory (KECI)
        * Mexico: INSQ National Inventory of Chemical Substances in Mexico
        * New Zealand: Inventory of Chemicals (NZIoC)
        * Philippines: PICCS Philippines Inventory of Chemicals and Chemical
          Substances

    Examples
    --------
    >>> pprint(legal_status('64-17-5'))
    {'DSL': 'LISTED',
     'EINECS': 'LISTED',
     'NLP': 'UNLISTED',
     'SPIN': 'LISTED',
     'TSCA': 'LISTED'}

    References
    ----------
    .. [1] Government of Canada.. "Substances Lists" Feb 11, 2015.
       https://www.ec.gc.ca/subsnouvelles-newsubs/default.asp?n=47F768FE-1.
    .. [2] US EPA. "TSCA Chemical Substance Inventory." Accessed April 2016.
       https://www.epa.gov/tsca-inventory.
    .. [3] ECHA. "EC Inventory". Accessed March 2015.
       http://echa.europa.eu/information-on-chemicals/ec-inventory.
    .. [4] SPIN. "SPIN Substances in Products In Nordic Countries."
       Accessed March 2015. http://195.215.202.233/DotNetNuke/default.aspx.
    '''
    load_law_data()
    if not CASi:
        CASi = CAS2int(CASRN)
    methods = [COMBINED, DSL, TSCA, EINECS, NLP, SPIN]
    if AvailableMethods:
        return methods
    if not Method:
        # COMBINED (all sources) is the default method.
        Method = methods[0]
    if Method == DSL:
        if CASi in DSL_data.index:
            status = CAN_DSL_flags[DSL_data.at[CASi, 'Registry']]
        else:
            status = UNLISTED
    elif Method == TSCA:
        if CASi in TSCA_data.index:
            data = TSCA_data.loc[CASi].to_dict()
            if any(data.values()):
                # Return the sorted list of applicable TSCA flags.
                status = sorted([TSCA_flags[i] for i in data.keys() if data[i]])
            else:
                status = LISTED
        else:
            status = UNLISTED
    elif Method == EINECS:
        if CASi in EINECS_data.index:
            status = LISTED
        else:
            status = UNLISTED
    elif Method == NLP:
        if CASi in NLP_data.index:
            status = LISTED
        else:
            status = UNLISTED
    elif Method == SPIN:
        if CASi in SPIN_data.index:
            status = LISTED
        else:
            status = UNLISTED
    elif Method == COMBINED:
        # Query every individual source and collect the results.
        status = {}
        for method in methods[1:]:
            status[method] = legal_status(CASRN, Method=method, CASi=CASi)
    else:
        # Bug fix: previous message was the uninformative
        # 'Failure in in function'; name the actual problem.
        raise Exception('Unrecognized method %r is not in %s' % (Method, methods))
    return status
r'''Looks up the legal status of a chemical according to either a specifc method or with all methods. Returns either the status as a string for a specified method, or the status of the chemical in all available data sources, in the format {source: status}. Parameters ---------- CASRN : string CASRN [-] Returns ------- status : str or dict Legal status information [-] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain legal status with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in legal_status_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain the legal status for the desired chemical, and will return methods instead of the status CASi : int, optional CASRN as an integer, used internally [-] Notes ----- Supported methods are: * **DSL**: Canada Domestic Substance List, [1]_. As extracted on Feb 11, 2015 from a html list. This list is updated continuously, so this version will always be somewhat old. Strictly speaking, there are multiple lists but they are all bundled together here. A chemical may be 'Listed', or be on the 'Non-Domestic Substances List (NDSL)', or be on the list of substances with 'Significant New Activity (SNAc)', or be on the DSL but with a 'Ministerial Condition pertaining to this substance', or have been removed from the DSL, or have had a Ministerial prohibition for the substance. * **TSCA**: USA EPA Toxic Substances Control Act Chemical Inventory, [2]_. This list is as extracted on 2016-01. It is believed this list is updated on a periodic basis (> 6 month). A chemical may simply be 'Listed', or may have certain flags attached to it. All these flags are described in the dict TSCA_flags. * **EINECS**: European INventory of Existing Commercial chemical Substances, [3]_. As extracted from a spreadsheet dynamically generated at [1]_. 
This list was obtained March 2015; a more recent revision already exists. * **NLP**: No Longer Polymers, a list of chemicals with special regulatory exemptions in EINECS. Also described at [3]_. * **SPIN**: Substances Prepared in Nordic Countries. Also a boolean data type. Retrieved 2015-03 from [4]_. Other methods which could be added are: * Australia: AICS Australian Inventory of Chemical Substances * China: Inventory of Existing Chemical Substances Produced or Imported in China (IECSC) * Europe: REACH List of Registered Substances * India: List of Hazardous Chemicals * Japan: ENCS: Inventory of existing and new chemical substances * Korea: Existing Chemicals Inventory (KECI) * Mexico: INSQ National Inventory of Chemical Substances in Mexico * New Zealand: Inventory of Chemicals (NZIoC) * Philippines: PICCS Philippines Inventory of Chemicals and Chemical Substances Examples -------- >>> pprint(legal_status('64-17-5')) {'DSL': 'LISTED', 'EINECS': 'LISTED', 'NLP': 'UNLISTED', 'SPIN': 'LISTED', 'TSCA': 'LISTED'} References ---------- .. [1] Government of Canada.. "Substances Lists" Feb 11, 2015. https://www.ec.gc.ca/subsnouvelles-newsubs/default.asp?n=47F768FE-1. .. [2] US EPA. "TSCA Chemical Substance Inventory." Accessed April 2016. https://www.epa.gov/tsca-inventory. .. [3] ECHA. "EC Inventory". Accessed March 2015. http://echa.europa.eu/information-on-chemicals/ec-inventory. .. [4] SPIN. "SPIN Substances in Products In Nordic Countries." Accessed March 2015. http://195.215.202.233/DotNetNuke/default.aspx.
def share_matrix(locifile, tree=None, nameorder=None):
    """Return a matrix of shared RAD-seq loci between all pairs of samples.

    Parameters:
    -----------
    locifile (str):
        Path to a ipyrad .loci file.
    tree (str):
        Path to Newick file or a Newick string representation of
        a tree. If used, names will be ordered by the ladderized tip order.
    nameorder (list):
        If a tree is not provided you can alternatively enter the sample
        order as a list here. The tree argument will override this argument.

    Returns:
    --------
    matrix (numpy.array):
        A uint64 numpy array of the number of shared loci between all pairs
        of samples.
    """
    # Read the loci file; entries are separated by "|\n" and the final
    # split element is empty, so drop it.
    with open(locifile, 'r') as locidata:
        loci = locidata.read().split("|\n")[:-1]

    # A tree (ladderized tip order) takes precedence over nameorder.
    if tree:
        tree = ete.Tree(tree)
        tree.ladderize()
        lxs, names = _getarray(loci, tree.get_leaf_names())
    elif nameorder:
        lxs, names = _getarray(loci, nameorder)
    else:
        raise IOError("must provide either tree or nameorder argument")

    return _countmatrix(lxs)
returns a matrix of shared RAD-seq data Parameters: ----------- locifile (str): Path to a ipyrad .loci file. tree (str): Path to Newick file or a Newick string representation of a tree. If used, names will be ordered by the ladderized tip order. nameorder (list): If a tree is not provided you can alternatively enter the sample order as a list here. The tree argument will override this argument. Returns: -------- matrix (numpy.array): A uint64 numpy array of the number of shared loci between all pairs of samples.
async def add_unknown_id(self, unknown_id, timeout=OTGW_DEFAULT_TIMEOUT):
    """
    Inform the gateway that the boiler doesn't support the specified
    Data-ID, even if the boiler doesn't indicate that by returning an
    Unknown-DataId response. Using this command allows the gateway to
    send an alternative Data-ID to the boiler instead.
    Return the added ID, or None on failure.

    This method is a coroutine
    """
    data_id = int(unknown_id)
    # Valid Data-IDs are 1..255; reject anything outside that range.
    if not 1 <= data_id <= 255:
        return None
    ret = await self._wait_for_cmd(OTGW_CMD_UNKNOWN_ID, data_id, timeout)
    if ret is None:
        return None
    return int(ret)
Inform the gateway that the boiler doesn't support the specified Data-ID, even if the boiler doesn't indicate that by returning an Unknown-DataId response. Using this command allows the gateway to send an alternative Data-ID to the boiler instead. Return the added ID, or None on failure. This method is a coroutine
def get_repos(self, visibility=github.GithubObject.NotSet, affiliation=github.GithubObject.NotSet, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
    """
    :calls: `GET /user/repos <http://developer.github.com/v3/repos>`
    :param visibility: string
    :param affiliation: string
    :param type: string
    :param sort: string
    :param direction: string
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    # Validate and collect the optional filters in one table-driven pass.
    params = (
        ("visibility", visibility),
        ("affiliation", affiliation),
        ("type", type),
        ("sort", sort),
        ("direction", direction),
    )
    for _, value in params:
        assert value is github.GithubObject.NotSet or isinstance(value, (str, unicode)), value
    url_parameters = {
        name: value
        for name, value in params
        if value is not github.GithubObject.NotSet
    }
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository,
        self._requester,
        "/user/repos",
        url_parameters
    )
:calls: `GET /user/repos <http://developer.github.com/v3/repos>` :param visibility: string :param affiliation: string :param type: string :param sort: string :param direction: string :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
def create(self, _attributes=None, **attributes):
    """
    Create, persist and return a new instance of the related model,
    with its foreign key pointing at the parent.

    :param attributes: The attributes
    :type attributes: dict

    :rtype: Model
    """
    # An explicit dict argument is merged over the keyword attributes.
    if _attributes is not None:
        attributes.update(_attributes)

    new_model = self._related.new_instance(attributes)
    # Link the new record back to its parent before saving.
    new_model.set_attribute(self.get_plain_foreign_key(), self.get_parent_key())
    new_model.save()

    return new_model
Create a new instance of the related model. :param attributes: The attributes :type attributes: dict :rtype: Model
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    # Discard any usage recorded by a previous run before re-polling.
    for lim in self.limits.values():
        lim._reset_usage()
    # Each helper polls one resource type and records its usage.
    self._find_usage_nodes()
    self._find_usage_subnet_groups()
    self._find_usage_parameter_groups()
    self._find_usage_security_groups()
    # Mark usage as populated so callers know the data is fresh.
    self._have_usage = True
    logger.debug("Done checking usage.")
Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`.
def post_loader(*decorator_args, serializer):
    """
    Decorator to automatically instantiate a model from json request data

    :param serializer: The ModelSerializer to use to load data from the request
    """
    def decorator(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            loaded = serializer.load(request.get_json())
            return fn(*loaded)
        return inner

    # Support both bare usage (@post_loader) and called usage
    # (@post_loader(serializer=...)).
    if decorator_args and callable(decorator_args[0]):
        return decorator(decorator_args[0])
    return decorator
Decorator to automatically instantiate a model from json request data :param serializer: The ModelSerializer to use to load data from the request
def undeploy_lambda_alb(self, lambda_name):
    """
    The `zappa undeploy` functionality for ALB infrastructure.

    Tears down, in order: the lambda's resource policy statement, the
    load balancer (with its listener), and the target group.  Each stage
    treats "not found" errors as already-deleted and continues.
    """
    print("Undeploying ALB infrastructure...")

    # Locate and delete alb/lambda permissions
    try:
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
        self.lambda_client.remove_permission(
            FunctionName=lambda_name,
            StatementId=lambda_name
        )
    except botocore.exceptions.ClientError as e: # pragma: no cover
        # Permission already gone: nothing to do.
        if "ResourceNotFoundException" in e.response["Error"]["Code"]:
            pass
        else:
            raise e

    # Locate and delete load balancer
    try:
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
        response = self.elbv2_client.describe_load_balancers(
            Names=[lambda_name]
        )
        # Exactly one ALB is expected per zappa deployment name.
        if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
            raise EnvironmentError("Failure to locate/delete ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
        response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
        if not(response["Listeners"]):
            print('No listeners found.')
        elif len(response["Listeners"]) > 1:
            raise EnvironmentError("Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
        else:
            listener_arn = response["Listeners"][0]["ListenerArn"]
            # Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
            response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
        # Remove the load balancer and wait for completion
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
        response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
        waiter = self.elbv2_client.get_waiter('load_balancers_deleted')
        print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name))
        waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
    except botocore.exceptions.ClientError as e: # pragma: no cover
        print(e.response["Error"]["Code"])
        # ALB already gone: nothing to do.
        if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
            pass
        else:
            raise e

    # Locate and delete target group
    try:
        # Locate the lambda ARN
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
        response = self.lambda_client.get_function(FunctionName=lambda_name)
        lambda_arn = response["Configuration"]["FunctionArn"]
        # Locate the target group ARN
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
        response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
        if not(response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
            raise EnvironmentError("Failure to locate/delete ALB target group named [{}]. Response was: {}".format(lambda_name, repr(response)))
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
        # Deregister targets and wait for completion
        self.elbv2_client.deregister_targets(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": lambda_arn}]
        )
        waiter = self.elbv2_client.get_waiter('target_deregistered')
        print('Waiting for target [{}] to be deregistered...'.format(lambda_name))
        waiter.wait(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": lambda_arn}],
            WaiterConfig={"Delay": 3}
        )
        # Remove the target group
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
        self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
    except botocore.exceptions.ClientError as e: # pragma: no cover
        print(e.response["Error"]["Code"])
        # Target group already gone: nothing to do.
        if "TargetGroupNotFound" in e.response["Error"]["Code"]:
            pass
        else:
            raise e
The `zappa undeploy` functionality for ALB infrastructure.
def purge(self, session, checksum):
    '''
    Deletes calc entry by checksum entirely from the database
    NB source files on disk are not deleted
    NB: this is the PUBLIC method
    @returns error
    '''
    C = session.query(model.Calculation).get(checksum)
    if not C:
        return 'Calculation does not exist!'

    # dataset deletion includes editing the whole dataset hierarchical tree (if any)
    if C.siblings_count:
        C_meta = session.query(model.Metadata).get(checksum)
        # Walk every ancestor of this item, breadth-first, grouping them
        # by their distance from the deleted entry.
        higher_lookup = {}
        more = C.parent
        distance = 0
        while True:
            distance += 1
            higher, more = more, []
            if not higher:
                break
            for item in higher:
                try:
                    higher_lookup[distance].add(item)
                except KeyError:
                    higher_lookup[distance] = set([item])
                if item.parent:
                    more += item.parent
        # NOTE(review): the exact branch boundaries below were reconstructed
        # from collapsed source -- confirm against upstream: only direct
        # parents (distance == 1) lose a sibling, while every ancestor's
        # download size shrinks.
        for distance, members in higher_lookup.items():
            for member in members:
                if distance == 1:
                    member.siblings_count -= 1
                    if not member.siblings_count:
                        return 'The parent dataset contains only one (current) item, please, delete parent dataset first!'
                member.meta_data.download_size -= C_meta.download_size
                session.add(member)

    # low-level entry deletion deals with additional tables
    else:
        session.execute(model.delete(model.Spectra).where(model.Spectra.checksum == checksum))
        session.execute(model.delete(model.Electrons).where(model.Electrons.checksum == checksum))
        session.execute(model.delete(model.Phonons).where(model.Phonons.checksum == checksum))
        session.execute(model.delete(model.Recipinteg).where(model.Recipinteg.checksum == checksum))
        session.execute(model.delete(model.Basis).where(model.Basis.checksum == checksum))
        session.execute(model.delete(model.Energy).where(model.Energy.checksum == checksum))
        session.execute(model.delete(model.Spacegroup).where(model.Spacegroup.checksum == checksum))
        session.execute(model.delete(model.Struct_ratios).where(model.Struct_ratios.checksum == checksum))
        session.execute(model.delete(model.Struct_optimisation).where(model.Struct_optimisation.checksum == checksum))
        # Per-structure child rows (atoms, lattices) key on struct_id,
        # not checksum, so resolve the ids first.
        struct_ids = [
            int(i[0])
            for i in session.query(model.Structure.struct_id).filter(model.Structure.checksum == checksum).all()
        ]
        for struct_id in struct_ids:
            session.execute(model.delete(model.Atom).where(model.Atom.struct_id == struct_id))
            session.execute(model.delete(model.Lattice).where(model.Lattice.struct_id == struct_id))
        session.execute(model.delete(model.Structure).where(model.Structure.checksum == checksum))

    # for all types of entries
    if len(C.references):
        # Remember which references this entry pointed at before unlinking.
        left_references = [
            int(i[0])
            for i in session.query(model.Reference.reference_id).join(
                model.metadata_references,
                model.Reference.reference_id == model.metadata_references.c.reference_id
            ).filter(model.metadata_references.c.checksum == checksum).all()
        ]
        session.execute(model.delete(model.metadata_references).where(model.metadata_references.c.checksum == checksum))
        # remove the whole citation?
        for lc in left_references:
            # Delete the citation itself only if no other entry still links it.
            if not (session.query(model.metadata_references.c.checksum).filter(model.metadata_references.c.reference_id == lc).count()):
                session.execute(model.delete(model.Reference).where(model.Reference.reference_id == lc))

    # TODO rewrite with cascading
    session.execute(model.delete(model.Metadata).where(model.Metadata.checksum == checksum))
    session.execute(model.delete(model.Grid).where(model.Grid.checksum == checksum))
    session.execute(model.delete(model.tags).where(model.tags.c.checksum == checksum))
    session.execute(model.delete(model.calcsets).where(model.calcsets.c.children_checksum == checksum))
    session.execute(model.delete(model.calcsets).where(model.calcsets.c.parent_checksum == checksum))
    session.execute(model.delete(model.Calculation).where(model.Calculation.checksum == checksum))
    session.commit()
    # NB tables topics, codefamily, codeversion, pottype are mostly irrelevant and, if needed, should be cleaned manually
    return False
Deletes calc entry by checksum entirely from the database NB source files on disk are not deleted NB: this is the PUBLIC method @returns error
def __convert_key(expression):
    """Converts keys in YAML that reference other keys.

    A string key whose second character is ``'!'`` embeds a Python
    expression: the two leading marker characters and the trailing one are
    stripped and the remainder is evaluated.  Anything else is returned
    unchanged.
    """
    # isinstance() instead of `type(...) is str` so str subclasses are
    # handled too (idiomatic type check).
    # NOTE(review): eval() on config-derived text executes arbitrary code;
    # this is only safe if the YAML source is fully trusted.
    if isinstance(expression, str) and len(expression) > 2 and expression[1] == '!':
        expression = eval(expression[2:-1])
    return expression
Converts keys in YAML that reference other keys.
def request(self, request):
    """
    Perform an HTTP request through the context

    Args:
        request: A v20.request.Request object

    Returns:
        A v20.response.Response object

    Raises:
        V20ConnectionError: if the connection to the endpoint fails
        V20Timeout: if connecting to or reading from the endpoint times out
    """
    url = "{}{}".format(self._base_url, request.path)

    # Streaming requests use their own (typically longer) read timeout.
    timeout = self.poll_timeout
    if request.stream is True:
        timeout = self.stream_timeout

    try:
        http_response = self._session.request(
            request.method,
            url,
            headers=self._headers,
            params=request.params,
            data=request.body,
            stream=request.stream,
            timeout=timeout
        )
    except requests.exceptions.ConnectionError:
        raise V20ConnectionError(url)
    except requests.exceptions.ConnectTimeout:
        raise V20Timeout(url, "connect")
    except requests.exceptions.ReadTimeout:
        raise V20Timeout(url, "read")

    # Record the headers actually sent (requests may add or rewrite some).
    request.headers = http_response.request.headers

    response = Response(
        request,
        request.method,
        http_response.url,
        http_response.status_code,
        http_response.reason,
        http_response.headers
    )

    if request.stream:
        # Streamed bodies are handed to the caller as an iterator of
        # parsed lines rather than read eagerly.
        response.set_line_parser(
            request.line_parser
        )
        response.set_lines(
            http_response.iter_lines(
                self.stream_chunk_size
            )
        )
    else:
        response.set_raw_body(http_response.text)

    return response
Perform an HTTP request through the context Args: request: A v20.request.Request object Returns: A v20.response.Response object
def qorts_general_stats(self):
    """Add QoRTs columns to the General Statistics table."""
    # Declare the columns in display order.
    headers = OrderedDict([
        ('Genes_PercentWithNonzeroCounts', {
            'title': '% Genes with Counts',
            'description': 'Percent of Genes with Non-Zero Counts',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn',
        }),
        ('NumberOfChromosomesCovered', {
            'title': 'Chrs Covered',
            'description': 'Number of Chromosomes Covered',
            'format': '{:,.0f}',
        }),
    ])
    self.general_stats_addcols(self.qorts_data, headers)
Add columns to the General Statistics table
def sql(self):
    """If you access this attribute, we will build an SQLite database out of
    the FASTA file and you will be able access everything in an indexed
    fashion, and use the blaze library via sql.frame"""
    from fasta.indexed import DatabaseFASTA, fasta_to_sql
    database = DatabaseFASTA(self.prefix_path + ".db")
    # Build the database lazily, only on first access.
    if not database.exists:
        fasta_to_sql(self.path, database.path)
    return database
If you access this attribute, we will build an SQLite database out of the FASTA file and you will be able access everything in an indexed fashion, and use the blaze library via sql.frame
def GET_AUTH(self, courseid, aggregationid=''):  # pylint: disable=arguments-differ
    """ Edit a aggregation """
    course, _ = self.get_course_and_check_rights(courseid, allow_all_staff=True)
    # Aggregation editing is not available for LTI-bound courses.
    if course.is_lti():
        raise web.notfound()
    return self.display_page(course, aggregationid)
Edit a aggregation
def create_from_str(name_and_zone: str):
    """
    Factory method for creating a user from a string in the form `name#zone`.
    :param name_and_zone: the user's name followed by hash followed by the user's zone
    :return: the created user
    :raises ValueError: if the separator is missing or either part is blank
    """
    if _NAME_ZONE_SEGREGATOR not in name_and_zone:
        raise ValueError("User's zone not set")
    # NOTE(review): a second separator (e.g. "a#b#c") makes this unpacking
    # raise ValueError ("too many values to unpack") -- preserved behavior.
    name, zone = name_and_zone.split(_NAME_ZONE_SEGREGATOR)
    # Truthiness checks instead of `len(x) == 0` (idiomatic).
    if not name:
        raise ValueError("User's name cannot be blank")
    if not zone:
        raise ValueError("User's zone cannot be blank")
    return User(name, zone)
Factory method for creating a user from a string in the form `name#zone`. :param name_and_zone: the user's name followed by hash followed by the user's zone :return: the created user
def _versioned_lib_name(env, libnode, version, prefix, suffix,
                        prefix_generator, suffix_generator, **kw):
    """For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so'

    The versioned prefix/suffix produced by the generators is stripped from
    the node's basename and replaced with the plain prefix/suffix.
    """
    Verbose = False

    # Bug fix: '{:r}' is not a valid format spec and raises
    # "ValueError: Unknown format code 'r'"; repr conversion is '{!r}'.
    if Verbose:
        print("_versioned_lib_name: libnode={!r}".format(libnode.get_path()))
        print("_versioned_lib_name: version={!r}".format(version))
        print("_versioned_lib_name: prefix={!r}".format(prefix))
        print("_versioned_lib_name: suffix={!r}".format(suffix))
        print("_versioned_lib_name: suffix_generator={!r}".format(suffix_generator))

    versioned_name = os.path.basename(libnode.get_path())
    if Verbose:
        print("_versioned_lib_name: versioned_name={!r}".format(versioned_name))

    versioned_prefix = prefix_generator(env, **kw)
    versioned_suffix = suffix_generator(env, **kw)
    if Verbose:
        print("_versioned_lib_name: versioned_prefix={!r}".format(versioned_prefix))
        print("_versioned_lib_name: versioned_suffix={!r}".format(versioned_suffix))

    # Anchor the replacements so only a leading prefix / trailing suffix
    # can match.
    versioned_prefix_re = '^' + re.escape(versioned_prefix)
    versioned_suffix_re = re.escape(versioned_suffix) + '$'
    name = re.sub(versioned_prefix_re, prefix, versioned_name)
    name = re.sub(versioned_suffix_re, suffix, name)
    if Verbose:
        print("_versioned_lib_name: name={!r}".format(name))
    return name
For libnode='/optional/dir/libfoo.so.X.Y.Z' it returns 'libfoo.so
def split_by_files(self, valid_names:'ItemList')->'ItemLists':
    "Split the data by using the names in `valid_names` for validation."
    # Items may be Path objects or plain path strings; pick the matching
    # name extractor, then delegate to the predicate-based splitter.
    if isinstance(self.items[0], Path):
        is_valid = lambda o: o.name in valid_names
    else:
        is_valid = lambda o: os.path.basename(o) in valid_names
    return self.split_by_valid_func(is_valid)
Split the data by using the names in `valid_names` for validation.
def scheme_specification(cls):
    """ :meth:`.WSchemeHandler.scheme_specification` method implementation
    """
    # Handles 'file' URIs; only the path component is declared, and it is
    # optional -- other URI components are left unspecified.
    return WSchemeSpecification(
        'file',
        WURIComponentVerifier(WURI.Component.path, WURIComponentVerifier.Requirement.optional)
    )
:meth:`.WSchemeHandler.scheme_specification` method implementation
def run_script(self, filename, start_opts=None, globals_=None,
               locals_=None):
    """ Run debugger on Python script `filename'. The script may
    inspect sys.argv for command arguments. `globals_' and `locals_'
    are the dictionaries to use for local and global variables. If
    `globals' is not given, globals() (the current global variables)
    is used. If `locals_' is not given, it becomes a copy of
    `globals_'.

    True is returned if the program terminated normally and False if
    the debugger initiated a quit or the program did not normally
    terminate.

    See also `run_call' if what you to debug a function call,
    `run_eval' if you want to debug an expression, and `run' if you
    want to debug general Python statements not inside a file.
    """
    self.mainpyfile = self.core.canonic(filename)

    # Start with fresh empty copy of globals and locals and tell the script
    # that it's being run as __main__ to avoid scripts being able to access
    # the debugger namespace.
    if globals_ is None:
        import __main__  # NOQA
        globals_ = {"__name__": "__main__",
                    "__file__": self.mainpyfile,
                    "__builtins__": __builtins__}  # NOQA
    if locals_ is None:
        locals_ = globals_

    retval = False
    self.core.execution_status = 'Running'
    try:
        # Bug fix: close the script file promptly via a context manager
        # instead of leaking the handle from open(...).read().
        with open(self.mainpyfile) as source:
            compiled = compile(source.read(), self.mainpyfile, 'exec')
        self.core.start(start_opts)
        exec(compiled, globals_, locals_)
        retval = True
    except SyntaxError:
        print(sys.exc_info()[1])
        retval = False
    except IOError:
        print(sys.exc_info()[1])
    except DebuggerQuit:
        retval = False
    except DebuggerRestart:
        self.core.execution_status = 'Restart requested'
        # Bare re-raise preserves the original traceback.
        raise
    finally:
        self.core.stop(options={'remove': True})
    return retval
Run debugger on Python script `filename'. The script may inspect sys.argv for command arguments. `globals_' and `locals_' are the dictionaries to use for local and global variables. If `globals' is not given, globals() (the current global variables) is used. If `locals_' is not given, it becomes a copy of `globals_'. True is returned if the program terminated normally and False if the debugger initiated a quit or the program did not normally terminate. See also `run_call' if what you to debug a function call, `run_eval' if you want to debug an expression, and `run' if you want to debug general Python statements not inside a file.
def merge_requests_data_to(to, food=None):
    """Merge a small analyzed result into a big one; modifies ``to`` in place.

    Fixes two defects in the previous version: the mutable default argument
    ``food={}``, and double counting when ``to`` started out empty --
    ``to.update(food)`` aliased ``food``'s nested dicts, which were then
    incremented again (also silently mutating ``food`` itself).

    :param to: accumulator dict, modified in place
    :param food: one analyzed result to fold in; empty/None is a no-op
    """
    from copy import deepcopy

    if not food:
        return
    if not to:
        # Seed the accumulator with an independent deep copy so the
        # increments below (and later merges) can never mutate ``food``.
        to.update(deepcopy(food))
        return

    to['requests_counter']['normal'] += food['requests_counter']['normal']
    to['requests_counter']['slow'] += food['requests_counter']['slow']
    to['total_slow_duration'] += food['total_slow_duration']
    for group_name, urls in food['data_details'].items():
        if group_name not in to['data_details']:
            # Copy, don't alias, so future merges leave ``food`` intact.
            to['data_details'][group_name] = deepcopy(urls)
        else:
            to_urls = to['data_details'][group_name]
            to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with(
                urls['duration_agr_data'])
            # Merge urls data
            merge_urls_data_to(to_urls['urls'], urls['urls'])
Merge a small analyzed result to a big one, this function will modify the original ``to``
def from_raw(self, raw: "RawScalar") -> "Optional[bytes]":
    """Override superclass method.

    Decode *raw* as base64 and return the bytes, or ``None`` when *raw*
    is not a valid base64 value.
    """
    try:
        return base64.b64decode(raw, validate=True)
    except (TypeError, ValueError):
        # Bug fix: with validate=True, invalid input raises binascii.Error
        # (a ValueError subclass), not TypeError -- the previous
        # ``except TypeError`` let decode failures escape instead of
        # returning None.
        return None
Override superclass method.
def get_out_ip_addr(cls, tenant_id):
    """Retrieves the 'out' service subnet attributes. """
    # The fabric must have been prepared (registered) for this tenant.
    if tenant_id in cls.serv_obj_dict:
        return cls.serv_obj_dict.get(tenant_id).get_out_ip_addr()
    LOG.error("Fabric not prepared for tenant %s", tenant_id)
    return
Retrieves the 'out' service subnet attributes.
def __add_text(self, text):
    """Adds the given Unicode text as a locally defined symbol."""
    # ``None`` is permitted (a symbol with undefined text); anything else
    # must be a Unicode string.
    if text is not None and not isinstance(text, six.text_type):
        raise TypeError('Local symbol definition must be a Unicode sequence or None: %r' % text)
    # Allocate the next free symbol ID in this table.
    sid = self.__new_sid()
    location = None
    # Shared tables record an import location for new symbols; local
    # tables leave it unset.
    if self.table_type.is_shared:
        location = self.__import_location(sid)
    token = SymbolToken(text, sid, location)
    self.__add(token)
    return token
Adds the given Unicode text as a locally defined symbol.
def Dadgostar_Shaw_integral_over_T(T, similarity_variable):
    r'''Calculate the integral over T of liquid constant-pressure heat
    capacity with the similarity-variable concept and method shown in [1]_.

    Parameters
    ----------
    T : float
        Temperature of gas [K]
    similarity_variable : float
        similarity variable as defined in [1]_, [mol/g]

    Returns
    -------
    S : float
        Difference in entropy from 0 K, [J/kg/K]

    Notes
    -----
    The original model is expressed in J/g/K and predicts *mass* heat
    capacity, not molar heat capacity like most other methods!
    The integral was computed with SymPy.

    See Also
    --------
    Dadgostar_Shaw
    Dadgostar_Shaw_integral

    Examples
    --------
    >>> Dadgostar_Shaw_integral_over_T(300.0, 0.1333)
    1201.1409113147927

    References
    ----------
    .. [1] Dadgostar, Nafiseh, and John M. Shaw. "A Predictive Correlation
       for the Constant-Pressure Specific Heat Capacity of Pure and
       Ill-Defined Liquid Hydrocarbons." Fluid Phase Equilibria 313
       (January 15, 2012): 211-226. doi:10.1016/j.fluid.2011.09.015.
    '''
    a = similarity_variable
    a2 = a*a
    # Correlation coefficients from [1]_.
    a11, a12 = -0.3416, 2.2671
    a21, a22 = 0.1064, -0.3874
    a31, a32 = -9.8231E-05, 4.182E-04
    constant = 24.5
    # Antiderivative of Cp/T: quadratic term, linear term, and log term.
    quadratic = T*T*0.5*(a2*a32 + a*a31)
    linear = T*(a2*a22 + a*a21)
    logarithmic = a*constant*(a*a12 + a11)*log(T)
    S = quadratic + linear + logarithmic
    # Convert J/g/K to J/kg/K.
    return S*1000.
r'''Calculate the integral of liquid constant-pressure heat capacitiy with the similarity variable concept and method as shown in [1]_. Parameters ---------- T : float Temperature of gas [K] similarity_variable : float similarity variable as defined in [1]_, [mol/g] Returns ------- S : float Difference in entropy from 0 K, [J/kg/K] Notes ----- Original model is in terms of J/g/K. Note that the model is for predicting mass heat capacity, not molar heat capacity like most other methods! Integral was computed with SymPy. See Also -------- Dadgostar_Shaw Dadgostar_Shaw_integral Examples -------- >>> Dadgostar_Shaw_integral_over_T(300.0, 0.1333) 1201.1409113147927 References ---------- .. [1] Dadgostar, Nafiseh, and John M. Shaw. "A Predictive Correlation for the Constant-Pressure Specific Heat Capacity of Pure and Ill-Defined Liquid Hydrocarbons." Fluid Phase Equilibria 313 (January 15, 2012): 211-226. doi:10.1016/j.fluid.2011.09.015.
def find_files(path, patterns):
    """
    Walk ``path`` recursively and collect every file matching any of the
    given glob pattern(s)

    @type path: str
    @param path: A path to traverse
    @type patterns: str|list
    @param patterns: A pattern or a list of patterns to match
    @rtype: list[str]:
    @return: A list of matched files
    """
    if isinstance(patterns, (list, tuple)):
        pattern_list = patterns
    else:
        pattern_list = [patterns]
    matched = []
    for root, _dirnames, names in os.walk(path):
        for pat in pattern_list:
            matched.extend(os.path.join(root, name)
                           for name in fnmatch.filter(names, pat))
    return matched
Returns all files from a given path that matches the pattern or list of patterns @type path: str @param path: A path to traverse @typ patterns: str|list @param patterns: A pattern or a list of patterns to match @rtype: list[str]: @return: A list of matched files
def _execute_pillar(pillar_name, run_type):
    '''
    Run one or more nagios plugins from pillar data and get the result of run_type
    The pillar have to be in this format:
    ------
    webserver:
        Ping_google:
            - check_icmp: 8.8.8.8
            - check_icmp: google.com
        Load:
            - check_load: -w 0.8 -c 1
        APT:
            - check_apt
    -------
    '''
    groups = __salt__['pillar.get'](pillar_name)
    # Results are collected as {group: {command_key: run_type(...) result}}.
    data = {}
    for group in groups:
        data[group] = {}
        commands = groups[group]
        for command in commands:
            # A dict entry carries the plugin name as its (single) key and
            # the argument string as the value; a plain string entry is a
            # plugin invoked with no arguments.
            if isinstance(command, dict):
                plugin = next(six.iterkeys(command))
                args = command[plugin]
            else:
                plugin = command
                args = ''
            command_key = _format_dict_key(args, plugin)
            data[group][command_key] = run_type(plugin, args)
    return data
Run one or more nagios plugins from pillar data and get the result of run_type The pillar have to be in this format: ------ webserver: Ping_google: - check_icmp: 8.8.8.8 - check_icmp: google.com Load: - check_load: -w 0.8 -c 1 APT: - check_apt -------
def capability_info(self, name=None):
    """Return information about the requested capability from this list.

    Scans ``self.resources`` for a resource whose ``capability`` equals
    ``name``; returns ``None`` when no such resource exists.
    """
    matches = (res for res in self.resources if res.capability == name)
    return next(matches, None)
Return information about the requested capability from this list. Will return None if there is no information about the requested capability.
def msg_name(code):
    """Convert integer message code into a string name.

    Raises ``KeyError`` when ``code`` is not a known command code.
    """
    names_by_code = {code_: name for name, code_ in COMMANDS.items()}
    return names_by_code[code]
Convert integer message code into a string name.
def normalize_parameters(params):
    """**Parameters Normalization**

    Normalize an OAuth parameter list into a single string per
    `section 3.4.1.3.2`_ of the spec: each name and value is
    percent-encoded, the pairs are sorted by name (ties broken by value),
    each pair is joined as ``name=value``, and the pairs are concatenated
    with ``&``. For example::

        a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
        dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
        &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7

    (line breaks are for display purposes only)

    .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # 1. Encode every name and value (`Section 3.6`_ of RFC 5849).
    #
    # .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
    encoded = [(utils.escape(k), utils.escape(v)) for k, v in params]
    # 2. Sort by name using ascending byte-value ordering; parameters
    #    sharing a name are sorted by their value.
    # 3. Join each name to its value with "=" even when the value is empty.
    # 4. Concatenate the sorted pairs with "&".
    return '&'.join('{0}={1}'.format(k, v) for k, v in sorted(encoded))
**Parameters Normalization** Per `section 3.4.1.3.2`_ of the spec. For example, the list of parameters from the previous section would be normalized as follows: Encoded:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | b5 | %3D%253D | | a3 | a | | c%40 | | | a2 | r%20b | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_token | kkk9d7dh3k39sjv7 | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_nonce | 7d8f3e4a | | c2 | | | a3 | 2%20q | +------------------------+------------------+ Sorted:: +------------------------+------------------+ | Name | Value | +------------------------+------------------+ | a2 | r%20b | | a3 | 2%20q | | a3 | a | | b5 | %3D%253D | | c%40 | | | c2 | | | oauth_consumer_key | 9djdj82h48djs9d2 | | oauth_nonce | 7d8f3e4a | | oauth_signature_method | HMAC-SHA1 | | oauth_timestamp | 137131201 | | oauth_token | kkk9d7dh3k39sjv7 | +------------------------+------------------+ Concatenated Pairs:: +-------------------------------------+ | Name=Value | +-------------------------------------+ | a2=r%20b | | a3=2%20q | | a3=a | | b5=%3D%253D | | c%40= | | c2= | | oauth_consumer_key=9djdj82h48djs9d2 | | oauth_nonce=7d8f3e4a | | oauth_signature_method=HMAC-SHA1 | | oauth_timestamp=137131201 | | oauth_token=kkk9d7dh3k39sjv7 | +-------------------------------------+ and concatenated together into a single string (line breaks are for display purposes only):: a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1 &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7 .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
def manifest_repr(self, p_num): """ Builds a manifest string representation of the parameters and returns it :param p_num: int :return: string """ # Build the parameter prefix prefix = "p" + str(p_num) + "_" # Generate the manifest string manifest = prefix + "MODE=" + ("IN" if self.type == Type.FILE else "") + "\n" manifest += prefix + "TYPE=" + str(self.type.value) + "\n" if self.type == Type.FILE and len(self.choices) > 0: manifest += prefix + "choices=" + self._choices() + "\n" manifest += prefix + "default_value=" + self.default_value + "\n" manifest += prefix + "description=" + GPTaskSpec.manifest_escape(self.description) + "\n" manifest += prefix + "fileFormat=" + ';'.join(self.file_format) + "\n" manifest += prefix + "flag=" + self.flag + "\n" manifest += prefix + "name=" + self.name + "\n" manifest += prefix + "numValues=" + self._num_values() + "\n" manifest += prefix + "optional=" + str(self.optional.value) + "\n" manifest += prefix + "prefix=" + (self.flag if self.prefix_when_specified else "") + "\n" manifest += prefix + "prefix_when_specified=" + (self.flag if self.prefix_when_specified else "") + "\n" manifest += prefix + "type=" + self._java_type() + "\n" manifest += prefix + "value=" + (self._choices() if self.type != Type.FILE and len(self.choices) > 0 else "") + "\n" # Return the manifest string return manifest
Builds a manifest string representation of the parameters and returns it :param p_num: int :return: string
def clear_dir(path):
    """Empty out the image directory.

    Unlinks every regular file and symlink directly inside ``path``;
    subdirectories are left untouched.
    """
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.islink(full) or os.path.isfile(full):
            os.unlink(full)
Empty out the image directory.
def autogen_explicit_injectable_metaclass(classname, regen_command=None,
                                          conditional_imports=None):
    r"""
    Generate the source of a static "ExplicitInject" class whose methods
    redirect to the functions registered for ``classname`` in
    ``__CLASSTYPE_ATTRIBUTES__``.

    Args:
        classname (str): key identifying the injected class
        regen_command (str): command recorded in the generated header
            (defaults to a FIXME placeholder)
        conditional_imports: forwarded to ``autogen_import_list``

    Returns:
        str: autoformatted Python source for the generated module

    CommandLine:
        python -m utool.util_class --exec-autogen_explicit_injectable_metaclass

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_class import *  # NOQA
        >>> from utool.util_class import __CLASSTYPE_ATTRIBUTES__  # NOQA
        >>> import ibeis
        >>> import ibeis.control.IBEISControl
        >>> classname = ibeis.control.controller_inject.CONTROLLER_CLASSNAME
        >>> result = autogen_explicit_injectable_metaclass(classname)
        >>> print(result)
    """
    import utool as ut
    vals_list = []

    def make_redirect(func):
        # PRESERVES ALL SIGNATURES WITH EXECS
        # Template for a thin wrapper that forwards to the original
        # module-level function with an identical signature.
        src_fmt = r'''
        def {funcname}{defsig}:
            """ {orig_docstr}"""
            return {orig_funcname}{callsig}
        '''
        from utool._internal import meta_util_six
        orig_docstr = meta_util_six.get_funcdoc(func)
        funcname = meta_util_six.get_funcname(func)
        # ``modname`` is bound in the enclosing loop below (closure).
        orig_funcname = modname.split('.')[-1] + '.' + funcname
        orig_docstr = '' if orig_docstr is None else orig_docstr
        import textwrap
        # Put wrapped function into a scope
        import inspect
        # NOTE(review): inspect.getargspec/formatargspec are removed in
        # modern Python — this code presumably targets Python 2 / early 3.
        argspec = inspect.getargspec(func)
        (args, varargs, varkw, defaults) = argspec
        defsig = inspect.formatargspec(*argspec)
        # Call signature omits defaults so arguments pass through verbatim.
        callsig = inspect.formatargspec(*argspec[0:3])
        src_fmtdict = dict(funcname=funcname, orig_funcname=orig_funcname,
                           defsig=defsig, callsig=callsig,
                           orig_docstr=orig_docstr)
        src = textwrap.dedent(src_fmt).format(**src_fmtdict)
        return src

    src_list = []
    # Collect a redirect stub for every registered function of ``classname``.
    for classkey, vals in __CLASSTYPE_ATTRIBUTES__.items():
        modname = classkey[1]
        if classkey[0] == classname:
            vals_list.append(vals)
            for func in vals:
                src = make_redirect(func)
                src = ut.indent(src)
                src = '\n'.join([_.rstrip() for _ in src.split('\n')])
                src_list.append(src)
    if regen_command is None:
        regen_command = 'FIXME None given'
    # Module header: coding cookie, module docstring, and base imports.
    module_header = ut.codeblock(
        """
        # -*- coding: utf-8 -*-
        """ + ut.TRIPLE_DOUBLE_QUOTE + """
        Static file containing autogenerated functions for {classname}
        Autogenerated on {autogen_time}
        RegenCommand:
            {regen_command}
        """ + ut.TRIPLE_DOUBLE_QUOTE + """
        from __future__ import absolute_import, division, print_function
        import utool as ut
        """).format(
            autogen_time=ut.get_timestamp(),
            regen_command=regen_command,
            classname=classname)
    depends_module_block = autogen_import_list(classname, conditional_imports)
    inject_statement_fmt = ("print, rrr, profile = "
                            "ut.inject2(__name__, '[autogen_explicit_inject_{classname}]')")
    inject_statement = inject_statement_fmt.format(classname=classname)
    # Assemble the module: header, imports, inject boilerplate, class body.
    source_block_lines = [
        module_header,
        depends_module_block,
        inject_statement,
        '\n',
        'class ExplicitInject' + classname + '(object):',
    ] + src_list
    source_block = '\n'.join(source_block_lines)
    source_block = ut.autoformat_pep8(source_block, aggressive=2)
    return source_block
r""" Args: classname (?): Returns: ?: CommandLine: python -m utool.util_class --exec-autogen_explicit_injectable_metaclass Example: >>> # DISABLE_DOCTEST >>> from utool.util_class import * # NOQA >>> from utool.util_class import __CLASSTYPE_ATTRIBUTES__ # NOQA >>> import ibeis >>> import ibeis.control.IBEISControl >>> classname = ibeis.control.controller_inject.CONTROLLER_CLASSNAME >>> result = autogen_explicit_injectable_metaclass(classname) >>> print(result)
def _sanitize_inputs(self):
    """Sanitizes input fields and returns a map <GlobalStreamId -> Grouping>"""
    ret = {}
    if self.inputs is None:
        # NOTE(review): returns None here rather than the empty dict
        # ``ret`` — confirm callers handle both shapes.
        return
    if isinstance(self.inputs, dict):
        # inputs are dictionary, must be either <HeronComponentSpec -> Grouping> or
        # <GlobalStreamId -> Grouping>
        for key, grouping in self.inputs.items():
            if not Grouping.is_grouping_sane(grouping):
                raise ValueError('A given grouping is not supported')
            if isinstance(key, HeronComponentSpec):
                # use default streamid
                if key.name is None:
                    # should not happen as TopologyType metaclass sets name attribute
                    # before calling this method
                    raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesn't have a name")
                global_streamid = GlobalStreamId(key.name, Stream.DEFAULT_STREAM_ID)
                ret[global_streamid] = grouping
            elif isinstance(key, GlobalStreamId):
                ret[key] = grouping
            else:
                raise ValueError("%s is not supported as a key to inputs" % str(key))
    elif isinstance(self.inputs, (list, tuple)):
        # inputs are lists, must be either a list of HeronComponentSpec or GlobalStreamId
        # will use SHUFFLE grouping
        for input_obj in self.inputs:
            if isinstance(input_obj, HeronComponentSpec):
                if input_obj.name is None:
                    # should not happen as TopologyType metaclass sets name attribute
                    # before calling this method
                    raise RuntimeError("In _sanitize_inputs(): HeronComponentSpec doesn't have a name")
                global_streamid = GlobalStreamId(input_obj.name, Stream.DEFAULT_STREAM_ID)
                ret[global_streamid] = Grouping.SHUFFLE
            elif isinstance(input_obj, GlobalStreamId):
                ret[input_obj] = Grouping.SHUFFLE
            else:
                raise ValueError("%s is not supported as an input" % str(input_obj))
    else:
        raise TypeError("Inputs must be a list, dict, or None, given: %s" % str(self.inputs))
    return ret
Sanitizes input fields and returns a map <GlobalStreamId -> Grouping>
def machine_to_machine(self):
    """
    Access the machine_to_machine

    :returns: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList
    :rtype: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList
    """
    # Lazily construct the list resource on first access and cache it.
    cached = self._machine_to_machine
    if cached is None:
        cached = MachineToMachineList(
            self._version,
            account_sid=self._solution['account_sid'],
            country_code=self._solution['country_code'],
        )
        self._machine_to_machine = cached
    return cached
Access the machine_to_machine :returns: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList :rtype: twilio.rest.api.v2010.account.available_phone_number.machine_to_machine.MachineToMachineList
def relaxation_matvec(P, p0, obs, times=[1]):
    r"""Relaxation experiment.

    The relaxation experiment describes the time-evolution
    of an expectation value starting in a non-equilibrium
    situation.

    Parameters
    ----------
    P : (M, M) ndarray
        Transition matrix
    p0 : (M,) ndarray (optional)
        Initial distribution for a relaxation experiment
    obs : (M,) ndarray
        Observable, represented as vector on state space
    times : list of int (optional)
        List of times at which to compute expectation

    Returns
    -------
    res : ndarray
        Array of expectation value at given times

    """
    times = np.asarray(times)
    # Work on times sorted in increasing order so each step only has to
    # propagate forward by the gap to the next requested time.
    ind = np.argsort(times)
    times = times[ind]
    if times[0] < 0:
        raise ValueError("Times can not be negative")
    dt = times[1:] - times[0:-1]
    nt = len(times)
    relaxations = np.zeros(nt)
    # Propagate obs to the first (smallest) requested time.
    obs_t = 1.0 * obs
    obs_t = propagate(P, obs_t, times[0])
    relaxations[0] = np.dot(p0, obs_t)
    for i in range(nt - 1):
        obs_t = propagate(P, obs_t, dt[i])
        relaxations[i + 1] = np.dot(p0, obs_t)
    # Scatter results back to the caller's original time ordering.
    # (The previous ``relaxations[ind]`` re-applied the permutation instead
    # of inverting it, which is only correct when ``ind`` is an involution —
    # e.g. already-sorted input — and wrong for >= 3 unsorted times.)
    result = np.empty(nt)
    result[ind] = relaxations
    return result
r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation Returns ------- res : ndarray Array of expectation value at given times
def expand(obj, relation, seen):
    """ Return the to_json or id of a sqlalchemy relationship. """
    # Dynamic relationships expose .all(); materialize them first.
    if hasattr(relation, 'all'):
        relation = relation.all()
    # Collections expand element-wise.
    if hasattr(relation, '__iter__'):
        return [expand(obj, element, seen) for element in relation]
    # Types already visited are reduced to their id to break cycles.
    if type(relation) in seen:
        return relation.id
    return to_json(relation, seen + [type(obj)])
Return the to_json or id of a sqlalchemy relationship.
def triggers_update_many(self, data, **kwargs):
    """Bulk-update triggers.

    See https://developer.zendesk.com/rest_api/docs/core/triggers#update-many-triggers
    """
    endpoint = "/api/v2/triggers/update_many.json"
    return self.call(endpoint, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/triggers#update-many-triggers
def dotter(self):
    """Write a "." to stdout, tracking how many have been written on the
    current line via ``self.globalcount`` and starting a fresh line once
    the count exceeds 80."""
    if self.globalcount > 80:
        # Line is full: begin a new line with this dot and reset the count.
        sys.stdout.write('\n.')
        self.globalcount = 1
    else:
        sys.stdout.write('.')
        self.globalcount += 1
Prints formatted time to stdout at the start of a line, as well as a "." whenever the length of the line is equal or lesser than 80 "." long